| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses (1 value) | stringclasses (15 values) | int32 2–1.05M |
import pygame
import time
import os
from global_variables import COLORS, ICONS, ICON_FONT_FILE, FONTS
from pygame.locals import *
from sunquest import *
from string import maketrans
Uppercase = maketrans("7894561230",
'SMTWHF X ')
DEBUG = False
class VirtualKeyboard():
''' Implement a basic full screen virtual keyboard for touchscreens '''
def __init__(self, screen, color, validate=True):
# SCREEN SETTINGS
self.screen = screen
self.rect = self.screen.get_rect()
self.w = self.rect.width
self.h = self.rect.height
# create a background surface
self.background = pygame.Surface(self.rect.size)
self.screen.fill(COLORS['CLOUD'])
# KEY SETTINGS
self.keyW = int((self.w) / 4) # key width with border
self.keyH = int((self.h) / 5) # key height
self.x = (self.w - self.keyW * 4) / 2 # centered
self.y = 0 # stay away from the edges (better touch)
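# Layout sketch (the screen size is only an illustrative assumption): on an
# 800x480 display, keyW == 200 and keyH == 96, giving a 4-column by 5-row
# grid in which the top row holds the text input box and the remaining four
# rows hold the keypad built in addkeys().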
# pygame.font.init() # Just in case
self.keyFont = FONTS['key_font']['font']
self.fa = pygame.font.Font(
ICONS.font_location, int(
self.keyH * 0.70)) # keyboard font
self.textW = self.w # + 4 # leave room for escape key
self.textH = self.keyH
self.color = COLORS[color]
self.caps = False
self.keys = []
self.addkeys() # add all the keys
# self.paintkeys() # paint all the keys
self.validate = validate
# pygame.display.update()
def run(self, text=''):
self.screen.fill(COLORS['CLOUD'])
self.text = text
# create an input text box
# create a text input box with room for 2 lines of text. leave room for
# the escape key
self.input = TextInput(
self.screen,
self.text,
self.x,
self.y,
self.textW,
self.textH)
self.caps = False
self.togglecaps()
self.paintkeys()
counter = 0
# main event loop (hog the CPU since we're on top, but someone might want
# to rewrite this to be more event-based...)
while True:
time.sleep(0.1) # 10/second is often enough
events = pygame.event.get()
if events is not None:
for e in events:
if (e.type == MOUSEBUTTONDOWN):
self.selectatmouse()
if (e.type == MOUSEBUTTONUP):
if self.clickatmouse():
# clickatmouse() returns True when the user pressed enter or escape
if self.input.text == '':
self.clear()
# Return what the user entered
return self.input.text
else:
if DEBUG:
return self.input.text
else:
if self.validate == False:
return self.input.text
else:
if sunquest_fix(self.input.text) != None:
self.clear()
return sunquest_fix(self.input.text)
else:
self.paintkeys()
temp = self.input.text
self.input.text = 'invalid'
self.input.cursorvis = False
self.input.draw()
time.sleep(1)
self.input.text = temp
self.input.draw()
# print "invalid"
# self.clear()
if (e.type == MOUSEMOTION):
if e.buttons[0] == 1:
# user click-dragged to a different key?
self.selectatmouse()
counter += 1
# print self.input.cursorvis
if counter > 10:
self.input.flashcursor()
counter = 0
def invalid_entry(self):
self.clear()
def unselectall(self, force=False):
''' Force all the keys to be unselected
Marks any that change as dirty to redraw '''
for key in self.keys:
if key.selected:
key.selected = False
key.dirty = True
def clickatmouse(self):
# ''' Check to see if the user is pressing down on a key and draw it selected '''
# self.screen.blit(self.screenCopy, (0,0))
self.unselectall()
for key in self.keys:
keyrect = Rect(key.x, key.y, key.w, key.h)
if keyrect.collidepoint(pygame.mouse.get_pos()):
key.dirty = True
if key.bskey:
self.input.backspace()
self.paintkeys()
return False
if key.fskey:
self.input.inccursor()
self.paintkeys()
return False
if key.spacekey:
self.input.addcharatcursor(' ')
self.paintkeys()
return False
if key.clear:
while self.input.cursorpos > 0:
self.input.backspace()
self.paintkeys()
return False
if key.shiftkey:
self.togglecaps()
self.paintkeys()
return False
if key.escape:
self.input.text = '' # clear input
return True
if key.enter:
return True
if self.caps:
keycap = key.caption.translate(Uppercase)
else:
keycap = key.caption
self.input.addcharatcursor(keycap)
if self.caps:
self.togglecaps()
self.paintkeys()
return False
self.paintkeys()
return False
def togglecaps(self):
''' Toggle uppercase / lowercase '''
if self.caps:
self.caps = False
else:
self.caps = True
for key in self.keys:
key.dirty = True
def selectatmouse(self):
self.unselectall()
pos = pygame.mouse.get_pos()
if self.input.rect.collidepoint(pos):
self.input.setcursor(pos)
else:
for key in self.keys:
keyrect = Rect(key.x, key.y, key.w, key.h)
if keyrect.collidepoint(pos):
key.selected = True
key.dirty = True
self.paintkeys()
return
self.paintkeys()
def addkeys(self): # Add all the keys for the virtual keyboard
x = self.x + 1
y = self.y + self.textH # + self.keyH / 4
row = ['7', '8', '9']
for item in row:
onekey = VKey(
item,
x,
y,
self.keyW,
self.keyH,
self.keyFont, self.color)
self.keys.append(onekey)
x += self.keyW
onekey = VKey('keyboard-backspace',
x,
y,
self.keyW - 1,
self.keyH,
self.fa, self.color, special=True)
onekey.bskey = True
self.keys.append(onekey)
x += onekey.w + self.keyW / 3
y += self.keyH # overlap border
x = self.x + 1
row = ['4', '5', '6']
for item in row:
onekey = VKey(
item,
x,
y,
self.keyW,
self.keyH,
self.keyFont, self.color)
self.keys.append(onekey)
x += self.keyW
x += self.x
onekey = VKey('alphabetical',
x,
y,
self.keyW - 1,
self.keyH,
self.fa, self.color, special=True, shiftkey=True)
self.keys.append(onekey)
y += self.keyH
x = self.x + 1
row = ['1', '2', '3']
for item in row:
onekey = VKey(
item,
x,
y,
self.keyW,
self.keyH,
self.keyFont, self.color)
self.keys.append(onekey)
x += self.keyW
onekey = VKey(
'keyboard-return',
x,
y,
self.keyW - 1,
self.keyH * 2,
self.fa, self.color, special=True)
onekey.enter = True
self.keys.append(onekey)
y += self.keyH
x = self.x + 1
onekey = VKey('keyboard-close',
x,
y,
int(self.keyW),
self.keyH,
self.fa, self.color, special=True)
onekey.escape = True
self.keys.append(onekey)
x += self.keyW
onekey = VKey('0',
x,
y,
int(self.keyW),
self.keyH,
self.keyFont, self.color)
self.keys.append(onekey)
x += self.keyW
onekey = VKey('eraser',
x,
y,
int(self.keyW),
self.keyH,
self.fa, self.color, special=True)
onekey.clear = True
self.keys.append(onekey)
x += self.keyW
self.all_keys = pygame.sprite.Group()
self.all_keys.add(self.all_keys, self.keys)
def paintkeys(self):
''' Draw the keyboard keys (but only those that are dirty). '''
for key in self.keys:
# pass
key.update(self.caps)
self.all_keys.draw(self.screen)
pygame.display.update()
def clear(self):
pass
''' Put the screen back to before we started '''
# self.screen.blit(self.background, (0, 0))
# pygame.display.get_surface().flip()
# pygame.display.update()
# ----------------------------------------------------------------------------
class TextInput():
''' Handles the text input box and manages the cursor '''
def __init__(self, screen, text, x, y, w, h):
self.screen = screen
self.text = text
self.cursorpos = len(text)
self.x = x
self.y = y
self.w = w - 2
self.h = h - 3
self.rect = Rect(1, 1, w, h)
self.surface_rect = Rect(0, 0, w, h)
self.layer = pygame.Surface((self.w, self.h))
self.surface = screen.subsurface(self.surface_rect)
self.max_length = 9
self.background_color = COLORS['CLOUD']
self.font_color = COLORS['BLUE-GREY']['700']
self.cursor_color = self.font_color
font_file = 'SourceCodePro-Regular.ttf'
font_location = os.path.join("resources/fonts", font_file)
rect = self.surface_rect
fsize = int(self.h) # font size proportional to screen height
self.txtFont = pygame.font.Font(
font_location, int(
fsize)) # keyboard font
# attempt to figure out how many chars will fit on a line
# this does not work with proportional fonts
tX = self.txtFont.render("XXXXXXXXXX", 1, (255, 255, 0)) # 10 chars
rtX = tX.get_rect() # how big is it?
# chars per line (horizontal)
self.lineChars = int(self.w / (rtX.width / 10)) - 1
self.lineH = self.h - 4 # pixels per line (vertical)
self.lineH = rtX.height # pixels per line (vertical)
self.cursorlayer = pygame.Surface(
(2, self.lineH - 20)) # thin vertical line
self.cursorlayer.fill(self.cursor_color) # white vertical line
self.cursorvis = True
self.cursorX = len(text) % self.lineChars
self.cursorY = int(len(text) / self.lineChars) # line 1
self.draw()
def draw(self):
''' Draw the text input box '''
self.layer.fill(self.background_color)
t1 = self.txtFont.render(self.text, 1, self.font_color) # line 1
self.layer.blit(t1, (10, -8))
self.drawcursor()
self.surface.blit(self.layer, self.rect)
pygame.display.update()
def flashcursor(self):
''' Toggle visibility of the cursor '''
if self.cursorvis:
self.cursorvis = False
else:
self.cursorvis = True
if self.cursorvis:
self.drawcursor()
self.draw()
def addcharatcursor(self, letter):
''' Add a character wherever the cursor is currently located '''
# print self.cursorpos
# print len(self.text)
# print self.max_length
if self.cursorpos < len(self.text) and len(self.text) < self.max_length:
# print 'Inserting in the middle'
self.text = self.text[:self.cursorpos] + letter + self.text[self.cursorpos:]
self.cursorpos += 1
self.draw()
return
if len(self.text) < self.max_length:
self.text += letter
self.cursorpos += 1
self.draw()
def backspace(self):
''' Delete a character before the cursor position '''
if self.cursorpos == 0:
return
self.text = self.text[:self.cursorpos - 1] + self.text[self.cursorpos:]
self.cursorpos -= 1
self.draw()
return
def deccursor(self):
''' Move the cursor one space left '''
if self.cursorpos == 0:
return
self.cursorpos -= 1
self.draw()
def inccursor(self):
''' Move the cursor one space right (but not beyond the end of the text) '''
if self.cursorpos == len(self.text):
return
self.cursorpos += 1
self.draw()
def drawcursor(self):
''' Draw the cursor '''
line = int(self.cursorpos / self.lineChars) # line number
if line > 1:
line = 1
x = 4
y = 4
# print y
# Calc width of text to this point
if self.cursorpos > 0:
linetext = self.text[line * self.lineChars:self.cursorpos]
rtext = self.txtFont.render(linetext, 1, self.font_color)
textpos = rtext.get_rect()
x = x + textpos.width + 6
if self.cursorvis:
self.cursorlayer.fill(self.cursor_color)
else:
self.cursorlayer.fill(self.background_color)
self.layer.blit(self.cursorlayer, (x, y))
pygame.display.update()
def setcursor(self, pos): # move cursor to char nearest position (x,y)
line = 0
x = pos[0] - self.x + line * self.w # virtual x position
p = 0
l = len(self.text)
while p < l:
text = self.txtFont.render(
self.text[
:p + 1], 1, (255, 255, 255)) # how many pixels to next char?
rtext = text.get_rect()
textX = rtext.x + rtext.width
if textX >= x:
break # we found it
p += 1
self.cursorpos = p
self.draw()
# ----------------------------------------------------------------------------
class VKey(pygame.sprite.Sprite):
''' A single key for the VirtualKeyboard '''
def __init__(
self,
caption,
x,
y,
w,
h,
font,
color,
special=False,
shiftkey=False):
pygame.sprite.Sprite.__init__(self)
self.x = x
self.y = y
self.w = w # + 1 # overlap borders
self.h = h # + 1 # overlap borders
self.special = special
self.enter = False
self.bskey = False
self.fskey = False
self.clear = False
self.spacekey = False
self.escape = False
self.shiftkey = shiftkey
self.font = font
self.selected = False
self.dirty = True
self.image = pygame.Surface((w - 1, h - 1))
self.rect = Rect(self.x, self.y, w, h)
self.color = color['500']
self.selected_color = color['100']
self.font_color = COLORS['CLOUD']
if special:
self.caption = ICONS.unicode(caption)
if shiftkey:
self.shifted_caption = ICONS.unicode('numeric')
else:
self.caption = caption
self.shifted_caption = self.caption.translate(Uppercase)
if not special or self.shiftkey:
self.shifted_text = self.font.render(
self.shifted_caption,
1,
self.font_color)
self.text = self.font.render(self.caption, 1, self.font_color)
def update(self, shifted=False, forcedraw=False):
''' Draw one key if it needs redrawing '''
if not forcedraw:
if not self.dirty:
return
text = self.text
if not self.special or self.shiftkey:
if shifted:
text = self.shifted_text
if self.selected:
color = self.selected_color
else:
color = self.color
self.image.fill(color)
textpos = text.get_rect()
blockoffx = (self.w / 2)
blockoffy = (self.h / 2)
offsetx = blockoffx - (textpos.width / 2)
offsety = blockoffy - (textpos.height / 2)
self.image.blit(text, (offsetx, offsety))
self.dirty = False
| repo: daftscienceLLC/pifile | path: keyboard.py | language: Python | license: gpl-3.0 | size: 18,205 |
import csv
import numpy
import os
from argparse import ArgumentParser
def generate_diagnostics(list_of_filenames):
(col_names, runs) = read_mcmc_traces(list_of_filenames)
runs2 = divide_runs(runs)
num_samples = numpy.shape(runs2[0])[0]
R_hat = []
for n in range(250,num_samples+1,250):
R_hat.append(compute_R_hat(runs2,n-1))
header = ','.join(['iterations'] + col_names[1:])
print header
idx=0
for n in range(250, num_samples + 1, 250):
row = ('%s,' % n) + ','.join(map(str,R_hat[idx].T))
print row
idx += 1
def read_mcmc_traces(list_of_filenames):
f=open(list_of_filenames[0])
reader = csv.reader(f)
col_names = next(reader)
f.close()
runs = []
for fname in list_of_filenames:
d = numpy.genfromtxt(fname, delimiter=",", skip_header=1)
runs.append(d)
return (col_names,runs)
def divide_runs(runs):
# split runs in half
runs2 = []
for i in range(0,len(runs)):
num_samples = numpy.shape(runs[i])[0]
num_cols = numpy.shape(runs[i])[1]
spl = int(numpy.floor(num_samples/2))
s1 = runs[i][0:(spl-1),:]
s1 = s1[:,1:num_cols]
runs2.append(s1)
s2 = runs[i][spl:num_samples, :]
s2 = s2[:,1:num_cols]
runs2.append(s2)
return runs2
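# A quick shape check of divide_runs (values are illustrative): a single
# trace with 1000 samples and columns [iteration, p1, p2] is split at
# spl == 500 into two half-runs of shape (499, 2) and (500, 2) -- the
# iteration column is dropped, and the first half loses one row to the
# spl - 1 slice above.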
def compute_R_hat(runs2,n):
# compute the R_hat measure (Gelman et al, Bayesian Data Analysis 3rd Ed, CRC Press, p284-285)
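# Sketch of the quantities computed below, for m chains and n samples:
#   pj[j] = per-chain mean, sj[j] = per-chain sample variance
#   B     = n/(m-1) * sum_j (pj[j] - mean(pj))**2   (between-chain variance)
#   W     = mean_j sj[j]                            (within-chain variance)
#   var   = (n-1)/n * W + B/n
#   R_hat = sqrt(var / W)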
m = len(runs2)
nv = numpy.shape(runs2[0])[1]
pj = numpy.zeros((m,nv))
sj = numpy.zeros((m,nv))
for j in range(0,m):
pj[j,:] = numpy.mean(runs2[j][0:n,:],axis=0)
for j in range(0,m):
sj[j,:] = numpy.sum(numpy.power(runs2[j][0:n, :] - pj[j,:],2.0),axis=0)/(n-1)
B = numpy.zeros(nv)
pp = numpy.mean(pj,axis=0)
for j in range(0,m):
B = B + numpy.power(pj[j]-pp,2.0)
B = B*n/(m-1)
W = numpy.mean(sj,axis=0)
var = W*(n-1)/n + B/n
R_hat = numpy.sqrt(var/W)
return R_hat
def run():
parser = ArgumentParser(
description="calculate Gelman's R-hat measure from output files from multiple MITRE MCMC runs",
epilog='Results are printed to standard output.'
)
parser.add_argument('input_files',metavar='mcmc_trace_file',nargs='+',
help='paths to MITRE MCMC trace CSV files')
args = parser.parse_args()
generate_diagnostics(args.input_files)
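# Minimal usage sketch (file names are hypothetical):
#   generate_diagnostics(['chain1.csv', 'chain2.csv'])
# prints a header row followed by one row of R_hat values per 250
# iterations to standard output.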
| repo: gerberlab/mitre | path: mitre/mcmc_diagnostics.py | language: Python | license: gpl-3.0 | size: 2,438 |
# -*- coding: UTF-8 -*-
# Copyright (C) 2006, 2010 Hervé Cauwelier <herve@oursours.net>
# Copyright (C) 2007 Henry Obein <henry.obein@gmail.com>
# Copyright (C) 2007 Sylvain Taverne <taverne.sylvain@gmail.com>
# Copyright (C) 2007-2008, 2010-2012 J. David Ibáñez <jdavid.ibp@gmail.com>
# Copyright (C) 2010 David Versmisse <versmisse@lil.univ-littoral.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from the Standard Library
from datetime import datetime
import fnmatch
from heapq import heappush, heappop
from os.path import dirname
# Import from pygit2
import pygit2
from pygit2 import TreeBuilder, GIT_FILEMODE_TREE
from pygit2 import GIT_CHECKOUT_FORCE, GIT_CHECKOUT_REMOVE_UNTRACKED
# Import from itools
from itools.core import get_pipe, lazy
from itools.fs import lfs
from itools.handlers import Folder
from itools.log import log_error
from catalog import Catalog, make_catalog
from git import open_worktree
from registry import get_register_fields
from ro import RODatabase
MSG_URI_IS_BUSY = 'The "%s" URI is busy.'
class Heap(object):
"""
This object behaves very much like a sorted dict, but for security only a
subset of the dict API is exposed:
>>> len(heap)
>>> heap[path] = value
>>> value = heap.get(path)
>>> path, value = heap.popitem()
The keys are relative paths as used in Git trees, like 'a/b/c' (and '' for
the root).
The dictionary is sorted so deeper paths are considered smaller, and so
returned first by 'popitem'. The order relation between two paths of equal
depth is undefined.
This data structure is used by RWDatabase._save_changes to build the tree
objects before commit.
"""
def __init__(self):
self._dict = {}
self._heap = []
def __len__(self):
return len(self._dict)
def get(self, path):
return self._dict.get(path)
def __setitem__(self, path, value):
if path not in self._dict:
n = -path.count('/') if path else 1
heappush(self._heap, (n, path))
self._dict[path] = value
def popitem(self):
key = heappop(self._heap)
path = key[1]
return path, self._dict.pop(path)
class RWDatabase(RODatabase):
def __init__(self, path, size_min, size_max):
super(RWDatabase, self).__init__(path, size_min, size_max)
# The "git add" arguments
self.added = set()
self.changed = set()
self.removed = set()
self.has_changed = False
# The resources that have been added, removed, changed and moved can be
# represented as a set of two element tuples. But we implement this
# with two dictionaries (old2new/new2old), to be able to access any
# "tuple" by either value. With the empty tuple we represent the
# absence of change.
#
# Tuple Description Implementation
# ----------- ------------------------- -------------------
# () nothing has been done yet {}/{}
# (None, 'b') resource 'b' added {}/{'b':None}
# ('b', None) resource 'b' removed {'b':None}/{}
# ('b', 'b') resource 'b' changed {'b':'b'}/{'b':'b'}
# ('b', 'c') resource 'b' moved to 'c' {'b':'c'}/{'c':'b'}
# ??? resource 'b' replaced {'b':None}/{'b':None}
#
# In real life, every value is either None or an absolute path (as a
# byte string). For the description that follows, we use the tuples
# as a compact representation.
#
# There are four operations:
#
# A(b) - add "b"
# R(b) - remove "b"
# C(b) - change "b"
# M(b,c) - move "b" to "c"
#
# Then, the algebra is:
#
# () -> A(b) -> (None, 'b')
# (b, None) -> A(b) -> (b, b)
# (None, b) -> A(b) -> error
# (b, b) -> A(b) -> error
# (b, c) -> A(b) -> (b, b), (None, c) FIXME Is this correct?
#
# TODO Finish
#
self.resources_old2new = {}
self.resources_new2old = {}
@lazy
def catalog(self):
path = '%s/catalog' % self.path
return Catalog(path, get_register_fields())
#######################################################################
# Layer 0: handlers
#######################################################################
def is_phantom(self, handler):
# Phantom handlers are "new"
if handler.timestamp or not handler.dirty:
return False
# They are attached to this database, but they are not in the cache
return handler.database is self and handler.key not in self.cache
def has_handler(self, key):
key = self.normalize_key(key)
# A new file/directory is only in added
n = len(key)
for f_key in self.added:
if f_key[:n] == key and (len(f_key) == n or f_key[n] == '/'):
return True
# Normal case
return super(RWDatabase, self).has_handler(key)
def _get_handler(self, key, cls=None, soft=False):
# A hook to handle the new directories
base = key + '/'
n = len(base)
for f_key in self.added:
if f_key[:n] == base:
return Folder(key, database=self)
# The other files
return super(RWDatabase, self)._get_handler(key, cls, soft)
def set_handler(self, key, handler):
if type(handler) is Folder:
raise ValueError, 'unexpected folder (only files can be "set")'
if handler.key is not None:
raise ValueError, 'only new files can be added, try to clone first'
key = self.normalize_key(key)
if self._get_handler(key, soft=True) is not None:
raise RuntimeError, MSG_URI_IS_BUSY % key
self.push_handler(key, handler)
self.added.add(key)
# Changed
self.removed.discard(key)
self.has_changed = True
def del_handler(self, key):
key = self.normalize_key(key)
# Case 1: file
handler = self._get_handler(key)
if type(handler) is not Folder:
self._discard_handler(key)
if key in self.added:
self.added.remove(key)
else:
self.changed.discard(key)
self.worktree.git_rm(key)
# Changed
self.removed.add(key)
self.has_changed = True
return
# Case 2: folder
base = key + '/'
for k in self.added.copy():
if k.startswith(base):
self._discard_handler(k)
self.added.discard(k)
for k in self.changed.copy():
if k.startswith(base):
self._discard_handler(k)
self.changed.discard(k)
# Remove me & childs from cache
for _handler in handler.traverse():
_handler_key = _handler.key
if self.cache.get(_handler_key):
self._discard_handler(_handler_key)
if self.fs.exists(key):
self.worktree.git_rm(key)
# Changed
self.removed.add(key)
self.has_changed = True
def touch_handler(self, key, handler=None):
key = self.normalize_key(key)
# Useful for the phantoms
if handler is None:
handler = self._get_handler(key)
# The phantoms become real files
if self.is_phantom(handler):
self.cache[key] = handler
self.added.add(key)
self.removed.discard(key)
self.has_changed = True
return
if handler.dirty is None:
# Load the handler if needed
if handler.timestamp is None:
handler.load_state()
# Mark the handler as dirty
handler.dirty = datetime.now()
# Update database state (XXX Should we do this?)
self.changed.add(key)
# Changed
self.removed.discard(key)
self.has_changed = True
def get_handler_names(self, key):
key = self.normalize_key(key)
# On the filesystem
names = super(RWDatabase, self).get_handler_names(key)
names = set(names)
# In added
base = key + '/'
n = len(base)
for f_key in self.added:
if f_key[:n] == base:
name = f_key[n:].split('/', 1)[0]
names.add(name)
# Remove .git
if key == "":
names.discard('.git')
return list(names)
def copy_handler(self, source, target, exclude_patterns=None):
source = self.normalize_key(source)
target = self.normalize_key(target)
# The trivial case
if source == target:
return
# Ignore copy of some handlers
if exclude_patterns is None:
exclude_patterns = []
for exclude_pattern in exclude_patterns:
if fnmatch.fnmatch(source, exclude_pattern):
return
# Check the target is free
if self._get_handler(target, soft=True) is not None:
raise RuntimeError, MSG_URI_IS_BUSY % target
handler = self._get_handler(source)
# Folder
if type(handler) is Folder:
fs = self.fs
for name in handler.get_handler_names():
self.copy_handler(fs.resolve2(source, name),
fs.resolve2(target, name),
exclude_patterns)
# File
else:
handler = handler.clone()
self.push_handler(target, handler)
self.added.add(target)
# Changed
self.removed.discard(target)
self.has_changed = True
def move_handler(self, source, target):
source = self.normalize_key(source)
target = self.normalize_key(target)
# The trivial case
if source == target:
return
# Check the target is free
if self._get_handler(target, soft=True) is not None:
raise RuntimeError, MSG_URI_IS_BUSY % target
# Go
fs = self.fs
cache = self.cache
# Case 1: file
handler = self._get_handler(source)
if type(handler) is not Folder:
if fs.exists(source):
self.worktree.git_mv(source, target, add=False)
# Remove source
self.added.discard(source)
self.changed.discard(source)
del cache[source]
# Add target
self.push_handler(target, handler)
self.added.add(target)
# Changed
self.removed.add(source)
self.removed.discard(target)
self.has_changed = True
return
# Case 2: Folder
n = len(source)
base = source + '/'
for key in self.added.copy():
if key.startswith(base):
new_key = '%s%s' % (target, key[n:])
handler = cache.pop(key)
self.push_handler(new_key, handler)
self.added.remove(key)
self.added.add(new_key)
for key in self.changed.copy():
if key.startswith(base):
new_key = '%s%s' % (target, key[n:])
handler = cache.pop(key)
self.push_handler(new_key, handler)
self.changed.remove(key)
if fs.exists(source):
self.worktree.git_mv(source, target, add=False)
for path in fs.traverse(target):
if not fs.is_folder(path):
path = fs.get_relative_path(path)
self.added.add(path)
# Changed
self.removed.add(source)
self.removed.discard(target)
self.has_changed = True
#######################################################################
# Layer 1: resources
#######################################################################
def remove_resource(self, resource):
old2new = self.resources_old2new
new2old = self.resources_new2old
for x in resource.traverse_resources():
path = str(x.abspath)
old2new[path] = None
new2old.pop(path, None)
def add_resource(self, resource):
old2new = self.resources_old2new
new2old = self.resources_new2old
# Catalog
for x in resource.traverse_resources():
path = str(x.abspath)
new2old[path] = None
def change_resource(self, resource):
old2new = self.resources_old2new
new2old = self.resources_new2old
# Case 1: added, moved in-here or already changed
path = str(resource.abspath)
if path in new2old:
return
# Case 2: removed or moved away
if path in old2new and not old2new[path]:
raise ValueError, 'cannot change a resource that has been removed'
# Case 3: not yet touched
old2new[path] = path
new2old[path] = path
def is_changed(self, resource):
"""We use for this function only the 2 dicts old2new and new2old.
"""
old2new = self.resources_old2new
new2old = self.resources_new2old
path = str(resource.abspath)
return path in old2new or path in new2old
def move_resource(self, source, new_path):
old2new = self.resources_old2new
new2old = self.resources_new2old
old_path = source.abspath
for x in source.traverse_resources():
source_path = x.abspath
target_path = new_path.resolve2(old_path.get_pathto(source_path))
source_path = str(source_path)
target_path = str(target_path)
if source_path in old2new and not old2new[source_path]:
err = 'cannot move a resource that has been removed'
raise ValueError, err
source_path = new2old.pop(source_path, source_path)
if source_path:
old2new[source_path] = target_path
new2old[target_path] = source_path
#######################################################################
# Transactions
#######################################################################
def _cleanup(self):
super(RWDatabase, self)._cleanup()
self.has_changed = False
def _abort_changes(self):
# 1. Handlers
cache = self.cache
for key in self.added:
self._discard_handler(key)
for key in self.changed:
cache[key].abort_changes()
# 2. Git
strategy = GIT_CHECKOUT_FORCE | GIT_CHECKOUT_REMOVE_UNTRACKED
if pygit2.__version__ >= '0.21.1':
self.worktree.repo.checkout_head(strategy=strategy)
else:
self.worktree.repo.checkout_head(strategy)
# Reset state
self.added.clear()
self.changed.clear()
self.removed.clear()
# 2. Catalog
self.catalog.abort_changes()
# 3. Resources
self.resources_old2new.clear()
self.resources_new2old.clear()
def abort_changes(self):
if not self.has_changed:
return
self._abort_changes()
self._cleanup()
def _before_commit(self):
"""This method is called before 'save_changes', and gives a chance
to the database to check for preconditions, if an error occurs here
the transaction will be aborted.
The value returned by this method will be passed to '_save_changes',
so it can be used to pre-calculate whatever data is needed.
"""
return None, None, None, [], []
def _save_changes(self, data):
worktree = self.worktree
# 1. Synchronize the handlers and the filesystem
added = self.added
for key in added:
handler = self.cache.get(key)
if handler and handler.dirty:
parent_path = dirname(key)
if not self.fs.exists(parent_path):
self.fs.make_folder(parent_path)
handler.save_state()
changed = self.changed
for key in changed:
handler = self.cache[key]
handler.save_state()
# 2. Build the 'git commit' command
git_author, git_date, git_msg, docs_to_index, docs_to_unindex = data
git_msg = git_msg or 'no comment'
# 3. Git add
git_add = list(added) + list(changed)
worktree.git_add(*git_add)
# 4. Create the tree
repo = worktree.repo
index = repo.index
try:
head = repo.revparse_single('HEAD')
except KeyError:
git_tree = None
else:
root = head.tree
# Initialize the heap
heap = Heap()
heap[''] = repo.TreeBuilder(root)
for key in git_add:
entry = index[key]
heap[key] = (entry.oid, entry.mode)
for key in self.removed:
heap[key] = None
while heap:
path, value = heap.popitem()
# Stop condition
if path == '':
git_tree = value.write()
break
if type(value) is TreeBuilder:
if len(value) == 0:
value = None
else:
oid = value.write()
value = (oid, GIT_FILEMODE_TREE)
# Split the path
if '/' in path:
parent, name = path.rsplit('/', 1)
else:
parent = ''
name = path
# Get the tree builder
tb = heap.get(parent)
if tb is None:
try:
tentry = root[parent]
except KeyError:
tb = repo.TreeBuilder()
else:
tree = repo[tentry.oid]
tb = repo.TreeBuilder(tree)
heap[parent] = tb
# Modify
if value is None:
# Sometimes there are empty folders left in the
# filesystem, but not in the tree, then we get a
# "Failed to remove entry" error. Be robust.
if tb.get(name) is not None:
tb.remove(name)
else:
tb.insert(name, value[0], value[1])
# 5. Git commit
worktree.git_commit(git_msg, git_author, git_date, tree=git_tree)
# 6. Clear state
changed.clear()
added.clear()
self.removed.clear()
# 7. Catalog
catalog = self.catalog
for path in docs_to_unindex:
catalog.unindex_document(path)
for resource, values in docs_to_index:
catalog.index_document(values)
catalog.save_changes()
def save_changes(self):
if not self.has_changed:
return
# Prepare for commit, do here the most you can, if something fails
# the transaction will be aborted
try:
data = self._before_commit()
except Exception:
log_error('Transaction failed', domain='itools.database')
try:
self._abort_changes()
except Exception:
log_error('Aborting failed', domain='itools.database')
self._cleanup()
raise
# Commit
try:
self._save_changes(data)
except Exception:
log_error('Transaction failed', domain='itools.database')
try:
self._abort_changes()
except Exception:
log_error('Aborting failed', domain='itools.database')
raise
finally:
self._cleanup()
def create_tag(self, tag_name, message=None):
worktree = self.worktree
if message is None:
message = tag_name
worktree.git_tag(tag_name, message)
def reset_to_tag(self, tag_name):
worktree = self.worktree
try:
# Reset the tree to the given tag name
worktree.git_reset(tag_name)
# Remove the tag
worktree.git_remove_tag(tag_name)
except Exception:
log_error('Transaction failed', domain='itools.database')
try:
self._abort_changes()
except Exception:
log_error('Aborting failed', domain='itools.database')
raise
def reindex_catalog(self, base_abspath, recursif=True):
"""Reindex the catalog & return nb resources re-indexed
"""
catalog = self.catalog
base_resource = self.get_resource(base_abspath, soft=True)
if base_resource is None:
return 0
n = 0
# Recursive?
if recursif:
for item in base_resource.traverse_resources():
catalog.unindex_document(str(item.abspath))
values = item.get_catalog_values()
catalog.index_document(values)
n += 1
else:
# Reindex resource
catalog.unindex_document(base_abspath)
values = base_resource.get_catalog_values()
catalog.index_document(values)
n = 1
# Save catalog if has changes
if n > 0:
catalog.save_changes()
# Ok
return n
def make_git_database(path, size_min, size_max, fields=None):
"""Create a new empty Git database if the given path does not exists or
is a folder.
If the given path is a folder with content, the Git archive will be
initialized and the content of the folder will be added to it in a first
commit.
"""
path = lfs.get_absolute_path(path)
# Git init
open_worktree('%s/database' % path, init=True)
# The catalog
if fields is None:
fields = get_register_fields()
catalog = make_catalog('%s/catalog' % path, fields)
# Ok
database = RWDatabase(path, size_min, size_max)
database.catalog = catalog
return database
def check_database(target):
"""This function checks whether the database is in a consisitent state,
this is to say whether a transaction was not brutally aborted and left
the working directory with changes not committed.
This is meant to be used by scripts, like 'icms-start.py'
"""
cwd = '%s/database' % target
# Check modifications to the working tree not yet in the index.
command = ['git', 'ls-files', '-m', '-d', '-o']
data1 = get_pipe(command, cwd=cwd)
# Check changes in the index not yet committed.
command = ['git', 'diff-index', '--cached', '--name-only', 'HEAD']
data2 = get_pipe(command, cwd=cwd)
# Everything looks fine
if len(data1) == 0 and len(data2) == 0:
return True
# Something went wrong
print 'The database is not in a consistent state. Fix it manually with'
print 'the help of Git:'
print
print ' $ cd %s/database' % target
print ' $ git clean -fxd'
print ' $ git checkout -f'
print
return False
| repo: nicolasderam/itools | path: itools/database/rw.py | language: Python | license: gpl-3.0 | size: 24,113 |
""" A module for log entries' representation """
import datetime
import sys
# a number of constants controlling the structure of entries' headers
YEAR_SIZE = 4
MONTH_SIZE = 4
DAY_SIZE = 4
LENGTH_SIZE = 8
MARK_LENGTH_SIZE = 8
YEAR_OFFSET = 0
MONTH_OFFSET = YEAR_OFFSET + YEAR_SIZE
DAY_OFFSET = MONTH_OFFSET + MONTH_SIZE
LENGTH_OFFSET = DAY_OFFSET + DAY_SIZE
MARK_LENGTH_OFFSET = LENGTH_OFFSET + LENGTH_SIZE
HEADER_SIZE = MARK_LENGTH_OFFSET + MARK_LENGTH_SIZE
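# A sketch of the resulting on-disk layout (native byte order): bytes 0-3
# year, 4-7 month, 8-11 day, 12-19 contents length, 20-27 mark length, so
# HEADER_SIZE == 28; the mark (mark_len bytes) and the UTF-8 contents
# (length bytes) follow the header, as written by Entry.to_bytes below.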
class Entry():
""" A representation of a log entry """
def __init__(self, contents, date, mark):
self.mark = mark
self.date = date
self.contents = contents
def __lt__(self, other):
return self.date < other.date
def __gt__(self, other):
return self.date > other.date
def __eq__(self, other):
return self.date == other.date
def format(self, no_date=False, no_mark=False, no_end=False):
""" Format entry for printing """
res = ""
if not no_date:
date = self.date.strftime("%Y %b %d, %A")
res += f"-- {date} --\n"
if not no_mark:
mark = "Not marked" if self.mark == "" else f"Marked: {self.mark}"
res += f"-- {mark} --\n"
res += f"{self.contents.strip()}"
if not no_end:
res += "\n-- end --\n"
return res
def to_bytes(self):
""" Convert an entry to a bytestring """
mark_len = len(self.mark)
cont = bytes(self.contents, "utf-8")
length = len(cont)
header = self.date.year.to_bytes(YEAR_SIZE, byteorder=sys.byteorder) \
+ self.date.month.to_bytes(MONTH_SIZE, byteorder=sys.byteorder) \
+ self.date.day.to_bytes(DAY_SIZE, byteorder=sys.byteorder) \
+ length.to_bytes(LENGTH_SIZE, byteorder=sys.byteorder) \
+ mark_len.to_bytes(MARK_LENGTH_SIZE, byteorder=sys.byteorder) \
+ bytes(self.mark, "utf-8")
return header + cont
def match(self, other):
"""
Return whether date and mark of this entry matches those of another
entry
"""
return (self.mark == other.mark
and self.date == other.date)
def merge(self, other):
""" Merge two entries """
left = self.contents.strip()
right = other.contents.strip()
self.contents = f"{left}\n{right}"
def to_text_file(self, filename, no_date=False, no_mark=False, no_end=False):
""" Dump the entry to a file. """
with filename.open("w") as f:
f.write(self.format(no_date, no_mark, no_end))
@classmethod
def from_binary_file(cls, from_file):
""" Load an entry from a binary file """
header = from_file.read(HEADER_SIZE)
if len(header) < HEADER_SIZE:
raise EntryReadError()
year = (int).from_bytes(header[YEAR_OFFSET : YEAR_OFFSET + YEAR_SIZE],
sys.byteorder)
month = (int).from_bytes(header[MONTH_OFFSET : MONTH_OFFSET + MONTH_SIZE],
sys.byteorder)
day = (int).from_bytes(header[DAY_OFFSET : DAY_OFFSET + DAY_SIZE],
sys.byteorder)
length = (int).from_bytes(header[LENGTH_OFFSET : LENGTH_OFFSET + LENGTH_SIZE],
sys.byteorder)
mark_len = (int).from_bytes(header[
MARK_LENGTH_OFFSET : MARK_LENGTH_OFFSET + MARK_LENGTH_SIZE],
sys.byteorder)
rest = from_file.read(length + mark_len)
mark = rest[:mark_len].decode("utf-8")
contents = rest[mark_len:mark_len + length].decode("utf-8")
return Entry(contents, datetime.date(year, month, day), mark)
@classmethod
def from_text_file(cls, filepath, date, mark):
"""
Load an entry from a text file.
'filepath' must be a pathlib's Path.
"""
with filepath.open("r") as f:
cont = f.read()
return Entry(cont, date, mark)
class EntryReadError(Exception):
"""
An error of this type will be raised when an entry read from a binary file
is malformed.
"""
pass
| repo: mpevnev/Simlog | path: src/entry.py | language: Python | license: gpl-3.0 | size: 4,113 |
import time
import itertools
import heapq
import operator
from functools import reduce
from collections import namedtuple
import numpy as np
import pyqtgraph as pg
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt, QRectF, QPointF
import Orange.data
from Orange.data.sql.table import SqlTable
from Orange.statistics import contingency
from Orange.feature.discretization import EqualWidth, _discretized_var
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import itemmodels, colorpalette
def is_discrete(var):
return isinstance(var, Orange.data.DiscreteVariable)
def is_continuous(var):
return isinstance(var, Orange.data.ContinuousVariable)
def is_not_none(obj):
return obj is not None
Tree = namedtuple(
"Tree",
["xbins", # bin edges on the first axis
"ybins", # bin edges on the second axis
"contingencies", # x/y contingency table/s
"children", # an (nbins, nbins) array of sub trees or None (if leaf)
]
)
Tree.is_leaf = property(
lambda self: self.children is None
)
Tree.is_empty = property(
lambda self: not np.any(self.contingencies)
)
Tree.brect = property(
lambda self:
(self.xbins[0],
self.ybins[0],
self.xbins[-1] - self.xbins[0],
self.ybins[-1] - self.ybins[0])
)
Tree.nbins = property(
lambda self: self.xbins.size - 1
)
Tree.depth = (
lambda self:
1 if self.is_leaf
else max(ch.depth() + 1
for ch in filter(is_not_none, self.children.flat))
)
def max_contingency(node):
"""Return the maximum contingency value from node."""
if node.is_leaf:
return node.contingencies.max()
else:
valid = np.nonzero(node.children)
children = node.children[valid]
mask = np.ones_like(node.children, dtype=bool)
mask[valid] = False
ctng = node.contingencies[mask]
v = 0.0
if len(children):
v = max(max_contingency(ch) for ch in children)
if len(ctng):
v = max(ctng.max(), v)
return v
def blockshaped(arr, rows, cols):
N, M = arr.shape[:2]
rest = arr.shape[2:]
assert N % rows == 0
assert M % cols == 0
return (arr.reshape((N // rows, rows, -1, cols) + rest)
.swapaxes(1, 2)
.reshape((N // rows, M // cols, rows, cols) + rest))
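# Shape sketch: blockshaped(a, 4, 4) on an array of shape (8, 8) returns
# shape (2, 2, 4, 4), i.e. a 2x2 grid of 4x4 blocks; trailing dimensions
# (e.g. a class axis in the contingency tables) are carried through
# unchanged.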
Rect, RoundRect, Circle = 0, 1, 2
def lod_from_transform(T):
# Return the level of detail from the transform alone, without taking
# the rotation or shear into account.
r = T.mapRect(QRectF(0, 0, 1, 1))
return np.sqrt(r.width() * r.height())
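# For example, an identity transform gives lod 1.0 and a uniform 2x zoom
# gives lod 2.0; an anisotropic scale of 4x in x and 1x in y gives the
# geometric mean sqrt(4 * 1) == 2.0.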
class DensityPatch(pg.GraphicsObject):
Rect, RoundRect, Circle = Rect, RoundRect, Circle
Linear, Sqrt, Log = 1, 2, 3
def __init__(self, root=None, cell_size=10, cell_shape=Rect,
color_scale=Sqrt, palette=None):
super().__init__()
self.setFlag(QtGui.QGraphicsItem.ItemUsesExtendedStyleOption, True)
self._root = root
self._cache = {}
self._cell_size = cell_size
self._cell_shape = cell_shape
self._color_scale = color_scale
self._palette = palette
def boundingRect(self):
return self.rect()
def rect(self):
if self._root is not None:
return QRectF(*self._root.brect)
else:
return QRectF()
def set_root(self, root):
self.prepareGeometryChange()
self._root = root
self._cache.clear()
self.update()
def set_cell_shape(self, shape):
if self._cell_shape != shape:
self._cell_shape = shape
self.update()
def cell_shape(self):
return self._cell_shape
def set_cell_size(self, size):
assert size >= 1
if self._cell_size != size:
self._cell_size = size
self.update()
def cell_size(self):
return self._cell_size
def set_color_scale(self, scale):
if self._color_scale != scale:
self._color_scale = scale
self.update()
def color_scale(self):
return self._color_scale
def paint(self, painter, option, widget):
root = self._root
if root is None:
return
cell_shape, cell_size = self._cell_shape, self._cell_size
nbins = root.nbins
T = painter.worldTransform()
# level of detail is the geometric mean of a transformed
# unit rectangle's sides (== sqrt(area)).
# lod = option.levelOfDetailFromTransform(T)
lod = lod_from_transform(T)
rect = self.rect()
# sqrt(area) of one cell
size1 = np.sqrt(rect.width() * rect.height()) / nbins
cell_size = cell_size
scale = cell_size / (lod * size1)
if np.isinf(scale):
scale = np.finfo(float).max
p = int(np.floor(np.log2(scale)))
p = min(max(p, - int(np.log2(nbins ** (root.depth() - 1)))),
int(np.log2(root.nbins)))
if (p, cell_shape, cell_size) not in self._cache:
rs_root = resample(root, 2 ** p)
rs_max = max_contingency(rs_root)
def log_scale(ctng):
log_max = np.log(rs_max + 1)
log_ctng = np.log(ctng + 1)
return log_ctng / log_max
def sqrt_scale(ctng):
sqrt_max = np.sqrt(rs_max)
sqrt_ctng = np.sqrt(ctng)
return sqrt_ctng / (sqrt_max or 1)
def lin_scale(ctng):
return ctng / (rs_max or 1)
scale = {self.Linear: lin_scale, self.Sqrt: sqrt_scale,
self.Log: log_scale}
patch = Patch_create(rs_root, palette=self._palette,
scale=scale[self._color_scale],
shape=cell_shape)
self._cache[p, cell_shape, cell_size] = patch
else:
patch = self._cache[p, cell_shape, cell_size]
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.setPen(Qt.NoPen)
for picture in picture_intersect(patch, option.exposedRect):
picture.play(painter)
#: A visual patch utilizing QPicture
Patch = namedtuple(
"Patch",
["node", # : Tree # source node (Tree)
"picture", # : () -> QPicture # combined full QPicture
"children", # : () -> sequence # all child subpatches
]
)
Patch.rect = property(
lambda self: QRectF(*self.node.brect)
)
Patch.is_leaf = property(
lambda self: len(self.children()) == 0
)
Some = namedtuple("Some", ["val"])
def once(f):
cached = None
def f_once():
nonlocal cached
if cached is None:
cached = Some(f())
return cached.val
return f_once
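# Usage sketch: wrapped = once(lambda: expensive()); the wrapped callable
# evaluates its body only on the first call and returns the cached value
# afterwards (the Some wrapper lets even a None result be cached).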
def picture_intersect(patch, region):
"""Return a list of all QPictures in `patch` that intersect region.
"""
if not region.intersects(patch.rect):
return []
elif region.contains(patch.rect) or patch.is_leaf:
return [patch.picture()]
else:
accum = reduce(
operator.iadd,
(picture_intersect(child, region) for child in patch.children()),
[]
)
return accum
def Patch_create(node, palette=None, scale=None, shape=Rect):
"""Create a `Patch` for visualizing node.
"""
# note: the patch (picture and children fields) is lazily evaluated.
if node.is_empty:
return Patch(node, once(lambda: QtGui.QPicture()), once(lambda: ()))
else:
@once
def picture_this_level():
pic = QtGui.QPicture()
painter = QtGui.QPainter(pic)
ctng = node.contingencies
colors = create_image(ctng, palette, scale=scale)
x, y, w, h = node.brect
N, M = ctng.shape[:2]
# Nonzero contingency mask
any_mask = Node_mask(node)
if node.is_leaf:
skip = itertools.repeat(False)
else:
# Skip cells that have a (non-None) child; those are painted by the child patch.
skip = (ch is not None for ch in node.children.flat)
painter.save()
painter.translate(x, y)
painter.scale(w / node.nbins, h / node.nbins)
indices = itertools.product(range(N), range(M))
for (i, j), skip, any_ in zip(indices, skip, any_mask.flat):
if not skip and any_:
painter.setBrush(QtGui.QColor(*colors[i, j]))
if shape == Rect:
painter.drawRect(i, j, 1, 1)
elif shape == Circle:
painter.drawEllipse(i, j, 1, 1)
elif shape == RoundRect:
painter.drawRoundedRect(i, j, 1, 1, 25.0, 25.0,
Qt.RelativeSize)
painter.restore()
painter.end()
return pic
@once
def child_patches():
if node.is_leaf:
children = []
else:
children = filter(is_not_none, node.children.flat)
return tuple(Patch_create(child, palette, scale, shape)
for child in children) + \
(Patch(node, picture_this_level, once(lambda: ())),)
@once
def picture_children():
pic = QtGui.QPicture()
painter = QtGui.QPainter(pic)
for ch in child_patches():
painter.drawPicture(0, 0, ch.picture())
painter.end()
return pic
return Patch(node, picture_children, child_patches)
def resample(node, samplewidth):
assert 0 < samplewidth <= node.nbins
assert int(np.log2(samplewidth)) == np.log2(samplewidth)
if samplewidth == 1:
return node._replace(children=None)
elif samplewidth > 1:
samplewidth = int(samplewidth)
ctng = blockshaped(node.contingencies, samplewidth, samplewidth)
ctng = ctng.sum(axis=(2, 3))
assert ctng.shape[0] == node.nbins // samplewidth
return Tree(node.xbins[::samplewidth],
node.ybins[::samplewidth],
ctng,
None)
elif node.is_leaf:
return Tree(*node)
else:
nbins = node.nbins
children = [resample(ch, samplewidth * nbins)
if ch is not None else None
for ch in node.children.flat]
children_ar = np.full(nbins ** 2, None, dtype=object)
children_ar[:] = children
return node._replace(children=children_ar.reshape((-1, nbins)))
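# For example (with nbins == 16), resample(root, 1) keeps the full 16x16
# contingency grid but drops the children, while resample(root, 4) sums
# each 4x4 block of cells into one, yielding a coarser 4x4 leaf.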
class OWHeatMap(widget.OWWidget):
name = "Heat map"
description = "Draw a two dimentional density plot."
icon = "icons/Heatmap.svg"
priority = 100
inputs = [("Data", Orange.data.Table, "set_data")]
settingsHandler = settings.DomainContextHandler()
x_var_index = settings.Setting(0)
y_var_index = settings.Setting(1)
z_var_index = settings.Setting(0)
selected_z_values = settings.Setting([])
color_scale = settings.Setting(1)
sample_level = settings.Setting(0)
sample_percentages = []
sample_percentages_captions = []
sample_times = [0.1, 0.5, 3, 5, 20, 40, 80]
sample_times_captions = ['0.1s', '1s', '5s', '10s', '30s', '1min', '2min']
use_cache = settings.Setting(True)
n_bins = 2 ** 4
mouse_mode = 0
def __init__(self, parent=None):
super().__init__(self, parent)
self.dataset = None
self.z_values = []
self._root = None
self._displayed_root = None
self._item = None
self._cache = {}
self.colors = colorpalette.ColorPaletteGenerator(10)
self.sampling_box = box = gui.widgetBox(self.controlArea, "Sampling")
sampling_options =\
self.sample_times_captions + self.sample_percentages_captions
gui.comboBox(box, self, 'sample_level',
items=sampling_options,
callback=self.update_sample)
gui.button(box, self, "Sharpen", self.sharpen)
box = gui.widgetBox(self.controlArea, "Input")
self.labelDataInput = gui.widgetLabel(box, 'No data on input')
self.labelDataInput.setTextFormat(Qt.PlainText)
self.labelOutput = gui.widgetLabel(box, '')
self.x_var_model = itemmodels.VariableListModel()
self.comboBoxAttributesX = gui.comboBox(
self.controlArea, self, value='x_var_index', box='X Attribute',
callback=self.replot)
self.comboBoxAttributesX.setModel(self.x_var_model)
self.y_var_model = itemmodels.VariableListModel()
self.comboBoxAttributesY = gui.comboBox(
self.controlArea, self, value='y_var_index', box='Y Attribute',
callback=self.replot)
self.comboBoxAttributesY.setModel(self.y_var_model)
box = gui.widgetBox(self.controlArea, "Color by")
self.z_var_model = itemmodels.VariableListModel()
self.comboBoxClassvars = gui.comboBox(
box, self, value='z_var_index',
callback=self._on_z_var_changed)
self.comboBoxClassvars.setModel(self.z_var_model)
box1 = gui.widgetBox(box, 'Colors displayed', margin=0)
box1.setFlat(True)
self.z_values_view = gui.listBox(
box1, self, "selected_z_values", "z_values",
callback=self._on_z_values_selection_changed,
selectionMode=QtGui.QListView.MultiSelection,
addSpace=False
)
box1 = gui.widgetBox(box, "Color Scale", margin=0)
box1.setFlat(True)
gui.comboBox(box1, self, "color_scale",
items=["Linear", "Square root", "Logarithmic"],
callback=self._on_color_scale_changed)
self.mouseBehaviourBox = gui.radioButtons(
self.controlArea, self, value='mouse_mode',
btnLabels=('Drag', 'Select'),
box='Mouse left button behavior',
callback=self._update_mouse_mode
)
gui.rubber(self.controlArea)
self.plot = pg.PlotWidget(background="w")
self.plot.setMenuEnabled(False)
self.plot.setFrameStyle(QtGui.QFrame.StyledPanel)
self.plot.setMinimumSize(500, 500)
def font_resize(font, factor, minsize=None, maxsize=None):
font = QtGui.QFont(font)
fontinfo = QtGui.QFontInfo(font)
size = fontinfo.pointSizeF() * factor
if minsize is not None:
size = max(size, minsize)
if maxsize is not None:
size = min(size, maxsize)
font.setPointSizeF(size)
return font
axisfont = font_resize(self.font(), 0.8, minsize=11)
axispen = QtGui.QPen(self.palette().color(QtGui.QPalette.Text))
axis = self.plot.getAxis("bottom")
axis.setTickFont(axisfont)
axis.setPen(axispen)
axis = self.plot.getAxis("left")
axis.setTickFont(axisfont)
axis.setPen(axispen)
self.plot.getViewBox().sigTransformChanged.connect(
self._on_transform_changed)
self.mainArea.layout().addWidget(self.plot)
def set_data(self, dataset):
self.closeContext()
self.clear()
if isinstance(dataset, SqlTable):
self.original_data = dataset
self.sample_level = 0
self.sampling_box.setVisible(True)
self.update_sample()
else:
self.dataset = dataset
self.sampling_box.setVisible(False)
self.set_sampled_data(self.dataset)
def update_sample(self):
self.clear()
if self.sample_level < len(self.sample_times):
sample_type = 'time'
level = self.sample_times[self.sample_level]
else:
sample_type = 'percentage'
level = self.sample_level - len(self.sample_times)
level = self.sample_percentages[level]
if sample_type == 'time':
self.dataset = \
self.original_data.sample_time(level, no_cache=True)
else:
if 0 < level < 100:
self.dataset = \
self.original_data.sample_percentage(level, no_cache=True)
if level >= 100:
self.dataset = self.original_data
self.set_sampled_data(self.dataset)
def set_sampled_data(self, dataset):
if dataset is not None:
domain = dataset.domain
cvars = list(filter(is_continuous, domain.variables))
dvars = list(filter(is_discrete, domain.variables))
self.x_var_model[:] = cvars
self.y_var_model[:] = cvars
self.z_var_model[:] = dvars
nvars = len(cvars)
self.x_var_index = min(max(0, self.x_var_index), nvars - 1)
self.y_var_index = min(max(0, self.y_var_index), nvars - 1)
self.z_var_index = min(max(0, self.z_var_index), len(cvars) - 1)
if is_discrete(domain.class_var):
self.z_var_index = dvars.index(domain.class_var)
else:
self.z_var_index = len(dvars) - 1
self.openContext(dataset)
if 0 <= self.z_var_index < len(self.z_var_model):
self.z_values = self.z_var_model[self.z_var_index].values
k = len(self.z_values)
self.selected_z_values = range(k)
self.colors = colorpalette.ColorPaletteGenerator(k)
for i in range(k):
item = self.z_values_view.item(i)
item.setIcon(colorpalette.ColorPixmap(self.colors[i]))
self.labelDataInput.setText(
'Data set: %s'
% (getattr(self.dataset, "name", "untitled"),)
)
self.setup_plot()
else:
self.labelDataInput.setText('No data on input')
self.send("Sampled data", None)
def clear(self):
self.dataset = None
self.x_var_model[:] = []
self.y_var_model[:] = []
self.z_var_model[:] = []
self.z_values = []
self._root = None
self._displayed_root = None
self._item = None
self._cache = {}
self.plot.clear()
def _on_z_var_changed(self):
if 0 <= self.z_var_index < len(self.z_var_model):
self.z_values = self.z_var_model[self.z_var_index].values
k = len(self.z_values)
self.selected_z_values = range(k)
self.colors = colorpalette.ColorPaletteGenerator(k)
for i in range(k):
item = self.z_values_view.item(i)
item.setIcon(colorpalette.ColorPixmap(self.colors[i]))
self.replot()
def _on_z_values_selection_changed(self):
if self._displayed_root is not None:
self.update_map(self._displayed_root)
def _on_color_scale_changed(self):
if self._displayed_root is not None:
self.update_map(self._displayed_root)
def setup_plot(self):
"""Setup the density map plot"""
self.plot.clear()
if self.dataset is None or self.x_var_index == -1 or \
self.y_var_index == -1:
return
data = self.dataset
xvar = self.x_var_model[self.x_var_index]
yvar = self.y_var_model[self.y_var_index]
if 0 <= self.z_var_index < len(self.z_var_model):
zvar = self.z_var_model[self.z_var_index]
else:
zvar = None
axis = self.plot.getAxis("bottom")
axis.setLabel(xvar.name)
axis = self.plot.getAxis("left")
axis.setLabel(yvar.name)
if (xvar, yvar, zvar) in self._cache:
root = self._cache[xvar, yvar, zvar]
else:
root = self.get_root(data, xvar, yvar, zvar)
self._cache[xvar, yvar, zvar] = root
self._root = root
self.update_map(root)
def get_root(self, data, xvar, yvar, zvar=None):
"""Compute the root density map item"""
assert self.n_bins > 2
x_disc = EqualWidth(n=self.n_bins)(data, xvar)
y_disc = EqualWidth(n=self.n_bins)(data, yvar)
def bins(var):
points = list(var.compute_value.points)
assert points[0] <= points[1]
width = points[1] - points[0]
return np.array([points[0] - width] +
points +
[points[-1] + width])
xbins = bins(x_disc)
ybins = bins(y_disc)
# Extend the lower/upper bin edges to infinity.
# (the grid_bin function has an optimization for this case).
xbins1 = np.r_[-np.inf, xbins[1:-1], np.inf]
ybins1 = np.r_[-np.inf, ybins[1:-1], np.inf]
t = grid_bin(data, xvar, yvar, xbins1, ybins1, zvar=zvar)
return t._replace(xbins=xbins, ybins=ybins)
def replot(self):
self.plot.clear()
self.setup_plot()
def update_map(self, root):
self.plot.clear()
self._item = None
self._displayed_root = root
palette = self.colors
contingencies = root.contingencies
def Tree_take(node, indices, axis):
"""Take elements from the contingency matrices in node."""
contig = np.take(node.contingencies, indices, axis)
if node.is_leaf:
return node._replace(contingencies=contig)
else:
children_ar = np.full(node.children.size, None, dtype=object)
children_ar[:] = [
Tree_take(ch, indices, axis) if ch is not None else None
for ch in node.children.flat
]
children = children_ar.reshape(node.children.shape)
return node._replace(contingencies=contig, children=children)
if contingencies.ndim == 3:
if not self.selected_z_values:
return
_, _, k = contingencies.shape
if self.selected_z_values != list(range(k)):
palette = [palette[i] for i in self.selected_z_values]
root = Tree_take(root, self.selected_z_values, 2)
self._item = item = DensityPatch(
root, cell_size=10,
cell_shape=DensityPatch.Rect,
color_scale=self.color_scale + 1,
palette=palette
)
self.plot.addItem(item)
def sharpen(self):
viewb = self.plot.getViewBox()
rect = viewb.boundingRect()
p1 = viewb.mapToView(rect.topLeft())
p2 = viewb.mapToView(rect.bottomRight())
rect = QtCore.QRectF(p1, p2).normalized()
self.sharpen_region(rect)
def sharpen_root_region(self, region):
data = self.dataset
xvar = self.x_var_model[self.x_var_index]
yvar = self.y_var_model[self.y_var_index]
if 0 <= self.z_var_index < len(self.z_var_model):
zvar = self.z_var_model[self.z_var_index]
else:
zvar = None
root = self._root
if not QRectF(*root.brect).intersects(region):
return
nbins = self.n_bins
def bin_func(xbins, ybins):
return grid_bin(data, xvar, yvar, xbins, ybins, zvar)
self.progressBarInit()
last_node = root
update_time = time.time()
changed = False
for i, node in enumerate(
sharpen_region(self._root, region, nbins, bin_func)):
tick = time.time() - update_time
changed = changed or node is not last_node
if changed and ((i % nbins == 0) or tick > 2.0):
self.update_map(node)
last_node = node
changed = False
update_time = time.time()
self.progressBarSet(100 * i / (nbins ** 2))
self._root = last_node
self._cache[xvar, yvar, zvar] = self._root
self.update_map(self._root)
self.progressBarFinished()
def _sampling_width(self):
if self._item is None:
return 0
item = self._item
rect = item.rect()
T = self.plot.transform() * item.sceneTransform()
# lod = QtGui.QStyleOptionGraphicsItem.levelOfDetailFromTransform(T)
lod = lod_from_transform(T)
size1 = np.sqrt(rect.width() * rect.height()) / self.n_bins
cell_size = 10
scale = cell_size / (lod * size1)
if np.isinf(scale):
scale = np.finfo(float).max
p = int(np.floor(np.log2(scale)))
p = min(p, int(np.log2(self.n_bins)))
return 2 ** int(p)
def sharpen_region(self, region):
data = self.dataset
xvar = self.x_var_model[self.x_var_index]
yvar = self.y_var_model[self.y_var_index]
if 0 <= self.z_var_index < len(self.z_var_model):
zvar = self.z_var_model[self.z_var_index]
else:
zvar = None
root = self._root
nbins = self.n_bins
if not QRectF(*root.brect).intersects(region):
return
def bin_func(xbins, ybins):
return grid_bin(data, xvar, yvar, xbins, ybins, zvar)
def min_depth(node, region):
if not region.intersects(QRectF(*node.brect)):
return np.inf
elif node.is_leaf:
return 1
elif node.is_empty:
return 1
else:
xs, xe, ys, ye = bindices(node, region)
children = node.children[xs: xe, ys: ye].ravel()
contingency = node.contingencies[xs: xe, ys: ye]
if contingency.ndim == 3:
contingency = contingency.reshape(-1, contingency.shape[2])
if any(ch is None and np.any(val)
for ch, val in zip(children, contingency)):
return 1
else:
ch_depth = [min_depth(ch, region) + 1
for ch in filter(is_not_none, children.flat)]
return min(ch_depth if ch_depth else [1])
depth = min_depth(self._root, region)
bw = self._sampling_width()
nodes = self.select_nodes_to_sharpen(self._root, region, bw,
depth + 1)
def update_rects(node):
scored = score_candidate_rects(node, region)
ind1 = set(zip(*Node_nonzero(node)))
ind2 = set(zip(*node.children.nonzero())) \
if not node.is_leaf else set()
ind = ind1 - ind2
return [(score, r) for score, i, j, r in scored if (i, j) in ind]
scored_rects = reduce(operator.iadd, map(update_rects, nodes), [])
scored_rects = sorted(scored_rects, reverse=True,
key=operator.itemgetter(0))
root = self._root
self.progressBarInit()
update_time = time.time()
for i, (_, rect) in enumerate(scored_rects):
root = sharpen_region_recur(
root, rect.intersect(region),
nbins, depth + 1, bin_func
)
tick = time.time() - update_time
if tick > 2.0:
self.update_map(root)
update_time = time.time()
self.progressBarSet(100 * i / len(scored_rects))
self._root = root
self._cache[xvar, yvar, zvar] = self._root
self.update_map(self._root)
self.progressBarFinished()
def select_nodes_to_sharpen(self, node, region, bw, depth):
"""
:param node:
:param bw: bandwidth (samplewidth)
:param depth: maximum node depth to consider
"""
if not QRectF(*node.brect).intersects(region):
return []
elif bw >= 1:
return []
elif depth == 1:
return []
elif node.is_empty:
return []
elif node.is_leaf:
return [node]
else:
xs, xe, ys, ye = bindices(node, region)
def intersect_indices(rows, cols):
mask = (xs <= rows) & (rows < xe) & (ys <= cols) & (cols < ye)
return rows[mask], cols[mask]
indices1 = intersect_indices(*Node_nonzero(node))
indices2 = intersect_indices(*node.children.nonzero())
# If there are any non empty and non expanded cells in the
# intersection return the node for sharpening, ...
if np.any(np.array(indices1) != np.array(indices2)):
return [node]
children = node.children[indices2]
# ... else run down the children in the intersection
return reduce(operator.iadd,
(self.select_nodes_to_sharpen(
ch, region, bw * node.nbins, depth - 1)
for ch in children.flat),
[])
def _update_mouse_mode(self):
if self.mouse_mode == 0:
mode = pg.ViewBox.PanMode
else:
mode = pg.ViewBox.RectMode
self.plot.getViewBox().setMouseMode(mode)
def _on_transform_changed(self, *args):
pass
def onDeleteWidget(self):
self.clear()
super().onDeleteWidget()
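# Bin `data` on the grid defined by `xbins` x `ybins` (restricted to the bin
# range when it is finite) and return a Tree whose contingencies hold the
# per-cell counts, split by the discrete `zvar` when one is given.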
def grid_bin(data, xvar, yvar, xbins, ybins, zvar=None):
x_disc = _discretized_var(data, xvar, xbins[1:-1])
y_disc = _discretized_var(data, yvar, ybins[1:-1])
x_min, x_max = xbins[0], xbins[-1]
y_min, y_max = ybins[0], ybins[-1]
querydomain = [x_disc, y_disc]
if zvar is not None:
querydomain = querydomain + [zvar]
querydomain = Orange.data.Domain(querydomain)
def interval_filter(var, low, high):
return Orange.data.filter.Values(
[Orange.data.filter.FilterContinuous(
var, max=high, min=low,
oper=Orange.data.filter.FilterContinuous.Between)]
)
def value_filter(var, val):
return Orange.data.filter.Values(
[Orange.data.filter.FilterDiscrete(var, [val])]
)
def filters_join(filters):
return Orange.data.filter.Values(
reduce(list.__iadd__, (f.conditions for f in filters), [])
)
inf_bounds = np.isinf([x_min, x_max, y_min, y_max])
if not all(inf_bounds):
        # Restrict the data to the finite bin range before computing counts
range_filters = [interval_filter(xvar, x_min, x_max),
interval_filter(yvar, y_min, y_max)]
range_filter = filters_join(range_filters)
subset = range_filter(data)
else:
subset = data
if is_discrete(zvar):
filters = [value_filter(zvar, val) for val in zvar.values]
contingencies = [
contingency.get_contingency(
filter_(subset.from_table(querydomain, subset)),
col_variable=y_disc, row_variable=x_disc
)
for filter_ in filters
]
contingencies = np.dstack(contingencies)
else:
contingencies = contingency.get_contingency(
subset.from_table(querydomain, subset),
col_variable=y_disc, row_variable=x_disc
)
contingencies = np.asarray(contingencies)
return Tree(xbins, ybins, contingencies, None)
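# Re-bin the single cell (i, j) of `node` into an nbins x nbins child Tree;
# empty cells are left alone and the node is returned unchanged.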
def sharpen_node_cell(node, i, j, nbins, gridbin_func):
if node.is_leaf:
children = np.full((nbins, nbins), None, dtype=object)
else:
children = np.array(node.children, dtype=None)
xbins = np.linspace(node.xbins[i], node.xbins[i + 1], nbins + 1)
ybins = np.linspace(node.ybins[j], node.ybins[j + 1], nbins + 1)
if node.contingencies[i, j].any():
t = gridbin_func(xbins, ybins)
assert t.contingencies.shape[:2] == (nbins, nbins)
children[i, j] = t
return node._replace(children=children)
else:
return node
def sharpen_node_cell_range(node, xrange, yrange, nbins, gridbin_func):
if node.is_leaf:
children = np.full((nbins, nbins), None, dtype=object)
else:
children = np.array(node.children, dtype=None)
xs, xe = xrange.start, xrange.stop
ys, ye = yrange.start, yrange.stop
xbins = np.linspace(node.xbins[xs], node.xbins[xe], (xe - xs) * nbins + 1)
ybins = np.linspace(node.ybins[ys], node.ybins[ye], (ye - ys) * nbins + 1)
if node.contingencies[xs: xe, ys: ye].any():
t = gridbin_func(xbins, ybins)
for i, j in itertools.product(range(xs, xe), range(ys, ye)):
children[i, j] = Tree(t.xbins[i * nbins: (i + 1) * nbins + 1],
t.ybins[j * nbins: (j + 1) * nbins + 1],
t.contingencies[xs: xe, ys: ye])
assert children[i, j].shape[:2] == (nbins, nbins)
return node._replace(children=children)
else:
return node
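# Generator version of region sharpening: re-bin every not-yet-expanded cell
# of `node` intersecting `region`, most promising cells first (ordered by a
# chi-square heuristic when class contingencies are available), yielding the
# updated node after each cell so callers can refresh the display as they go.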
def sharpen_region(node, region, nbins, gridbin_func):
if not QRectF(*node.brect).intersects(region):
raise ValueError()
# return node
xs, xe, ys, ye = bindices(node, region)
ndim = node.contingencies.ndim
if node.children is not None:
children = np.array(node.children, dtype=object)
assert children.ndim == 2
else:
children = np.full((nbins, nbins), None, dtype=object)
if ndim == 3:
        # compute_chi_squares expects classes in 1 dim
c = node.contingencies
chi_lr, chi_up = compute_chi_squares(
c[xs: xe, ys: ye, :].swapaxes(1, 2).swapaxes(0, 1)
)
def max_chisq(i, j):
def valid(i, j):
return 0 <= i < chi_up.shape[0] and \
0 <= j < chi_lr.shape[1]
return max(chi_lr[i, j] if valid(i, j) else 0,
chi_lr[i, j - 1] if valid(i, j - 1) else 0,
chi_up[i, j] if valid(i, j) else 0,
chi_up[i - 1, j] if valid(i - 1, j) else 0)
heap = [(-max_chisq(i - xs, j - ys), (i, j))
for i in range(xs, xe)
for j in range(ys, ye)
if children[i, j] is None]
else:
heap = list(enumerate((i, j)
for i in range(xs, xe)
for j in range(ys, ye)
if children[i, j] is None))
heap = sorted(heap)
update_node = node
while heap:
_, (i, j) = heapq.heappop(heap)
xbins = np.linspace(node.xbins[i], node.xbins[i + 1], nbins + 1)
ybins = np.linspace(node.ybins[j], node.ybins[j + 1], nbins + 1)
if node.contingencies[i, j].any():
t = gridbin_func(xbins, ybins)
assert t.contingencies.shape[:2] == (nbins, nbins)
else:
t = None
children[i, j] = t
if t is None:
yield update_node
else:
update_node = update_node._replace(
children=np.array(children, dtype=object)
)
yield update_node
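# Helpers: boolean mask and indices of the non-empty cells in a node's
# contingency table.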
def Node_mask(node):
if node.contingencies.ndim == 3:
return node.contingencies.any(axis=2)
else:
return node.contingencies > 0
def Node_nonzero(node):
return np.nonzero(Node_mask(node))
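# Recursively sharpen the non-empty, not-yet-expanded cells of `node` that
# intersect `region`, descending at most `depth` levels into the tree.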
def sharpen_region_recur(node, region, nbins, depth, gridbin_func):
if depth <= 1:
return node
elif not QRectF(*node.brect).intersects(region):
return node
elif node.is_empty:
return node
elif node.is_leaf:
xs, xe, ys, ye = bindices(node, region)
# indices in need of update
indices = Node_nonzero(node)
for i, j in zip(*indices):
if xs <= i < xe and ys <= j < ye:
node = sharpen_node_cell(node, i, j, nbins, gridbin_func)
# if the exposed region is empty the node.is_leaf property
# is preserved
if node.is_leaf:
return node
return sharpen_region_recur(node, region, nbins, depth, gridbin_func)
else:
xs, xe, ys, ye = bindices(node, region)
        # indices in need of update
indices1 = Node_nonzero(node)
indices2 = node.children.nonzero()
indices = sorted(set(list(zip(*indices1))) - set(list(zip(*indices2))))
for i, j in indices:
if xs <= i < xe and ys <= j < ye:
node = sharpen_node_cell(node, i, j, nbins, gridbin_func)
children = np.array(node.children, dtype=object)
children[xs: xe, ys: ye] = [
[sharpen_region_recur(ch, region, nbins, depth - 1, gridbin_func)
if ch is not None else None
for ch in row]
for row in np.array(children[xs: xe, ys: ye])
]
return node._replace(children=children)
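# Helpers for flatten(): stitch a 2D grid of equal-size blocks into a single
# array, and join consecutive bin edge arrays into one monotone edge array.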
def stack_tile_blocks(blocks):
return np.vstack(list(map(np.hstack, blocks)))
def bins_join(bins):
return np.hstack([b[:-1] for b in bins[:-1]] + [bins[-1]])
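# Collapse one level of the tree: combine the children's contingencies into a
# single Tree with nbins-times finer bins, tiling the parent's own counts for
# cells that were never expanded (optionally rescaled to preserve the
# parent's maximum).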
def flatten(node, nbins=None, preserve_max=False):
if node.is_leaf:
return node
else:
N, M = node.children.shape[:2]
xind = {i: np.flatnonzero(node.children[i, :]) for i in range(N)}
yind = {j: np.flatnonzero(node.children[:, j]) for j in range(M)}
xind = {i: ind[0] for i, ind in xind.items() if ind.size}
yind = {j: ind[0] for j, ind in yind.items() if ind.size}
xbins = [node.children[i, xind[i]].xbins if i in xind
else np.linspace(node.xbins[i], node.xbins[i + 1], nbins + 1)
for i in range(N)]
ybins = [node.children[yind[j], j].ybins if j in yind
else np.linspace(node.ybins[j], node.ybins[j + 1], nbins + 1)
for j in range(M)]
xbins = bins_join(xbins)
ybins = bins_join(ybins)
# xbins = bins_join([c.xbins for c in node.children[:, 0]])
# ybins = bins_join([c.ybins for c in node.children[0, :]])
ndim = node.contingencies.ndim
if ndim == 3:
repeats = (nbins, nbins, 1)
else:
repeats = (nbins, nbins)
def child_contingency(node, row, col):
child = node.children[row, col]
if child is None:
return np.tile(node.contingencies[row, col], repeats)
elif preserve_max:
parent_max = np.max(node.contingencies[row, col])
c_max = np.max(child.contingencies)
if c_max > 0:
return child.contingencies * (parent_max / c_max)
else:
return child.contingencies
else:
return child.contingencies
contingencies = [[child_contingency(node, i, j)
for j in range(nbins)]
for i in range(nbins)]
contingencies = stack_tile_blocks(contingencies)
cnode = Tree(xbins, ybins, contingencies, None)
assert node.brect == cnode.brect
assert np.all(np.diff(cnode.xbins) > 0)
assert np.all(np.diff(cnode.ybins) > 0)
return cnode
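# Map a scene rectangle onto the (start, end) bin index ranges it spans on
# the node's x and y bin edges, clipped to valid indices.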
def bindices(node, rect):
assert rect.normalized() == rect
assert not rect.intersect(QRectF(*node.brect)).isEmpty()
xs = np.searchsorted(node.xbins, rect.left(), side="left") - 1
xe = np.searchsorted(node.xbins, rect.right(), side="right")
ys = np.searchsorted(node.ybins, rect.top(), side="left") - 1
ye = np.searchsorted(node.ybins, rect.bottom(), side="right")
return np.clip([xs, xe, ys, ye],
[0, 0, 0, 0],
[node.xbins.size - 2, node.xbins.size - 1,
node.ybins.size - 2, node.ybins.size - 1])
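# Turn a contingency array into an RGB image: with a class dimension the
# dominant class selects the colour and its (scaled) count the intensity,
# otherwise a plain white-to-black ramp is used.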
def create_image(contingencies, palette=None, scale=None):
# import scipy.signal
# import scipy.ndimage
if scale is None:
scale = lambda c: c / (contingencies.max() or 1)
P = scale(contingencies)
# if scale > 0:
# P = contingencies / scale
# else:
# P = contingencies
# nbins = node.xbins.shape[0] - 1
# smoothing = 32
# bandwidth = nbins / smoothing
if P.ndim == 3:
ncol = P.shape[-1]
if palette is None:
palette = colorpalette.ColorPaletteGenerator(ncol)
colors = [palette[i] for i in range(ncol)]
colors = np.array(
[[c.red(), c.green(), c.blue()] for c in colors]
)
# P = scipy.ndimage.filters.gaussian_filter(
# P, bandwidth, mode="constant")
# P /= P.max()
argmax = np.argmax(P, axis=2)
irow, icol = np.indices(argmax.shape)
P_max = P[irow, icol, argmax]
positive = P_max > 0
P_max = np.where(positive, P_max * 0.95 + 0.05, 0.0)
colors = 255 - colors[argmax.ravel()]
# XXX: Non linear intensity scaling
colors *= P_max.ravel().reshape(-1, 1)
colors = colors.reshape(P_max.shape + (3,))
colors = 255 - colors
elif P.ndim == 2:
palette = colorpalette.ColorPaletteBW()
mix = P
positive = mix > 0
mix = np.where(positive, mix * 0.99 + 0.01, 0.0)
# mix = scipy.ndimage.filters.gaussian_filter(
# mix, bandwidth, mode="constant")
# mix /= mix.max() if total else 1.0
colors = np.zeros((np.prod(mix.shape), 3)) + 255
colors -= mix.ravel().reshape(-1, 1) * 255
colors = colors.reshape(mix.shape + (3,))
return colors
def score_candidate_rects(node, region):
"""
Score candidate bin rects in node.
Return a list of (score, i, j QRectF) list)
"""
xs, xe, ys, ye = bindices(node, region)
if node.contingencies.ndim == 3:
c = node.contingencies
# compute_chisqares expects classes in 1 dim
chi_lr, chi_up = compute_chi_squares(
c[xs: xe, ys: ye, :].swapaxes(1, 2).swapaxes(0, 1)
)
def max_chisq(i, j):
def valid(i, j):
return 0 <= i < chi_up.shape[0] and \
0 <= j < chi_lr.shape[1]
return max(chi_lr[i, j] if valid(i, j) else 0,
chi_lr[i, j - 1] if valid(i, j - 1) else 0,
chi_up[i, j] if valid(i, j) else 0,
chi_up[i - 1, j] if valid(i - 1, j) else 0)
return [(max_chisq(i - xs, j - ys), i, j,
QRectF(QPointF(node.xbins[i], node.ybins[j]),
QPointF(node.xbins[i + 1], node.ybins[j + 1])))
for i, j in itertools.product(range(xs, xe), range(ys, ye))]
else:
return [(1, i, j,
QRectF(QPointF(node.xbins[i], node.ybins[j]),
QPointF(node.xbins[i + 1], node.ybins[j + 1])))
for i, j in itertools.product(range(xs, xe), range(ys, ye))]
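# Chi-square statistics between horizontally (left-right) and vertically
# (up-down) adjacent cells of the observed contingencies, comparing observed
# counts with the independence-based expected counts per class slice.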
def compute_chi_squares(observes):
# compute chi squares for left-right neighbours
def get_estimates(observes):
estimates = []
for obs in observes:
n = obs.sum()
sum_rows = obs.sum(1)
sum_cols = obs.sum(0)
prob_rows = sum_rows / n
prob_cols = sum_cols / n
rows, cols = np.indices(obs.shape)
est = np.zeros(obs.shape)
est[rows, cols] = n * prob_rows[rows] * prob_cols[cols]
estimates.append(est)
return np.nan_to_num(np.array(estimates))
estimates = get_estimates(observes)
    depth, rows, coll = np.indices(
        (observes.shape[0], observes.shape[1], observes.shape[2] - 1))
    colr = coll + 1
    obs_dblstack = np.array(
        [observes[depth, rows, coll], observes[depth, rows, colr]])
    obs_pairs = np.zeros(
        (obs_dblstack.shape[1], obs_dblstack.shape[2],
         obs_dblstack.shape[3], obs_dblstack.shape[0]))
    depth, rows, coll, pairs = np.indices(obs_pairs.shape)
    obs_pairs[depth, rows, coll, pairs] = obs_dblstack[pairs, depth, rows, coll]
    depth, rows, coll = np.indices(
        (estimates.shape[0], estimates.shape[1], estimates.shape[2] - 1))
    colr = coll + 1
    est_dblstack = np.array(
        [estimates[depth, rows, coll], estimates[depth, rows, colr]])
    est_pairs = np.zeros(
        (est_dblstack.shape[1], est_dblstack.shape[2],
         est_dblstack.shape[3], est_dblstack.shape[0]))
    depth, rows, coll, pairs = np.indices(est_pairs.shape)
    est_pairs[depth, rows, coll, pairs] = est_dblstack[pairs, depth, rows, coll]
    oe2e = (obs_pairs - est_pairs) ** 2 / est_pairs
    chi_squares_lr = np.nan_to_num(np.nansum(np.nansum(oe2e, axis=3), axis=0))
    # compute chi squares for up-down neighbours
    depth, rowu, cols = np.indices(
        (observes.shape[0], observes.shape[1] - 1, observes.shape[2]))
    rowd = rowu + 1
    obs_dblstack = np.array(
        [observes[depth, rowu, cols], observes[depth, rowd, cols]])
    obs_pairs = np.zeros(
        (obs_dblstack.shape[1], obs_dblstack.shape[2],
         obs_dblstack.shape[3], obs_dblstack.shape[0]))
    depth, rowu, cols, pairs = np.indices(obs_pairs.shape)
    obs_pairs[depth, rowu, cols, pairs] = obs_dblstack[pairs, depth, rowu, cols]
    depth, rowu, cols = np.indices(
        (estimates.shape[0], estimates.shape[1] - 1, estimates.shape[2]))
    rowd = rowu + 1
    est_dblstack = np.array(
        [estimates[depth, rowu, cols], estimates[depth, rowd, cols]])
    est_pairs = np.zeros(
        (est_dblstack.shape[1], est_dblstack.shape[2],
         est_dblstack.shape[3], est_dblstack.shape[0]))
    depth, rowu, cols, pairs = np.indices(est_pairs.shape)
    est_pairs[depth, rowu, cols, pairs] = est_dblstack[pairs, depth, rowu, cols]
    oe2e = (obs_pairs - est_pairs) ** 2 / est_pairs
    chi_squares_ud = np.nan_to_num(np.nansum(np.nansum(oe2e, axis=3), axis=0))
return (chi_squares_lr, chi_squares_ud)
def main():
import sip
app = QtGui.QApplication([])
w = OWHeatMap()
w.show()
w.raise_()
data = Orange.data.Table('iris')
# data = Orange.data.Table('housing')
data = Orange.data.Table('adult')
w.set_data(data)
rval = app.exec_()
w.onDeleteWidget()
sip.delete(w)
del w
app.processEvents()
return rval
if __name__ == "__main__":
import sys
sys.exit(main())
|
jzbontar/orange-tree
|
Orange/widgets/visualize/owheatmap.py
|
Python
|
gpl-3.0
| 46,118
|
#!/usr/bin/env python
import numpy as np
import cffirmware
def test_takeoff():
# Fixture
planner = cffirmware.planner()
cffirmware.plan_init(planner)
pos = cffirmware.mkvec(0, 0, 0)
yaw = 0
targetHeight = 1.0
targetYaw = 0
duration = 2
# Test
cffirmware.plan_takeoff(planner, pos, yaw, targetHeight, targetYaw, duration, 0)
# Assert
state = cffirmware.plan_current_goal(planner, duration)
assert np.allclose(np.array([0, 0, targetHeight]), state.pos)
assert np.allclose(np.array([0, 0, 0.0]), state.vel)
|
bitcraze/crazyflie-firmware
|
test_python/test_planner.py
|
Python
|
gpl-3.0
| 564
|
from django.conf.urls.defaults import patterns, include, url
from webui.platforms import utils
import logging
logger = logging.getLogger(__name__)
urlpatterns = patterns('',
url(r'^application/(?P<appname>[\w|\W]+)/$', 'webui.platforms.views.appdetails', name = "application_details"),
)
installed_platforms = utils.installed_platforms_list()
for platform in installed_platforms:
try:
urlpatterns += patterns('',
(r"^%s/" % platform, include("webui.platforms.%s.urls" % platform)),
)
    except Exception:
        logger.debug("Platform %s does not provide urls" % platform)
|
kermitfr/kermit-webui
|
src/webui/platforms/urls.py
|
Python
|
gpl-3.0
| 607
|
from django.db import models
# Create your models here.
class BatchHostSettings(models.Model):
name = models.CharField(max_length=200)
host = models.CharField(max_length=200)
user = models.CharField(max_length=200)
port = models.IntegerField(default=22)
sshpub = models.TextField(blank=True)
fs_engine = models.CharField(max_length=100, blank=True)
farm_engine = models.CharField(max_length=100, blank=True)
|
dziadu/batch_monitor
|
models.py
|
Python
|
gpl-3.0
| 415
|
#!/usr/bin/env python
#hi this is a test commit
from setuptools import setup
setup(name="BZRFlag",
version="1.0",
description="BZRFlag: BZFlag with Robotic Tanks!",
long_description="See README",
license="GNU GPL",
author="BYU AML Lab",
author_email="kseppi@byu.edu",
url="http://code.google.com/p/bzrflag/",
packages=['bzrflag'],
include_package_data = True,
package_data = {'': ['*.png', '*.txt', '*.ttf']},
test_suite="tests",
data_files=[('data', ['data/std_ground.png'])],
classifiers=['Development Status :: 4 - Beta',
'Operating System :: POSIX :: Linux',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Artificial Intelligence'],
)
|
edwardekstrom/BZRflag
|
setup.py
|
Python
|
gpl-3.0
| 996
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#a test for traverse directory
__author__ = 'AlbertS'
import os
import os.path
def showdir():
    for parent, dirnames, filenames in os.walk("./"):  # yields: parent directory, directory names (without path), file names
if '.git' not in parent and '.gita' not in dirnames:
            for filename in filenames:  # print file information
print("parent is:" + parent)
print("filename is:" + filename)
print("the full name of the file is:" + os.path.join(parent,filename)) #输出文件路径信息
            for dirname in dirnames:  # print directory information
print("parent is:" + parent)
print("dirname is:" + dirname)
def dfs_showdir(path, depth):
if depth == 0:
print("root:[" + path + "]")
for item in os.listdir(path):
if '.git' not in item:
print("| " * depth + "+--" + item)
newitem = path +'/'+ item
if os.path.isdir(newitem):
dfs_showdir(newitem, depth +1)
if __name__ == '__main__':
dfs_showdir('.', 0)
|
AlbertGithubHome/TestPullRequest
|
ShowDirectory.py
|
Python
|
gpl-3.0
| 1,206
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
#end_pymotw_header
import warnings
warnings.filterwarnings('ignore',
'.*',
UserWarning,
'warnings_filtering',
14)
import warnings_filtering
|
qilicun/python
|
python2/PyMOTW-1.132/PyMOTW/warnings/warnings_filterwarnings_lineno.py
|
Python
|
gpl-3.0
| 349
|
import math
def sign(val):
return (-1 if val<0 else 1)
def find_vect_normal(vect):
n = [vect[1], -vect[0], 0]
return n
def mk_vect(s, e):
if len(e) == 3 and len(s) == 3:
return [e[0]-s[0], e[1]-s[1], e[2]-s[2]]
return [e[0]-s[0], e[1]-s[1], 0]
def normalize(v):
if len(v) == 3:
l = math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)
return [v[0]/l, v[1]/l, v[2]/l]
l = math.sqrt(v[0]**2 + v[1]**2)
return [v[0]/l, v[1]/l, 0]
def vect_sum(v1, v2):
if len(v1) == 3 and len(v2) == 3:
return [v1[0]+v2[0], v1[1]+v2[1], v1[2]+v2[2]]
return [v1[0]+v2[0], v1[1]+v2[1], 0]
def vect_len(v):
if len(v) == 3:
return math.sqrt(v[0]**2+v[1]**2+v[2]**2)
return math.sqrt(v[0]**2+v[1]**2)
class OverlapEnum:
fully_covers = 1
partially_overlap = 2
fully_lays_inside = 3
no_overlap = 4
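# Axis-aligned bounding box with helpers for point containment and for
# classifying how another box overlaps this one (see OverlapEnum above).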
class AABB:
def __init__(self, sx, sy, ex, ey):
#print sx, sy, ex, ey
self.left = min(sx, ex)
self.right = max(sx, ex)
self.top = max(sy, ey)
self.bottom = min(sy, ey)
def point_in_aabb(self, pt):
x = pt[0]
y = pt[1]
if (x>=self.left and x<=self.right and y>=self.bottom and y<=self.top):
return True
return False
def aabb_in_aabb(self, box, check_inside_overlap=True):
lt = False
lb = False
rt = False
rb = False
if self.point_in_aabb((box.left, box.top)):
lt = True
if self.point_in_aabb((box.left, box.bottom)):
lb = True
if self.point_in_aabb((box.right, box.bottom)):
rb = True
if self.point_in_aabb((box.right, box.top)):
rt = True
oe = OverlapEnum
if lt and lb and rt and rb:
return OverlapEnum.fully_covers
elif lt or lb or rt or rb:
return OverlapEnum.partially_overlap
if check_inside_overlap:
if box.aabb_in_aabb(self, False)==OverlapEnum.fully_covers:
return OverlapEnum.fully_lays_inside
return OverlapEnum.no_overlap
def __repr__(self):
return "AABB (l: %f r: %f t: %f b: %f)" % (self.left, self.right, self.top, self.bottom)
def pt_to_pt_dist(p1, p2):
return math.sqrt((p1[0]-p2[0])**2+(p1[1]-p2[1])**2)
class CircleUtils:
def __init__(self, center, radius, uses_inner_space=False):
self.center = center
self.radius = radius
self.uses_inner_space=uses_inner_space
def get_aabb(self):
cx = self.center[0]
cy = self.center[1]
r = self.radius
        return AABB(cx-r, cy-r, cx+r, cy+r)
def distance_to_pt(self, pt):
dist = pt_to_pt_dist(pt, self.center)-self.radius
if self.uses_inner_space:
return 0 if dist<0 else dist
return abs(dist)
class ArcUtils:
def __init__(self, center, radius, startangle, endangle):
self.center = center
self.radius = radius
self.sa = startangle
self.ea = endangle
self.start = (self.center[0]+math.cos(self.sa)*self.radius, self.center[1]+math.sin(self.sa)*self.radius)
self.end = (self.center[0]+math.cos(self.ea)*self.radius, self.center[1]+math.sin(self.ea)*self.radius)
def get_aabb(self):
x_min = min(self.start[0], self.end[0])
y_min = min(self.start[1], self.end[1])
x_max = max(self.start[0], self.end[0])
y_max = max(self.start[1], self.end[1])
return AABB(x_min, y_min, x_max, y_max)
def __angledist(self, s, e):
if s<0:
s+=2*math.pi
if e<0:
e+=2*math.pi
d = 0
if e<s:
d=(2*math.pi-s)+e
else:
d=e-s
return d
def check_angle_in_range(self, a):
s = self.sa
e = self.ea
se_dist = self.__angledist(s, e)
sa_dist = self.__angledist(s, a)
ae_dist = self.__angledist(a, e)
if sa_dist+ae_dist!=se_dist:
return False
return True
def distance_to_pt(self, pt):
a = math.atan2(pt[1]-self.center[1], pt[0]-self.center[0])
        #print a, self.sa, self.ea
if self.check_angle_in_range(a):
dist = pt_to_pt_dist(pt, self.center)-self.radius
else:
dist = 1000
return abs(dist)
def get_normalized_start_normal(self):
se_dir = mk_vect(self.start, self.end)
se_dir = normalize(se_dir)
v = mk_vect(self.center, self.start)
v = normalize(v)
z_dir = [0, 0, se_dir[0]*v[1]-se_dir[1]*v[0]]
z_dir = normalize(z_dir)
v_dir = [z_dir[2]*v[1], -v[0]*z_dir[2], 0]
v_dir = normalize(v_dir)
return find_vect_normal(v_dir)
def get_normalized_end_normal(self):
return self.get_normalized_start_normal()
def check_if_pt_belongs(self, pt):
v = mk_vect(pt, self.center)
if abs((self.radius) - vect_len(v))>0.001:
#print "radius check failed"
return False
a = math.atan2(pt[1]-self.center[1], pt[0]-self.center[0])
if (self.check_angle_in_range(a)):
#print "radius and angle ok"
return True
return False
def find_intersection(self, other_element):
oe = other_element
if other_element.__class__.__name__ == "LineUtils":
#print "line to arc"
dx = oe.end[0] - oe.start[0]
dy = oe.end[1] - oe.start[1]
dr = math.sqrt(dx**2 + dy**2)
d = oe.start[0]*oe.end[1] - oe.start[1]*oe.end[0]
desc = ((self.radius)**2)*dr**2 - d**2
intersections = []
if desc == 0:
#single point
#print "single point"
x_i = (d*dy+sign(dy)*dx*math.sqrt(desc))/(dr**2)
y_i = (-d*dx+abs(dy)*math.sqrt(desc))/(dr**2)
intersections.append((x_i, y_i))
elif desc > 0:
#intersection
#print "intersection"
x_i1 = (d*dy+sign(dy)*dx*math.sqrt(desc))/(dr**2)
y_i1 = (-d*dx+abs(dy)*math.sqrt(desc))/(dr**2)
x_i2 = (d*dy-sign(dy)*dx*math.sqrt(desc))/(dr**2)
y_i2 = (-d*dx-abs(dy)*math.sqrt(desc))/(dr**2)
intersections.append((x_i1, y_i1))
intersections.append((x_i2, y_i2))
else:
#no intersection
#print "no intersection"
return None
#check whether intersections are inside existing element sections
checked_intersections = []
for pt in intersections:
if oe.check_if_pt_belongs(pt):
#print "oe belongs"
if self.check_if_pt_belongs(pt):
#print "self belongs"
checked_intersections.append(pt)
else:
#print "self failed"
pass
else:
#print "oe failed"
pass
if len(checked_intersections)>0:
return checked_intersections
else:
print "A: Not calc util:", other_element.__class__.__name__
return None
class LineUtils:
def __init__(self, start, end):
self.start = start
self.end = end
def get_aabb(self):
return AABB(self.start[0], self.start[1], self.end[0], self.end[1])
def distance_to_pt(self, pt):
dist = 0
a = math.atan2(self.start[1]-self.end[1], self.start[0]-self.end[0])
#print "alpha:", a
sina = math.sin(a)
cosa = math.cos(a)
s = self.__reproject_pt(self.start, sina, cosa)
#s = self.start
e = self.__reproject_pt(self.end, sina, cosa)
p = self.__reproject_pt(pt, sina, cosa)
left = min(s[0], e[0])
right = max(s[0], e[0])
if p[0]<=left:
dist = pt_to_pt_dist(p, (left, s[1]))
return dist
elif p[0]>=right:
dist = pt_to_pt_dist(p, (right, s[1]))
return dist
a = pt_to_pt_dist(s, e) #abs(s[0]-e[0])
b = pt_to_pt_dist(s, p)
c = pt_to_pt_dist(e, p)
p = (a+b+c)/2.0
#print "a:", a, "b:", b, "c:", c, "p:", p, p*(p-a)*(p-b)*(p-c)
dist = abs(math.sqrt(p*(p-a)*(p-b)*(p-c))*2/a)
#print "dist:" , dist
return dist
def check_if_pt_belongs(self, pt):
x_i, y_i = pt
minx = min(self.start[0], self.end[0])
maxx = max(self.start[0], self.end[0])
miny = min(self.start[1], self.end[1])
maxy = max(self.start[1], self.end[1])
#print "s:", (minx, miny), "e:", (maxx, maxy)
x_intersects = False
y_intersects = False
if abs(minx-maxx)<0.001:
if abs(x_i-minx)<0.001:
x_intersects = True
#print "dx=0"
else:
return False
elif x_i>=minx and x_i<=maxx:
x_intersects = True
if abs(miny-maxy)<0.001:
if abs(y_i-miny)<0.001:
y_intersects = True
#print "dy=0"
else:
return False
elif y_i>=miny and y_i<=maxy:
y_intersects = True
if x_intersects and y_intersects:
return True
return False
def find_intersection(self, other_element):
oe = other_element
#print oe
if other_element.__class__.__name__ == "LineUtils":
#print "line to line"
# line to line intersection
ms, me = self.start, self.end
ma = me[1]-ms[1]
mb = ms[0]-me[0]
mc = ma*ms[0]+mb*ms[1]
ms, me = oe.start, oe.end
oa = me[1]-ms[1]
ob = ms[0]-me[0]
oc = oa*ms[0]+ob*ms[1]
det = float(ma*ob - oa*mb)
if det == 0:
# lines are parallel
#print "parallel"
return None
else:
x_i = (ob*mc-mb*oc)/det
y_i = (ma*oc-oa*mc)/det
#print "int:", x_i, y_i
if self.check_if_pt_belongs((x_i, y_i)):
#print "on self"
if oe.check_if_pt_belongs((x_i, y_i)):
#print "on oe"
return [(x_i, y_i),]
else:
#print "not on oe"
#print "int:", x_i, y_i
#print "oe:", oe.start, oe.end
pass
else:
pass
#print "not on self"
elif other_element.__class__.__name__ == "ArcUtils":
#print "arc to line"
return oe.find_intersection(self)
else:
print "L: Not calc util:", other_element.__class__.__name__
return None
def get_normalized_start_normal(self):
v = mk_vect(self.start, self.end)
vn = normalize(v)
return find_vect_normal(vn)
def get_normalized_end_normal(self):
v = mk_vect(self.start, self.end)
vn = normalize(v)
return find_vect_normal(vn)
def __reproject_pt(self, pt, sina, cosa):
return (pt[0]*cosa-pt[1]*sina, pt[0]*sina+pt[1]*cosa)
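# Tiny command line parser: `description` maps option strings to dicts with
# an "arglen" entry (0 or 1); recognised options get "present" set to True
# and, when arglen == 1, the following token stored under "arg".
# Hypothetical usage sketch (names are illustrative only):
#   desc = {"-i": {"arglen": 1, "present": False, "arg": None},
#           "-v": {"arglen": 0, "present": False}}
#   if parse_args(sys.argv, desc) and desc["-i"]["present"]:
#       input_path = desc["-i"]["arg"]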
def parse_args(argv, description):
current_arg = None
for arg in argv[1:]:
if current_arg != None:
description[current_arg]["arg"] = arg
current_arg = None
else:
if arg not in description:
print "unknown argument:", arg
return False
print "arg:", arg
description[arg]["present"] = True
if description[arg]["arglen"] == 1:
current_arg = arg
return True
if __name__=="__main__":
au = ArcUtils((0, 0), 1, -10*math.pi/180.0, 300*math.pi/180.0)
angle = 90
print "checking angle", angle, au.check_angle_in_range(angle*math.pi/180.0)
angle = 190
print "checking angle", angle, au.check_angle_in_range(angle*math.pi/180.0)
angle = -15
print "checking angle", angle, au.check_angle_in_range(angle*math.pi/180.0)
angle = 290
print "checking angle", angle, au.check_angle_in_range(angle*math.pi/180.0)
angle = 301
print "checking angle", angle, au.check_angle_in_range(angle*math.pi/180.0)
|
snegovick/map_editor
|
tileset_editor/utils.py
|
Python
|
gpl-3.0
| 12,522
|
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import Http404, JsonResponse
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_GET
from repanier.const import PERMANENCE_OPENED, DECIMAL_ZERO, EMPTY_STRING
from repanier.models.customer import Customer
from repanier.models.invoice import ProducerInvoice, CustomerInvoice
from repanier.models.offeritem import OfferItemReadOnly
from repanier.models.permanence import Permanence
from repanier.tools import (
create_or_update_one_cart_item,
sint,
sboolean,
my_basket,
get_html_selected_value,
get_html_basket_message,
get_repanier_template_name,
)
@never_cache
@require_GET
@login_required
def order_ajax(request):
"""
Add a selected offer item to a customer order (i.e. update the customer's invoice and the producer's invoice)
"""
user = request.user
customer = Customer.objects.filter(id=user.customer_id, may_order=True).first()
if customer is None:
raise Http404
offer_item_id = sint(request.GET.get("offer_item", 0))
value_id = sint(request.GET.get("value", 0))
is_basket = sboolean(request.GET.get("is_basket", False))
qs = CustomerInvoice.objects.filter(
permanence__offeritem=offer_item_id,
customer_id=customer.id,
status=PERMANENCE_OPENED,
)
json_dict = {}
if qs.exists():
qs = ProducerInvoice.objects.filter(
permanence__offeritem=offer_item_id,
producer__offeritem=offer_item_id,
status=PERMANENCE_OPENED,
)
if qs.exists():
purchase, updated = create_or_update_one_cart_item(
customer=customer,
offer_item_id=offer_item_id,
value_id=value_id,
batch_job=False,
comment=EMPTY_STRING,
)
offer_item = OfferItemReadOnly.objects.filter(id=offer_item_id).first()
if purchase is None:
json_dict[
"#offer_item{}".format(offer_item.id)
] = get_html_selected_value(offer_item, DECIMAL_ZERO, is_open=True)
else:
json_dict[
"#offer_item{}".format(offer_item.id)
] = get_html_selected_value(
offer_item, purchase.quantity_ordered, is_open=True
)
if settings.REPANIER_SETTINGS_SHOW_PRODUCER_ON_ORDER_FORM:
producer_invoice = (
ProducerInvoice.objects.filter(
producer_id=offer_item.producer_id,
permanence_id=offer_item.permanence_id,
)
.only("total_price_with_tax")
.first()
)
json_dict.update(producer_invoice.get_order_json())
customer_invoice = CustomerInvoice.objects.filter(
permanence_id=offer_item.permanence_id, customer_id=customer.id
).first()
invoice_confirm_status_is_changed = customer_invoice.cancel_confirm_order()
if invoice_confirm_status_is_changed:
if settings.REPANIER_SETTINGS_CUSTOMER_MUST_CONFIRM_ORDER:
template_name = get_repanier_template_name(
"communication_confirm_order.html"
)
html = render_to_string(template_name)
json_dict["#communicationModal"] = mark_safe(html)
customer_invoice.save()
json_dict.update(
my_basket(
customer_invoice.is_order_confirm_send,
customer_invoice.get_total_price_with_tax(),
)
)
permanence = Permanence.objects.filter(id=offer_item.permanence_id).first()
if is_basket:
basket_message = get_html_basket_message(
customer, permanence, PERMANENCE_OPENED
)
else:
basket_message = EMPTY_STRING
json_dict.update(
customer_invoice.get_html_my_order_confirmation(
permanence=permanence,
is_basket=is_basket,
basket_message=basket_message,
)
)
return JsonResponse(json_dict)
|
pcolmant/repanier
|
repanier/views/order_ajax.py
|
Python
|
gpl-3.0
| 4,550
|
#!/usr/bin/env python3
# Copyright 2015 Ivan awamper@gmail.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import sys
import subprocess
from gi.repository import Gio
from gi.repository import Gdk
from gi.repository import Notify
from gi.repository import GdkPixbuf
from draobpilc import get_data_path
from draobpilc.version import APP_NAME
Notify.init(APP_NAME)
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(
r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$',
re.IGNORECASE
)
class SettingsSchemaNotFound(Exception):
""" """
class NotifyAction():
def __init__(self, id_, label, user_data=None, callback=None):
if not isinstance(id_, str):
raise ValueError('id_ must be a string')
if not callable(callback):
raise ValueError('callback must be a function')
self._id_ = id_
self._label = label or id_
self._callback = callback
self._user_data = user_data
@property
def id_(self):
return self._id_
@property
def label(self):
return self._label
@property
def callback(self):
return self._callback
@property
def user_data(self):
return self._user_data
# FixMe: actions don't work
def notify(
summary=APP_NAME,
body=None,
timeout=Notify.EXPIRES_DEFAULT,
urgency=Notify.Urgency.NORMAL,
icon_name_or_path=get_data_path('draobpilc.png'),
actions=None
):
notification = Notify.Notification.new(summary, body, icon_name_or_path)
notification.set_timeout(timeout)
notification.set_urgency(urgency)
if isinstance(actions, list):
for action in actions:
if not isinstance(action, NotifyAction): continue
notification.add_action(
action.id_,
action.label,
action.callback,
action.user_data
)
notification.show()
return notification
def restart_app():
from draobpilc.common import APPLICATION
APPLICATION.quit()
subprocess.Popen('draobpilc')
sys.exit()
def get_settings(schema_id, schema_dir=None):
if not schema_dir:
schema_source = Gio.SettingsSchemaSource.get_default()
else:
schema_source = Gio.SettingsSchemaSource.new_from_directory(
schema_dir,
Gio.SettingsSchemaSource.get_default(),
False
)
settings = schema_source.lookup(schema_id, True)
if not settings:
raise SettingsSchemaNotFound(
'Schema "{schema}"" could not be found'.format(schema=schema_id)
)
return Gio.Settings(settings_schema=settings)
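# A string counts as a URL when extract_urls() finds exactly one URL that
# spans the whole input, e.g. 'http://example.com' -> True, while
# 'see http://example.com' -> False.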
def is_url(string):
result = False
urls = extract_urls(string)
if len(urls) == 1 and len(urls[0]) == len(string):
result = True
return result
# adopted from django
def extract_urls(text):
def unescape(text, trail):
"""
        If the input URL is HTML-escaped, unescape it so that we can safely feed
        it to smart_urlquote. For example:
        http://example.com?x=1&amp;y=&lt;2&gt; => http://example.com?x=1&y=<2>
        """
        unescaped = (text + trail).replace(
            '&amp;', '&').replace('&lt;', '<').replace(
            '&gt;', '>').replace('&quot;', '"').replace('&#39;', "'")
if trail and unescaped.endswith(trail):
# Remove trail for unescaped if it was not consumed by unescape
unescaped = unescaped[:-len(trail)]
elif trail == ';':
# Trail was consumed by unescape (as end-of-entity marker),
# move it to text
text += trail
trail = ''
return text, unescaped, trail
trailing_punctuation = ['.', ',', ':', ';', '.)', '"', '\'', '!']
wrapping_punctuation = [
('(', ')'),
('<', '>'),
('[', ']'),
        ('&lt;', '&gt;'),
('"', '"'),
('\'', '\'')
]
word_split_re = re.compile(r'''([\s<>"']+)''')
result = []
words = word_split_re.split(text)
for i, word in enumerate(words):
if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in trailing_punctuation:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in wrapping_punctuation:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (
middle.endswith(closing)
and middle.count(closing) == middle.count(opening) + 1
):
middle = middle[:-len(closing)]
trail = closing + trail
url = None
if simple_url_re.match(middle):
middle, middle_unescaped, trail = unescape(middle, trail)
url = middle_unescaped
elif simple_url_2_re.match(middle):
middle, middle_unescaped, trail = unescape(middle, trail)
url = 'http://%s' % middle_unescaped
if url: result.append(url)
return result
def is_pointer_inside_widget(widget, x=None, y=None):
result = False
window = widget.get_window()
allocation = widget.get_allocation()
if not allocation or not window: return result
_, pointer_x, pointer_y, mask = window.get_pointer()
if x: pointer_x = x
if y: pointer_y = y
if (
pointer_x >= allocation.x and
pointer_x <= (allocation.x + allocation.width) and
pointer_y >= allocation.y and
pointer_y <= (allocation.y + allocation.height)
):
result = True
return result
def get_widget_screenshot(widget):
window = widget.get_window()
if not window: return None
allocation = widget.get_allocation()
pixbuf = Gdk.pixbuf_get_from_window(
window,
allocation.x,
allocation.y,
allocation.width,
allocation.height
)
if not pixbuf:
return None
else:
return pixbuf
def is_visible_on_scroll(adjustment, widget):
allocation_box = widget.get_allocation()
return (
widget.is_visible() and
allocation_box.y >= adjustment.get_value() and
allocation_box.y + allocation_box.height < (
adjustment.get_value() + adjustment.get_page_size()
)
)
|
awamper/draobpilc
|
draobpilc/lib/utils.py
|
Python
|
gpl-3.0
| 7,280
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2013 by Erwin Marsi and TST-Centrale
#
# This file is part of the DAESO Framework.
#
# The DAESO Framework is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# The DAESO Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
parallel graph corpus diff and analysis
"""
__authors__ = "Erwin Marsi <e.marsi@gmail.com>"
# TODO
# - a number of things here might be Alpino specific
from sys import stdout
from daeso.pgc.corpus import ParallelGraphCorpus
from daeso.utils.report import header
def pgc_diff(corpus1, corpus2,
corpus_name1="Corpus1", corpus_name2="Corpus2",
annot1="Annot1", annot2="Annot2",
words_only=False,
show_comments=False,
show_ident=False,
relations=None,
out=stdout):
"""
reports the differences (and optionally the similarities) between
the labeled alignments from two parallel graph corpora
"""
assert len(corpus1) == len(corpus2)
if not relations:
relations = corpus1.get_relations()
# counter for numbering the alignments when printing;
# may be less than the actual number of alignments when identical alignments
# are not printed (cf. show_ident option)
align_count = 0
# counter for numbering the graph pairs when printing
pair_count = 0
header("%s corpus: %s\n%s corpus: %s" % (annot1, corpus_name1, annot2,
corpus_name2), width=120, char="#")
for graph_pair1, graph_pair2 in zip(corpus1, corpus2):
# assume that the corpora have the same graph pairs in the same order,
# so the only difference is in the aligned nodes
assert graph_pair1._banks == graph_pair2._banks
assert graph_pair1._graphs_equal(graph_pair2)
pair_count += 1
ident = []
rel_diff = []
uniq1 = []
uniq2 = []
# recall that graphs are identical
graphs = graph_pair1.get_graphs()
for nodes, rel1 in graph_pair1.alignments_iter(relations=relations):
if ( words_only and
                 graphs.source.node_is_non_terminal(nodes.source) and
graphs.target.node_is_non_terminal(nodes.target) ):
continue
rel2 = graph_pair2.get_align(nodes)
if not rel2:
uniq1.append(nodes)
elif rel1 == rel2:
ident.append(nodes)
else:
rel_diff.append(nodes)
for nodes, rel2 in graph_pair2.alignments_iter(relations=relations):
if ( words_only and
( graphs.source.node_is_terminal(nodes.source) or
graphs.target.node_is_terminal(nodes.target) )):
continue
if not graph_pair1.get_align(nodes):
uniq2.append(nodes)
#if not ( ident and rel_diff and uniq1 and uniq2 and show_comments ):
# continue
header("Graph pair %d" % pair_count, width=120, char="=")
print >>out, graphs.source.get_graph_token_string().encode("utf-8"), "\n"
print >>out, graphs.target.get_graph_token_string().encode("utf-8"), "\n"
if show_comments:
print_comments(graph_pair1, annot1, out)
print_comments(graph_pair2, annot2, out)
if show_ident:
ident.sort(cmp=cmp_nodes)
align_count = print_alignments(align_count, "Identical",
graph_pair1, graph_pair2, graphs, ident, out)
rel_diff.sort(cmp=cmp_nodes)
align_count = print_alignments(align_count, "Relation different",
graph_pair1, graph_pair2, graphs, rel_diff, out)
uniq1.sort(cmp=cmp_nodes)
align_count = print_alignments(align_count, annot1 + " only",
graph_pair1, graph_pair2, graphs, uniq1, out)
uniq2.sort(cmp=cmp_nodes)
align_count = print_alignments(align_count, annot2 + " only",
graph_pair1, graph_pair2, graphs, uniq2, out)
def cmp_nodes(nodes1, nodes2):
return cmp(int(nodes1.source), int(nodes2.source))
def print_alignments(align_count, title, graph_pair1, graph_pair2, graphs,
nodes_list, out=stdout):
if nodes_list:
header(title, out, char="-")
for nodes in nodes_list:
align_count += 1
rel1 = str(graph_pair1.get_align(nodes))
rel2 = str(graph_pair2.get_align(nodes))
# tricky because of implicit coercions,
# see "Formatting Markers" http://www.python.org/dev/peps/pep-0100/
print >>out, "#%d:" % align_count
s = '(%s) %s [%s:%s]: "%s"' % (
nodes.source,
graphs.source.node[nodes.source]["label"].encode("utf-8"),
graphs.source.node[nodes.source]["begin"],
graphs.source.node[nodes.source]["end"],
graphs.source.get_node_token_string(nodes.source))
print >>out, s.encode("utf-8")
print >>out, "<<<", rel1.upper(), "/", rel2.upper(), ">>>"
s = '(%s) %s [%s:%s]: "%s"\n' % (
nodes.target,
graphs.target.node[nodes.target]["label"],
graphs.target.node[nodes.target]["begin"],
graphs.target.node[nodes.target]["end"],
graphs.target.get_node_token_string(nodes.target))
print >>out, s.encode("utf-8")
return align_count
def print_comments(graph_pair, annot, out, encoding="utf-8"):
try:
comment = graph_pair.get_meta_data().find("comment").text
except AttributeError:
return
if comment.strip():
header("Comments by " + annot, out, char="-")
print >>out, comment.encode(encoding), "\n"
|
emsrc/daeso-framework
|
lib/daeso/pgc/diff.py
|
Python
|
gpl-3.0
| 6,732
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from sqlalchemy.event import listens_for
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.util.decorators import strict_classproperty
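# Minimal usage sketch for the mixin below (Thing and parent_event are
# hypothetical names, not part of this module): a subclass sets
# `location_backref_name`, overrides `location_parent`, and calls
# `register_location_events()` once the class is defined.
#   class Thing(LocationMixin, db.Model):
#       location_backref_name = 'things'
#       @property
#       def location_parent(self):
#           return self.parent_event
#   Thing.register_location_events()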
class LocationMixin:
"""Mixin to store location information in a model.
A location in this context can be either a reference to a room in
the roombooking module or a room and location name.
In case the location is inherited, the `location_parent` property
is used to determine the parent object from which the location is
inherited (which may also inherit its location).
"""
#: The name of the backref added to the `Room` model for items
#: which are associated with that room.
location_backref_name = None
#: Whether the item can inherit its location from a parent. If
#: this is ``False``, `location_parent` should not be overridden.
allow_location_inheritance = True
@strict_classproperty
@classmethod
def __auto_table_args(cls):
checks = [db.CheckConstraint("(room_id IS NULL) OR (venue_name = '' AND room_name = '')",
'no_custom_location_if_room'),
db.CheckConstraint("(venue_id IS NULL) OR (venue_name = '')",
'no_venue_name_if_venue_id'),
db.CheckConstraint("(room_id IS NULL) OR (venue_id IS NOT NULL)",
'venue_id_if_room_id')]
if cls.allow_location_inheritance:
checks.append(db.CheckConstraint("NOT inherit_location OR (venue_id IS NULL AND room_id IS NULL AND "
"venue_name = '' AND room_name = '' AND address = '')",
'inherited_location'))
fkeys = [db.ForeignKeyConstraint(['venue_id', 'room_id'],
['roombooking.rooms.location_id', 'roombooking.rooms.id'])]
return tuple(checks) + tuple(fkeys)
@classmethod
def register_location_events(cls):
"""Register sqlalchemy events needed by this mixin.
Call this method after the definition of a model which uses
this mixin class.
"""
@listens_for(cls.own_venue, 'set')
def _venue_changed(target, value, *unused):
if value is not None:
target.own_venue_name = ''
@listens_for(cls.own_room, 'set')
def _room_changed(target, value, *unused):
if value is not None:
target.own_room_name = ''
target.own_venue = value.location
@property
def location_parent(self):
"""The parent object to consult if the location is inherited."""
if not self.allow_location_inheritance:
return None
raise NotImplementedError
@declared_attr
def inherit_location(cls):
if cls.allow_location_inheritance:
return db.Column(
db.Boolean,
nullable=False,
default=True
)
else:
return False
@declared_attr
def own_room_id(cls):
return db.Column(
'room_id',
db.Integer,
db.ForeignKey('roombooking.rooms.id'),
nullable=True,
index=True
)
@declared_attr
def own_venue_id(cls):
return db.Column(
'venue_id',
db.Integer,
db.ForeignKey('roombooking.locations.id'),
nullable=True,
index=True
)
@declared_attr
def own_venue_name(cls):
return db.Column(
'venue_name',
db.String,
nullable=False,
default=''
)
@declared_attr
def own_room_name(cls):
return db.Column(
'room_name',
db.String,
nullable=False,
default=''
)
@declared_attr
def own_address(cls):
return db.Column(
'address',
db.Text,
nullable=False,
default=''
)
@declared_attr
def own_venue(cls):
return db.relationship(
'Location',
foreign_keys=[cls.own_venue_id],
lazy=True,
backref=db.backref(
cls.location_backref_name,
lazy='dynamic'
)
)
@declared_attr
def own_room(cls):
return db.relationship(
'Room',
foreign_keys=[cls.own_room_id],
lazy=True,
backref=db.backref(
cls.location_backref_name,
lazy='dynamic'
)
)
@property
def venue(self):
"""The venue (Location) where this item is located.
This is ``None`` if a custom venue name was entered.
"""
if self.inherit_location and self.location_parent is None:
return None
return self.own_venue if not self.inherit_location else self.location_parent.venue
@venue.setter
def venue(self, venue):
self.own_venue = venue
@property
def room(self):
"""The Room where this item is located.
This is ``None`` if a custom room name was entered.
"""
if self.inherit_location and self.location_parent is None:
return None
return self.own_room if not self.inherit_location else self.location_parent.room
@room.setter
def room(self, room):
self.own_room = room
@property
def venue_name(self):
"""The name of the location where this item is located."""
if self.inherit_location and self.location_parent is None:
return ''
venue = self.venue
if venue is not None:
return venue.name
return self.own_venue_name if not self.inherit_location else self.location_parent.venue_name
@venue_name.setter
def venue_name(self, venue_name):
self.own_venue_name = venue_name
def get_room_name(self, full=True, verbose=False):
"""The name of the room where this item is located.
If both ``full`` and ``verbose`` are set to ``False``, the
"friendly name" will be returned in that case. Both ``full`` and
``verbose`` cannot be set to ``True``.
:param full: If the room has a "friendly name" (e.g. 'Main
Amphitheatre'), a composite name will be returned.
:param verbose: The `verbose_name` of the room will be returned.
"""
assert sum([full, verbose]) <= 1
if self.inherit_location and self.location_parent is None:
return ''
room = self.room
if room is not None:
if full:
return room.full_name
elif verbose and room.verbose_name:
return room.verbose_name
else:
return room.name
return (self.own_room_name if not self.inherit_location
else self.location_parent.get_room_name(full=full, verbose=verbose))
@property
def room_name(self):
"""The name of the room where this item is located."""
return self.get_room_name(full=True)
@room_name.setter
def room_name(self, room_name):
self.own_room_name = room_name
@property
def has_location_info(self):
"""Whether the object has basic location information set."""
return bool(self.venue_name or self.room_name)
@property
def address(self):
"""The address where this item is located."""
if self.inherit_location and self.location_parent is None:
return ''
return self.own_address if not self.inherit_location else self.location_parent.address
@address.setter
def address(self, address):
self.own_address = address
@property
def location_data(self):
"""All location data for the item.
Returns a dict containing ``source``, ``inheriting``, ``room``,
``room_name``, ``venue_name`` and ``address``. The
``source`` is the object the location data is taken from, i.e.
either the item itself or the object the location data is
inherited from.
"""
data_source = self
while data_source and data_source.inherit_location:
data_source = data_source.location_parent
if data_source is None:
return {'source': None, 'venue': None, 'room': None, 'room_name': '', 'venue_name': '', 'address': '',
'inheriting': False}
else:
return {'source': data_source, 'venue': data_source.venue, 'room': data_source.room,
'room_name': data_source.room_name, 'venue_name': data_source.venue_name,
'address': data_source.address, 'inheriting': self.inherit_location}
@location_data.setter
def location_data(self, data):
self.inherit_location = data['inheriting']
self.venue_name = ''
self.room_name = ''
if self.inherit_location:
self.room = None
self.venue = None
self.address = ''
else:
self.room = data.get('room')
self.venue = data.get('venue')
self.address = data.get('address', '')
if not self.room:
self.room_name = data.get('room_name', '')
if not self.venue:
self.venue_name = data.get('venue_name', '')
@property
def widget_location_data(self):
"""All location data for the item, meant to be used in the location
widget.
"""
location_data = self.location_data
return {
'address': location_data['address'],
'room_name': location_data['room_name'],
'room_id': location_data['room'].id if location_data['room'] else '',
'venue_name': location_data['venue_name'],
'venue_id': location_data['venue'].id if location_data['venue'] else '',
}
def get_inherited_widget_location_data(self, init_inheritance):
"""Determine whether to return the object's location or the parent's.
If the object inherits its location, then the location source object is
the object's parent, so return the source's location. If the object
doesn't inherit its location, then the location source object is the
object itself, so return the source's parent location.
"""
return (self.location_parent.widget_location_data if not init_inheritance and self.location_parent
else self.widget_location_data)
|
DirkHoffmann/indico
|
indico/core/db/sqlalchemy/locations.py
|
Python
|
gpl-3.0
| 10,872
|
# Plugin by Infinity - <https://github.com/infinitylabs/UguuBot>
from util import hook, http, text
db_ready = False
def db_init(db):
    """check to see that our db has the horoscope table and return a connection."""
    global db_ready
    db.execute("create table if not exists horoscope(nick primary key, sign)")
    db.commit()
    db_ready = True
@hook.command(autohelp=False)
def horoscope(inp, db=None, notice=None, nick=None):
"""horoscope <sign> -- Get your horoscope."""
if not db_ready:
db_init(db)
# check if the user asked us not to save his details
dontsave = inp.endswith(" dontsave")
if dontsave:
sign = inp[:-9].strip().lower()
else:
sign = inp
db.execute("create table if not exists horoscope(nick primary key, sign)")
if not sign:
sign = db.execute("select sign from horoscope where nick=lower(?)",
(nick,)).fetchone()
if not sign:
notice(horoscope.__doc__)
return
sign = sign[0]
url = "http://my.horoscope.com/astrology/free-daily-horoscope-%s.html" % sign
soup = http.get_soup(url)
title = soup.find_all('h1', {'class': 'h1b'})[1]
horoscope = soup.find('div', {'class': 'fontdef1'})
result = "\x02%s\x02 %s" % (title, horoscope)
result = text.strip_html(result)
#result = unicode(result, "utf8").replace('flight ','')
if not title:
return "Could not get the horoscope for {}.".format(inp)
if inp and not dontsave:
db.execute("insert or replace into horoscope(nick, sign) values (?,?)",
(nick.lower(), sign))
db.commit()
return result
|
thejordan95/Groovebot2
|
plugins/horoscope.py
|
Python
|
gpl-3.0
| 1,664
|
import copy
import json
import logging
import colander
import datetime
from pyramid.httpexceptions import HTTPBadRequest
from lokp.config.form import getCategoryList
from lokp.models import DBSession
from lokp.protocols.activity_protocol import ActivityProtocol
from lokp.protocols.stakeholder_protocol import StakeholderProtocol
from lokp.review.activities import ActivityReview
from lokp.review.stakeholders import StakeholderReview
from lokp.utils.views import validate_item_type
log = logging.getLogger(__name__)
def structHasOnlyNullValues(cstruct, depth=0):
"""
Recursive function checking if the 'struct' value of a form only contains
empty values.
Also return the depth of the recursion, which allows to identify what type
the current 'struct' value is:
0: A single tag
1: A Taggroup
2: A Thematic Group
3: A Category
"""
allNull = True
newDepth = None
if cstruct == colander.null:
allNull = allNull and True
elif isinstance(cstruct, dict):
# A dict. Go one level deeper for each.
for c in cstruct:
a, d = structHasOnlyNullValues(cstruct[c], depth + 1)
if newDepth is None:
newDepth = d
else:
newDepth = max(newDepth, d)
allNull = allNull and a
elif isinstance(cstruct, list):
# A list. Go through each item of the list (though this does not mean a
# recursion level deeper)
for c in cstruct:
a, d = structHasOnlyNullValues(c, depth)
newDepth = d if newDepth is None else max(newDepth, d)
allNull = allNull and a
else:
# Values are not null.
allNull = allNull and False
return allNull, newDepth if newDepth is not None else depth
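# Rough illustration of the helper above: colander.null alone yields
# (True, 0), {'a': colander.null} yields (True, 1) and
# {'a': {'b': colander.null}} yields (True, 2); the second value tracks how
# deep the recursion went.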
def calculate_deletion_diff(request, item_type):
identifier = request.POST.get('id')
version = request.POST.get('version')
if not identifier or not version:
raise HTTPBadRequest(
'Unknown item to delete')
if validate_item_type(item_type) == 'a':
protocol = ActivityProtocol(DBSession)
other_item_type = 'stakeholders'
else:
protocol = StakeholderProtocol(DBSession)
other_item_type = 'activities'
item = protocol.read_one_by_version(
request, identifier, version, translate=False).to_table(request)
# Collect every taggroup and tag, mark all to be deleted.
taggroups_diff = []
for taggroup in item.get('taggroups', []):
tags_diff = []
for tag in taggroup.get('tags', []):
tags_diff.append({
'key': tag.get('key'),
'value': tag.get('value'),
'op': 'delete'
})
taggroups_diff.append({
'tg_id': taggroup.get('tg_id'),
'tags': tags_diff,
'op': 'delete'
})
# Collect every involvement and mark them to be deleted.
involvement_diff = []
for involvement in item.get('involvements', []):
involvement_diff.append({
'id': involvement.get('data', {}).get('id'),
'version': involvement.get('version'),
'role': involvement.get('role_id'),
'op': 'delete'
})
diff = {
item_type: [{
'taggroups': taggroups_diff,
'id': identifier,
'version': version
}]
}
if involvement_diff:
diff[item_type][0][other_item_type] = involvement_diff
return diff
def formdataToDiff(request, newform, itemType):
"""
Use the formdata captured on submission of the form and compare it with the
old version of an object to create a diff which allows to create/update the
object.
Approach: Loop all items of the newform and check if they are new or
changed compared to the oldform. If so, add them to diff and set their
values to null in the oldform. Any remaining items (with non-null values)
of oldform were deleted and need to be added to diff as well.
- newform: The form data captured on submission
- itemType: activities / stakeholders
"""
def _addInvolvementDictToList(invList, invDict):
"""
Append an involvement in dict-form to a list of involvements.
Add it only if it does not contain null values.
"""
if ('id' in invDict and invDict['id'] != colander.null
and 'version' in invDict and invDict['version'] != colander.null
and 'role_id' in invDict
and invDict['role_id'] != colander.null):
try:
int(invDict['role_id'])
invList.append(invDict)
            except (TypeError, ValueError):
                pass
return invList
def _findRemoveTgByCategoryThematicgroupTgid(
form, category, thematicgroup, tg_id, checkbox=False):
"""
Helper function to find a taggroup by its category, thematic group and
tg_id. If it was found, its values in the form are set to null and the
tags are returned.
"""
# Loop the categories of the form to find the one with the given id.
for (cat, thmgrps) in form.items():
if cat == category:
# Loop the thematic groups of the category to find the one with
# the given id.
for (thmgrp, tgroups) in thmgrps.items():
if thmgrp == thematicgroup:
# Loop the taggroups of the thematic group
for (tgroup, tags) in tgroups.items():
# Transform them all to lists to handle them all
# the same.
if not isinstance(tags, list):
tags = [tags]
# Look at each taggroup and check the tg_id
for t in tags:
if 'tg_id' in t and t['tg_id'] == tg_id:
ret = {}
for (k, v) in t.items():
# Copy the keys and values
ret[k] = v
# Set the values to null
t[k] = colander.null
return form, ret
elif checkbox is True and (
'tg_id' not in t
or t['tg_id'] == colander.null):
# Checkboxes: The actual taggroups are in a
# list one level further down
for (k, v) in t.items():
if isinstance(v, set):
# Turn sets into lists
v = list(v)
if not isinstance(v, list):
continue
for (value, taggroupid) in v:
if str(taggroupid) == str(tg_id):
# If the taggroup was found,
# remove it from the list.
v.remove((value, taggroupid))
if len(v) == 0:
# If there is no further
# taggroup in the list, set
# value of key to null.
t[k] = colander.null
return form, True # Return
return form, None
identifier = colander.null
version = colander.null
oldform = {}
if 'id' in newform:
# If the form contains an identifier, it is an edit of an existing item
identifier = newform['id']
del newform['id']
if 'version' in newform:
# If the form contains an identifier, it should also have a version
version = newform['version']
del newform['version']
if 'category' in newform:
# The category is not needed
del newform['category']
if 'itemType' in newform:
# ItemType is not needed
del newform['itemType']
if 'statusId' in newform:
# statusId is not needed
del newform['statusId']
if 'taggroup_count' in newform:
del newform['taggroup_count']
if identifier != colander.null and version != colander.null:
# Use the protocol to query the values of the version which was edited
if itemType == 'stakeholders':
protocol = StakeholderProtocol(DBSession)
else:
protocol = ActivityProtocol(DBSession)
item = protocol.read_one_by_version(
request, identifier, version, translate=False, geometry='full'
)
olditemjson = item.to_table(request)
# Look only at the values transmitted to the form and therefore visible
# to the editor.
oldform = getFormdataFromItemjson(request, olditemjson, itemType)
if 'id' in oldform:
del oldform['id']
if 'version' in oldform:
del oldform['version']
if 'category' in oldform:
del oldform['category']
if itemType == 'stakeholders':
# The form for Stakeholders has involvement fields in them which
# are only used for display purposes (since involvements can only
# be added on Activity side). We need to remove them before
# processing the diff.
if 'primaryinvestors' in oldform:
del oldform['primaryinvestors']
if 'secondaryinvestors' in oldform:
del oldform['secondaryinvestors']
for (oldcat, othmgrps) in oldform.items():
# It is not guaranteed that all of the form is in the session (the
# newform variable). This is the case for example if a user did not
# open a category of the form when editing the item.
if oldcat not in newform:
# For each category which is not in the newform but in the old
# original form, copy the original to the newform.
for (oldthmgrp, oldtgroups) in othmgrps.items():
# Because the values in the newform are rendered when
# submitted by the form, some values of the original form
# (which were not captured) need to be adapted to match.
for (oldtgroup, oldtags) in oldtgroups.items():
if not isinstance(oldtags, list):
if 'tg_id' not in oldtags:
oldtags['tg_id'] = colander.null
for (oldkey, oldvalue) in oldtags.items():
if isinstance(oldvalue, list):
oldtags[oldkey] = set(oldvalue)
newform[oldcat] = othmgrps
categorylist = getCategoryList(request, itemType)
taggroupdiffs = []
involvementdiffs = []
geometrydiff = None
# Loop the newform to check if there are taggroups which changed or are new
# Loop the categories of the form
for (cat, thmgrps) in newform.items():
try:
thmgrpsitems = thmgrps.items()
except AttributeError:
continue
# Loop the thematic groups of the category (TODO: iterate over taggroup 12)
for (thmgrp, tgroups) in thmgrpsitems:
if (thmgrp in categorylist.getInvolvementThematicgroupIds()
and itemType != 'stakeholders'):
# Important: Involvements can only be changed from the side of
# the activities!
# In a first step, collect all the involvements there are in
# the new form and the involvements that were in the oldform.
# Use them only if they have an id, a version and a role_id.
newInvolvements = []
oldInvolvements = []
for (tgrp, involvements) in tgroups.items():
if isinstance(involvements, dict):
newInvolvements = _addInvolvementDictToList(
newInvolvements, involvements)
elif isinstance(involvements, list):
for i in involvements:
newInvolvements = _addInvolvementDictToList(
newInvolvements, i)
# Collect the old involvement data from the original form with
# the same category and thematic group.
if (str(cat) in oldform and str(thmgrp) in oldform[str(cat)]):
oldInvgrp = oldform[str(cat)][str(thmgrp)]
for (invName, invData) in oldInvgrp.items():
if isinstance(invData, dict):
oldInvolvements = _addInvolvementDictToList(
oldInvolvements, invData)
elif isinstance(invData, list):
for i in invData:
oldInvolvements = _addInvolvementDictToList(
oldInvolvements, i)
# Loop the new involvements and try to find them in the old
# involvements (based on their identifier, version and
# role_id). If they are found, remove them from the list of
# old involvements. If they are not found, mark them as newly
# added.
for ni in newInvolvements:
found = False
for oi in oldInvolvements:
if (ni['id'] == oi['id']
and ni['version'] == oi['version']
and str(ni['role_id']) == str(oi['role_id'])):
found = True
oldInvolvements.remove(oi)
break
if found is False:
# Involvement is new
involvementdiffs.append({
'id': ni['id'],
'version': ni['version'],
'role': ni['role_id'],
'op': 'add'
})
# Loop the remaining old involvements and mark them as deleted.
for oi in oldInvolvements:
involvementdiffs.append({
'id': oi['id'],
'version': oi['version'],
'role': oi['role_id'],
'op': 'delete'
})
if thmgrp in categorylist.getMapThematicgroupIds():
# Map data
cfgThmg = categorylist.findThematicgroupById(thmgrp)
for (tgrp, map) in tgroups.items():
if tgrp != cfgThmg.getMap().getName():
continue
oldgeom = None
if (cat in oldform and thmgrp in oldform[cat]
and cfgThmg.getMap().getName()
in oldform[cat][thmgrp]):
oldgeom = oldform[cat][thmgrp][
cfgThmg.getMap().getName()]
geometry = (
map['geometry'] if 'geometry' in map
and map['geometry'] != colander.null else None)
if geometry is None:
continue
geometry = json.loads(geometry)
if oldgeom is None:
geometrydiff = geometry
continue
oldgeom = json.loads(oldgeom['geometry']) if 'geometry' in oldgeom else oldgeom
if json.dumps(oldgeom, sort_keys=True) != json.dumps(geometry, sort_keys=True):
geometrydiff = geometry
# Loop the tags of each taggroup
for (tgroup, tags) in tgroups.items():
# Transform all to list so they can be treated all the same
if not isinstance(tags, list):
tags = [tags]
for t in tags:
if 'tg_id' not in t:
continue
if t['tg_id'] != colander.null:
# Taggroup was there before because it contains a
# tg_id. Check if it contains changed values.
# Make a copy of the tags because the function to find
# and remove below modifies t.
t_copy = copy.copy(t)
# Try to find the taggroup by its tg_id in the oldform
oldform, oldtaggroup = \
_findRemoveTgByCategoryThematicgroupTgid(
oldform, cat, thmgrp, t['tg_id'])
if oldtaggroup is None:
# This should never happen since all tg_ids should
# be known.
log.debug(
'\n\n*** TG_ID NOT FOUND: When trying to find '
'and remove taggroup by tg_id (%s), the '
'taggroup was not found in the old form.\n\n'
% t['tg_id'])
continue
deletedtags = []
addedtags = []
for (k, v) in t_copy.items():
if type(v) == datetime.date \
or type(v) == datetime.datetime:
v = datetime.datetime.strftime(v, '%Y-%m-%d')
oldv = oldtaggroup[k] if k in oldtaggroup else None
if type(oldv) == datetime.date \
or type(oldv) == datetime.datetime:
oldv = datetime.datetime.strftime(
oldv, '%Y-%m-%d')
if (k != 'tg_id'):
if (oldv is not None and
str(v) != str(oldv)
and v != colander.null):
# Because the form renders values as
# floats, it is important to compare them
# correctly with an integer value
try:
if float(oldv) == float(v):
continue
except ValueError:
pass
except TypeError:
pass
# If a key is there in both forms but its
# value changed, add it once as deleted and
# once as added.
deletedtags.append({
'key': k,
'value': oldv
})
addedtags.append({
'key': k,
'value': v
})
elif (k not in oldtaggroup
and v != colander.null):
# If a key was not there in the oldform,
# add it as added.
addedtags.append({
'key': k,
'value': v
})
elif (k in oldtaggroup
and str(v) !=
str(oldtaggroup[k])
and v == colander.null):
# If a key was in the oldform but not in
# the new one anymore, add it as deleted.
oldv = oldtaggroup[k]
if type(oldv) == datetime.date or type(
oldv) == datetime.datetime:
oldv = datetime.datetime.strftime(
oldv, '%Y-%m-%d')
deletedtags.append({
'key': k,
'value': oldtaggroup[k]
})
# Put together diff for the taggroup
if len(deletedtags) > 0 or len(addedtags) > 0:
tagdiffs = []
for dt in deletedtags:
del(oldtaggroup[dt['key']])
tagdiffs.append({
'key': dt['key'],
'value': dt['value'],
'op': 'delete'
})
for at in addedtags:
tagdiffs.append({
'key': at['key'],
'value': at['value'],
'op': 'add'
})
tgdiff = {
'tg_id': t['tg_id'],
'tags': tagdiffs
}
# If there are no tags left in the old taggroup,
# mark the entire taggroup diff to be deleted.
del(oldtaggroup['tg_id'])
if len(addedtags) == 0 and len(deletedtags) > 0 \
and oldtaggroup == {}:
tgdiff['op'] = 'delete'
taggroupdiffs.append(tgdiff)
else:
# Taggroup has no tg_id. It is either a new taggroup to
# be added (if it contains values) or it may be a group
# of checkboxes (with tg_ids a level lower)
# For Checkboxes: Values cannot really change, they can
# only be added (if another checkbox is selected, a new
# taggroup is created). If a checkbox was submitted
# with a tg_id, it was there before already.
addedtags = []
addedtaggroups = []
for (k, v) in t.items():
if (k != 'tg_id' and v != colander.null):
if isinstance(v, set):
# If the value is a set, the form displayed
# a group of checkboxes. Each of the values
# is treated as a separate taggroup.
for (value, tg_id) in v:
# Since we transformed all values to
# unicode, also 'None' is a string now.
if tg_id is None or tg_id == 'None':
# No tg_id, treat it as a new
# taggroup
addedtaggroups.append({
'op': 'add',
'tags': [{
'key': k,
'value': value,
'op': 'add'
}],
'main_tag': {
'key': k,
'value': value
}
})
else:
# Try to find and remove the
# taggroup in the old form
oldform, oldtaggroup = \
_findRemoveTgByCategoryThematicgroupTgid(
oldform, cat, thmgrp,
tg_id, True)
if oldtaggroup is None:
# This basically should not
# happen because the tg_id
# always should be found.
log.debug(
'\n\n*** TG_ID NOT FOUND: '
'When trying to find and '
'remove taggroup by tg_id '
'(%s), the taggroup was '
'not found in the old '
'form.\n\n' % tg_id)
else:
# Write dates as string
if type(v) == datetime.date or \
type(v) == datetime.datetime:
v = datetime.datetime.strftime(
v, '%Y-%m-%d')
# No checkbox, simply a new tag
addedtags.append({
'key': k,
'value': v
})
# For checkboxes, the diff is already a taggroup.
if len(addedtaggroups) > 0:
for tg in addedtaggroups:
taggroupdiffs.append(tg)
# Put together diff for taggroup
elif len(addedtags) > 0:
# Newly added taggroups need to have a valid
# main_tag. We need to find out the main_tag of the
# current taggroup for the diff
cCat = categorylist.findCategoryById(cat)
if cCat is None:
continue
cThmg = cCat.findThematicgroupById(thmgrp)
if cThmg is None:
continue
cTg = cThmg.findTaggroupById(tgroup)
if cTg is None:
continue
mainkey = cTg.getMaintag().getKey().getName()
if mainkey is None or mainkey not in t:
continue
mainvalue = t[mainkey]
if mainvalue is None:
continue
# Store date maintags also as string
if type(mainvalue) == datetime.date or type(
mainvalue) == datetime.datetime:
mainvalue = datetime.datetime.strftime(
mainvalue, '%Y-%m-%d')
tagdiffs = []
for at in addedtags:
tagdiffs.append({
'key': at['key'],
'value': at['value'],
'op': 'add'
})
taggroupdiffs.append({
'op': 'add',
'tags': tagdiffs,
'main_tag': {
'key': mainkey,
'value': mainvalue
}
})
# Loop the oldform to check if there are any tags remaining which have
# values in them, meaning that they are not in the newform anymore and are
# therefore to be deleted.
# Loop the categories of the form
for (cat, thmgrps) in oldform.items():
# Loop the thematic groups of the category
for (thmgrp, tgroups) in thmgrps.items():
# Loop the tags of each taggroup
for (tgroup, tags) in tgroups.items():
# Transform all to list so they can be treated all the same
if not isinstance(tags, list):
tags = [tags]
for t in tags:
if 'tg_id' in t and t['tg_id'] != colander.null:
deletedtags = []
for (k, v) in t.items():
if type(v) == datetime.date or \
type(v) == datetime.datetime:
v = datetime.datetime.strftime(v, '%Y-%m-%d')
if (k != 'tg_id' and v != colander.null):
deletedtags.append({
'key': k,
'value': v
})
if len(deletedtags) > 0:
tagdiffs = []
for dt in deletedtags:
tagdiffs.append({
'key': dt['key'],
'value': dt['value'],
'op': 'delete'
})
taggroupdiffs.append({
'op': 'delete',
'tg_id': t['tg_id'],
'tags': tagdiffs
})
else:
# Checkbox: Look one level deeper
for (k, v) in t.items():
if not isinstance(v, list):
continue
for (value, taggroupid) in v:
# Add it directly to taggroups
taggroupdiffs.append({
'op': 'delete',
'tg_id': taggroupid,
'tags': [{
'key': k,
'value': value,
'op': 'delete'
}]
})
ret = None
if (len(taggroupdiffs) > 0 or len(involvementdiffs) > 0
or geometrydiff is not None):
itemdiff = {}
if len(taggroupdiffs) > 0:
itemdiff['taggroups'] = taggroupdiffs
if len(involvementdiffs) > 0:
kw = 'activities' if itemType == 'stakeholders' else 'stakeholders'
itemdiff[kw] = involvementdiffs
if geometrydiff is not None:
itemdiff['geometry'] = geometrydiff
itemdiff['version'] = version if version is not colander.null else 1
if identifier is not colander.null:
itemdiff['id'] = identifier
ret = {}
ret[itemType] = [itemdiff]
return ret
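# Rough shape of the value returned for a single changed tag on an existing
# Activity (identifiers and values below are made up for illustration):
#   {'activities': [{
#       'taggroups': [{
#           'tg_id': 7,
#           'tags': [
#               {'key': 'Intention', 'value': 'Forestry', 'op': 'delete'},
#               {'key': 'Intention', 'value': 'Agriculture', 'op': 'add'}]}],
#       'version': 2,
#       'id': '<uuid>'}]}
# Involvement changes would additionally appear under a 'stakeholders' key
# and a changed geometry under 'geometry'.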
def getFormdataFromItemjson(
request, itemJson, itemType, category=None, **kwargs):
"""
Use the JSON representation of a feature (Activity or Stakeholder) to get
the values in a shape the form can handle and display. This can be used to
display a form with some values already filled out in order to edit an
existing Activity or Stakeholder.
The values of the form depend on the configuration yaml. If a Tag is not
found there, it is ignored and not returned by this function.
- itemJson: The JSON representation of an object. This should contain
exactly 1 version of an item (starting with {'activities': {...}} or
{'stakeholders': {...}})
- itemType: activities / stakeholders
"""
readOnly = kwargs.get('readOnly', False)
compareFeature = kwargs.get('compareFeature', None)
if compareFeature is not None:
if itemType == 'activities':
review = ActivityReview(request)
else:
review = StakeholderReview(request)
mapAdded = False
if itemType == 'activities':
otherItemType = 'stakeholders'
else:
otherItemType = 'activities'
# TODO: Translation
unknownString = 'Unknown'
def _getInvolvementData(involvementData, keyNames):
"""
Helper function to extract the involvement data needed for the display
fields of the involvement overview.
"""
if involvementData is None or 'data' not in involvementData:
return None
data = involvementData['data']
if 'taggroups' not in data:
return None
# Set them to null by default
fields = {}
for keyName in keyNames:
fields[keyName] = unknownString
for tg in data['taggroups']:
if 'main_tag' not in tg or tg['main_tag'] is None:
continue
maintag = tg['main_tag']
for f in fields:
if ('key' in maintag and 'value' in maintag
and maintag['key'] == f):
fields[f] = maintag['value']
fields['id'] = data['id']
fields['version'] = involvementData['version']
fields['role_id'] = involvementData['role_id']
if compareFeature is not None:
reviewable = 0
inv = compareFeature.find_involvement_by_guid(data['id'])
# Check if the involvement is reviewable. This is only important if
# the version on the other side is pending or edited.
if inv is not None and inv.get_status() in [1, 6]:
reviewable = review._review_check_involvement(
inv._feature.getMappedClass(), inv._feature.get_guid(),
inv._feature.get_version())
fields['reviewable'] = reviewable
return fields
# Get the list of categories (needed to validate the tags)
categorylist = getCategoryList(request, itemType)
validJsonErrors = checkValidItemjson(categorylist, itemJson, output='list')
if len(validJsonErrors) > 0:
return {}
data = {
'id': itemJson['id'],
'version': itemJson['version'],
'category': category
}
if ('involvements' in itemJson and (
category is None or
str(category) in categorylist.getInvolvementCategoryIds())):
# Have a look at the involvements
involvements = itemJson['involvements']
otherCategoryList = getCategoryList(request, otherItemType)
invOverviewKeys = [
k[0] for k in otherCategoryList.getInvolvementOverviewKeyNames()]
for inv in involvements:
if 'role_id' not in inv:
continue
invCat, invThmg = categorylist.getGroupsByRoleId(inv['role_id'])
if invCat is None or invThmg is None:
continue
invConfig = invThmg.getInvolvement()
invData = _getInvolvementData(inv, invOverviewKeys)
if invData is None:
# The involvement contains no usable data, skip it.
continue
if 'reviewable' in invData:
# For multiple involvements, do not always overwrite the flag
# whether an involvement is reviewable or not. As error
# messages have a negative code, use the minimal error code of
# all involvements.
if 'reviewable' in data:
data['reviewable'] = min(
data['reviewable'], invData['reviewable'])
else:
data['reviewable'] = invData['reviewable']
if readOnly and 'role_id' in invData:
# For readonly forms, we need to populate the role_name with
# the name of the Stakeholder_Role
invRole = invConfig.findRoleById(invData['role_id'])
if invRole is not None:
invData['role_name'] = invRole.getName()
if str(invCat.getId()) not in data:
data[str(invCat.getId())] = {}
dataCat = data[str(invCat.getId())]
if str(invThmg.getId()) not in dataCat:
dataCat[str(invThmg.getId())] = {}
dataThmg = dataCat[str(invThmg.getId())]
if invConfig.getRepeatable() is True:
if invConfig.getName() in dataThmg and len(
dataThmg[invConfig.getName()]) > 0:
dataThmg[invConfig.getName()].append(invData)
else:
dataThmg[invConfig.getName()] = [invData]
else:
dataThmg[invConfig.getName()] = invData
for taggroup in itemJson['taggroups']:
# Get the category and thematic group based on the maintag
mt = taggroup['main_tag']
if mt is None:
# If the maintag is empty, move on and do not add it to the form
continue
cat, thmg, tg = categorylist. \
findCategoryThematicgroupTaggroupByMainkey(mt['key'])
# Treat the id's all as strings
cat = str(cat)
thmg = str(thmg)
if tg is None:
# If the Form Taggroup for this maintag was not found, move on and
# do not add it to form
continue
tgid = str(tg.getId())
maintag = tg.getMaintag()
if maintag.getKey().getType().lower() in ['checkbox', 'inputtoken']:
# Checkboxes are represented as a list of tuples containing their
# names and tg_id's.
tagsdata = {}
else:
# Prepare the data of the tags
tagsdata = {'tg_id': taggroup['tg_id']}
for t in taggroup['tags']:
# Add the tag only if the key exists in this taggroup
if tg.hasKey(t['key']):
configTag = categorylist.findTagByKeyName(t['key'])
v = t['value']
if readOnly is True and configTag is not None:
# If the form is rendered for readOnly, use the translated
# value (of checkboxes, dropdowns etc.) if there is one.
configValue = configTag.findValueByName(v)
if configValue is not None:
v = configValue.getTranslation()
if maintag.getKey().getType().lower() in [
'checkbox', 'inputtoken']:
# Checkboxes: List of tuples with name and tg_id
tagsdata[t['key']] = [(v, taggroup['tg_id'])]
elif (configTag is not None
and configTag.getKey().getType().lower() == 'date'):
try:
d = datetime.datetime.strptime(v, '%Y-%m-%d')
tagsdata[t['key']] = d
except ValueError:
pass
else:
tagsdata[t['key']] = v
if 'geometry' in taggroup:
# Geometry values of polygons are added to the edit session.
# tagsdata['geometry'] = taggroup['geometry']
tagsdata[tg.map.name] = {'geometry': taggroup['geometry']}
if tg.getRepeatable():
tagsdata = [tagsdata]
if cat in data:
# Category already exists, check thematic group
if thmg in data[cat]:
# Thematic group already exists, check taggroup
if tgid in data[cat][thmg]:
# Taggroup already exists. This should only happen if
# taggroup is repeatable or if the tags are checkboxes.
# In this case add taggroup to the array of taggroups
if tg.getRepeatable():
# Repeatable: Add the data to the list of taggroups
data[cat][thmg][tgid].append(tagsdata[0])
elif (maintag.getKey().getType().lower() in [
'checkbox', 'inputtoken']
and t['key'] in data[cat][thmg][tgid]):
# Checkboxes: Add the data to the list of tuples
data[cat][thmg][tgid][t['key']] += tagsdata[t['key']]
else:
log.debug(
'DUPLICATE TAGGROUP: Taggroup %s in thematic group'
' %s and category %s appears twice although it is '
'not repeatable!' % (tgid, thmg, cat))
else:
# Taggroup does not exist yet, tags can be added
data[cat][thmg][tgid] = tagsdata
else:
# Thematic group does not exist yet, taggroup and tags can be
# added
data[cat][thmg] = {tgid: tagsdata}
else:
# Category does not exist yet, thematic group and taggroup and tags
# can be added
# Add the category only if the category is to be visible in the
# current form.
if category is None or cat == str(category):
data[cat] = {thmg: {tgid: tagsdata}}
# Map: Look only if the category contains a thematic group which has a
# map.
if (cat in categorylist.getMapCategoryIds()
and thmg in categorylist.getMapThematicgroupIds()):
# Make sure all the necessary values are there and add it only
# once.
# TODO: The parameter 'map' is defined in the yaml (map: map) and
# therefore rather static. Should this be made more dynamic?
if (cat in data and thmg in data[cat]
and 'map' not in data[cat][thmg]
and 'geometry' in itemJson):
mapAdded = True
data[cat][thmg]['map'] = {
'geometry': json.dumps(itemJson['geometry'])
}
# Map
if (category is not None
and str(category) in categorylist.getMapCategoryIds()
and mapAdded is False):
# The current category contains a map which has not yet been added to
# the form data. This may be the case if there are no other taggroups
# in this category or thematic group filled out.
cat = categorylist.findCategoryById(category)
catId = str(cat.getId())
if catId not in data:
data[catId] = {}
thematicgroup = None
for thmg in categorylist.getMapThematicgroupIds():
if cat.findThematicgroupById(thmg) is not None:
thematicgroup = cat.findThematicgroupById(thmg)
break
if thematicgroup is not None and 'geometry' in itemJson:
thmgId = str(thematicgroup.getId())
if thmgId not in data[catId]:
data[catId][thmgId] = {}
data[catId][thmgId]['map'] = {
'geometry': json.dumps(itemJson['geometry'])
}
log.debug('Formdata created by ItemJSON: %s' % data)
return data
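# Rough shape of the formdata returned for an Activity (all ids and values
# below are made up; the real ids come from the configuration yaml):
#   {'id': '<uuid>', 'version': 1, 'category': None,
#    '1': {                       # category id
#        '5': {                   # thematic group id
#            '2': {               # taggroup id
#                'tg_id': 0,
#                'Intention': 'Agriculture'},
#            'map': {'geometry': '{"type": "Point", ...}'}}}}
# Repeatable taggroups are stored as lists of such dicts, and checkbox or
# inputtoken keys hold lists of (value, tg_id) tuples instead of plain
# values.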
def checkValidItemjson(categorylist, itemJson, output='dict'):
validMainkeys = categorylist.getAllMainkeyNames()
taggroups = itemJson['taggroups']
errors = []
for taggroup in taggroups:
maintag = taggroup['main_tag']
# Make sure the maintag exists and contains values
if maintag is None or 'key' not in maintag or maintag['key'] is None:
errors.append(
'Undefined Maintag: Maintag of taggroup %s is not defined or '
'has no values.' % taggroup)
continue
# Make sure that the maintag is in the list of valid maintags according
# to the configuration
if maintag['key'] not in validMainkeys:
errors.append(
'Invalid Maintag: Maintag (%s) of taggroup %s is not a valid '
'maintag according to the configuration.' % (
maintag['key'], taggroup))
# Make sure that the taggroup contains only one mainkey according to
# the configuration
keys = []
for tag in taggroup['tags']:
keys.append(tag['key'])
mainkeys = []
for k in keys:
if k in validMainkeys:
mainkeys.append(k)
if len(mainkeys) > 1:
errors.append(
'Too many Mainkeys: The taggroup %s should contain only 1 '
'mainkey according to the configuration. It contains %s: %s'
% (taggroup, len(mainkeys), ', '.join(mainkeys)))
# Make sure that all the tags are valid keys in the same taggroup
# according to the configuration
if len(mainkeys) == 1:
catId, thgId, confTaggroup = categorylist. \
findCategoryThematicgroupTaggroupByMainkey(maintag['key'])
if confTaggroup is not None:
for k in keys:
if confTaggroup.hasKey(k) is False:
errors.append(
'Wrong key in taggroup: The key %s is not valid in'
' a taggroup with mainkey %s' % (
k, maintag['key']))
if len(errors) > 0:
log.debug(
'\n\n==================================\nThe itemJson is not valid'
' according to the yaml configuration. The following errors exist:'
'\n** FORM ERROR ** %s\n==================================\n\n'
% '\n** FORM ERROR ** '.join(errors))
if output == 'dict':
ret = {'errors': errors}
if len(errors) > 0:
ret['itemJson is valid'] = False
ret['errorCount'] = len(errors)
else:
ret['itemJson is valid'] = True
return ret
elif output == 'list':
return errors
return None
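# With output='dict' the function reports validity roughly like this (the
# error string is shortened for illustration):
#   {'errors': ['Undefined Maintag: ...'],
#    'itemJson is valid': False,
#    'errorCount': 1}
# With output='list' only the list of error strings is returned; any other
# value for output yields None.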
def doClearFormSessionData(request, item, attr):
"""
Function to clear the session of any form-related data.
"""
# Clear the session of any form data
if item in request.session and attr in request.session[item]:
del(request.session[item][attr])
def addCreatedInvolvementToSession(
request, session, itemType, invName, created):
"""
Add a newly created Involvement to the session so it is accessible when
switching back to the original form.
"""
if itemType not in session or 'form' not in session[itemType] \
or invName is None:
return False
configList = getCategoryList(request, itemType)
cat = configList.findCategoryByInvolvementName(invName)
if cat is None or str(cat.getId()) not in session[itemType]['form']:
return False
sessionCat = session[itemType]['form'][str(cat.getId())]
thmg = configList.findThematicgroupByInvolvementName(invName)
if thmg is None or str(thmg.getId()) not in sessionCat:
return False
sessionThmg = sessionCat[str(thmg.getId())]
if invName not in sessionThmg:
return False
sessionInv = sessionThmg[invName]
newInv = created
# Add a role to the new involvement. By default, use the first one
# available
configInv = thmg.getInvolvement()
configRoles = configInv.getRoles()
if len(configRoles) < 1:
return False
newInv['role_id'] = configRoles[0].getId()
if isinstance(sessionInv, dict):
# The involvement is not repeatable; there is only one, which is to be
# replaced.
sessionInv = newInv
elif isinstance(sessionInv, list):
# The involvements are repeatable.
invAdded = False
for i, inv in enumerate(sessionInv):
# Try to use the first empty entry (no version or id)
if inv['version'] != colander.null or inv['id'] != colander.null:
continue
sessionInv[i] = newInv
invAdded = True
break
if invAdded is False:
# If the involvement was not added (because no empty entry was
# found), add it to the end of the list
sessionInv.append(newInv)
else:
return False
# Update the session
session[itemType]['form'][str(cat.getId())][str(thmg.getId())][invName] = \
sessionInv
log.debug('Added involvement to session: %s' % newInv)
return True
def mergeFormdata(ref, new):
"""
Merge two formdatas to create a single formdata which can be used in a
compareForm as rendered by the function renderReadonlyCompareForm.
The formdata has the following structure:
'1': {
'5': {
'ref_1': [
{
'tg_id': 0,
'[A] Integerfield 1': 1,
'[A] Numberfield 2': 2,
'change': 'change'
}
],
'new_1': [
{
'tg_id': 0,
'[A] Integerfield 1': 3,
'[A] Numberfield 2': 4,
'change': 'change'
}
],
'change': 'change'
},
'change': 'change'
}
"""
def _addPrefixToEachTaggroup(data, prefix):
"""
Adds a prefix to each taggroup in the form:
[PREFIX]_[ID]
"""
new_data = {}
for cat_id, cat in data.items():
if cat_id in ['category', 'version', 'id', 'reviewable']:
continue
new_cat = {}
for thmg_id, thmg in cat.items():
new_thmg = {}
for tg_id, tg in thmg.items():
new_thmg['%s_%s' % (prefix, tg_id)] = tg
new_cat[thmg_id] = new_thmg
new_data[cat_id] = new_cat
return new_data
def _mergeDicts(a, b, path=None):
"""
Merge one dict in another.
http://stackoverflow.com/a/7205107/841644
"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
_mergeDicts(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
else:
raise Exception(
'Conflict at %s' % '.'.join(path + [str(key)]))
else:
a[key] = b[key]
return a
# Mark all taggroups of the two versions with 'ref' or 'new' respectively.
# Then merge the two dicts.
merged = _mergeDicts(
_addPrefixToEachTaggroup(ref, 'ref'),
_addPrefixToEachTaggroup(new, 'new'))
# Mark each thematicgroup and category which have changes in them. Also
# make sure that each taggroup missing in one version receives a flag so
# it is displayed as well in the form table.
geomChanged = False
for cat_id, cat in merged.items():
catChanged = False
for thmg_id, thmg in cat.items():
thmgChanged = False
missingTgs = []
for tg_id, tg in thmg.items():
prefix, id = tg_id.split('_')
if prefix == 'ref':
otherTaggroup = '%s_%s' % ('new', id)
else:
otherTaggroup = '%s_%s' % ('ref', id)
if otherTaggroup not in thmg:
missingTgs.append((otherTaggroup, tg))
if isinstance(tg, dict):
changed = False
if otherTaggroup not in thmg:
# Taggroup did not exist previously
changed = True
else:
# Check contents of taggroup to see if it changed
d = DictDiffer(tg, thmg[otherTaggroup])
diff = d.added().union(d.removed()).union(d.changed())
if 'reviewable' in diff:
diff.remove('reviewable')
if 'change' in diff:
diff.remove('change')
if 'geometry' in diff:
geomChanged = True
diff.remove('geometry')
if len(diff) > 0:
changed = True
if changed is True:
tg['change'] = 'change'
# Changes in the map "taggroup" should not mark the
# whole thematic group as changed.
if id != 'map':
thmgChanged = True
elif isinstance(tg, list):
if otherTaggroup not in thmg:
for t in tg:
t['change'] = 'change'
thmgChanged = True
else:
for t in tg:
changed = True
for ot in thmg[otherTaggroup]:
d = DictDiffer(t, ot)
diff = d.added().union(d.removed()).union(
d.changed())
if 'reviewable' in diff:
diff.remove('reviewable')
if 'change' in diff:
diff.remove('change')
if 'geometry' in diff:
geomChanged = True
diff.remove('geometry')
if len(diff) == 0:
changed = False
if changed is True:
t['change'] = 'change'
thmgChanged = True
for missingTaggroup, oldTg in missingTgs:
prefix, id = missingTaggroup.split('_')
if isinstance(oldTg, dict):
thmg[missingTaggroup] = {'change': 'change'}
elif isinstance(oldTg, list):
thmg[missingTaggroup] = [{'change': 'change'}]
if id == 'map' or 'geometry' in oldTg:
geomChanged = True
if thmgChanged is True:
thmg['change'] = 'change'
catChanged = True
if catChanged is True:
cat['change'] = 'change'
if ref == {}:
# Special case: If there is no previous version, it is assumed that the
# geometry has changed in any case.
geomChanged = True
merged['geomchange'] = 'change' if geomChanged is True else ''
log.debug('Merged formdata: %s' % merged)
return merged
class DictDiffer(object):
"""
Thanks to http://stackoverflow.com/a/1165552/841644
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(
past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect
if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect
if self.past_dict[o] == self.current_dict[o])
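# Minimal usage sketch (values are made up):
#   d = DictDiffer({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
#   d.added()      # -> {'a'}
#   d.removed()    # -> {'c'}
#   d.changed()    # -> {'b'}
#   d.unchanged()  # -> set()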
def get_main_keys_from_item_json(item_json):
"""
Returns the keys of all main tags found in the complete json (a
Python dict) of an Activity or a Stakeholder as created by the
Protocol.
Args:
item_json (dict): The complete json of an Activity or a
Stakeholder as created by the Protocol.
Returns:
list. A list with all main keys of the item json.
"""
main_keys = []
if not isinstance(item_json, dict):
return main_keys
for taggroup in item_json.get('taggroups', []):
main_key = taggroup.get('main_tag', {}).get('key', None)
if main_key:
main_keys.append(main_key)
return main_keys
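# Illustrative call (the item json below is made up, not taken from a real
# configuration):
#   get_main_keys_from_item_json({'taggroups': [
#       {'main_tag': {'key': 'Intention', 'value': 'Agriculture'},
#        'tags': [{'key': 'Intention', 'value': 'Agriculture'}]}]})
#   # -> ['Intention']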
def get_value_by_key_from_taggroup_json(taggroup_json, key):
if not isinstance(taggroup_json, dict):
return None
found_tag = next((
tag for tag in taggroup_json.get('tags', []) if tag['key'] == key),
None)
if found_tag:
return found_tag['value']
return None
def get_value_by_key_from_item_json(item_json, key):
"""
Returns the value of a tag found by its key in the complete json
(a Python dict) of an Activity or a Stakeholder as created by the
Protocol.
.. important::
This function only returns the value of the first occurrence of
the key. However, there may be further Taggroups containing the
same key.
Args:
item_json (dict): The complete json of an Activity or a
Stakeholder as created by the Protocol.
key (str): The key to search inside the tags.
Returns:
str or None. The value of the tag if found, None if the key was
not found or if the json is invalid.
"""
if not isinstance(item_json, dict):
return None
for taggroup in item_json.get('taggroups', []):
found_tag = next((
tag for tag in taggroup.get('tags', []) if tag['key']
== key), None)
if found_tag:
return found_tag['value']
return None
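# Illustrative call (made-up item json):
#   item_json = {'taggroups': [{'tags': [
#       {'key': 'Intention', 'value': 'Agriculture'}]}]}
#   get_value_by_key_from_item_json(item_json, 'Intention')
#   # -> 'Agriculture'; an unknown key returns None.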
|
CDE-UNIBE/lokp
|
lokp/utils/form.py
|
Python
|
gpl-3.0
| 59,621
|
# -*- coding: utf-8
# pylint: disable=line-too-long
import os
import hashlib
import gzip
import shutil
import anvio
import anvio.db as db
import anvio.tables as t
import anvio.utils as utils
import anvio.hmmops as hmmops
import anvio.terminal as terminal
import anvio.constants as constants
import anvio.filesnpaths as filesnpaths
from anvio.drivers.hmmer import HMMer
from anvio.tables.tableops import Table
from anvio.parsers import parser_modules
from anvio.dbops import ContigsSuperclass
from anvio.errors import ConfigError, StupidHMMError
from anvio.tables.genecalls import TablesForGeneCalls
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "A. Murat Eren"
__email__ = "a.murat.eren@gmail.com"
__status__ = "Development"
run = terminal.Run()
progress = terminal.Progress()
pp = terminal.pretty_print
class TablesForHMMHits(Table):
def __init__(self, db_path, num_threads_to_use=1, run=run, progress=progress, initializing_for_deletion=False, just_do_it=False,
hmm_program_to_use='hmmscan', hmmer_output_directory=None, get_domain_table_output=False):
self.num_threads_to_use = num_threads_to_use
self.db_path = db_path
self.just_do_it = just_do_it
self.hmm_program = hmm_program_to_use or 'hmmscan'
self.hmmer_output_dir = hmmer_output_directory
self.hmmer_desired_output = ('table', 'domtable') if get_domain_table_output else 'table'
utils.is_contigs_db(self.db_path)
filesnpaths.is_program_exists(self.hmm_program)
self.contigs_db_hash = db.DB(self.db_path, utils.get_required_version_for_db(self.db_path)).get_meta_value('contigs_db_hash')
Table.__init__(self, self.db_path, anvio.__contigs__version__, run, progress)
self.init_gene_calls_dict()
if not len(self.gene_calls_dict):
if self.genes_are_called:
self.run.warning("Tables in this contigs database that should contain gene calls are empty despite the fact that "
"you didn't skip the gene calling step while generating this contigs database. This probably means "
"that the gene caller did not find any genes among contigs. This is OK for now. But might explode "
"later. If it does explode and you decide to let us know about that problem, please remember to mention "
"this warning. By the way, this warning probably has been seen by like only 2 people on the planet. Who "
"works with contigs with no gene calls? A better implementation of anvi'o will unite researchers who "
"study weird stuff.")
else:
self.run.warning("It seems you have skipped gene calling step while generating your contigs database, and you have no "
"genes calls in tables that should contain gene calls. Anvi'o will let you go with this since some HMM "
"sources only operate on DNA sequences, and at this point it doesn't know which HMMs you wish to run. "
"If the lack of genes causes a problem, you will get another error message later probably :/")
if not initializing_for_deletion:
self.set_next_available_id(t.hmm_hits_table_name)
def check_sources(self, sources):
sources_in_db = list(hmmops.SequencesForHMMHits(self.db_path).hmm_hits_info.keys())
if 'Ribosomal_RNAs' in sources_in_db and len([s for s in sources if s.startswith('Ribosomal_RNA_')]):
raise ConfigError("Here is one more additional step we need to you take care of before we can go forward: Your contigs database "
"already contains HMMs from an older `Ribosomal_RNAs` model anvi'o no longer uses AND you are about to run "
"its newer models that do the same thing (but better). Since Ribosomal RNA models add new gene calls to the "
"database, running newer models without first cleaning up the old ones will result in duplication of gene calls "
"as examplified here: https://github.com/merenlab/anvio/issues/1598. Anvi'o could've removed the `Ribosomal_RNAs` "
"model for you automatically, but the wisdom tells us that the person who passes the sentence should swing the "
"sword. Here it is for your grace: \"anvi-delete-hmms -c CONTIGS.db --hmm-source Ribosomal_RNAs\".")
sources_need_to_be_removed = set(sources.keys()).intersection(sources_in_db)
if len(sources_need_to_be_removed):
if self.just_do_it:
for source_name in sources_need_to_be_removed:
self.remove_source(source_name)
else:
raise ConfigError("Some of the HMM sources you wish to run on this database are already in the database and anvi'o "
"refuses to overwrite them without your explicit input. You can either use `anvi-delete-hmms` "
"to remove them first, or run this program with `--just-do-it` flag so anvi'o would remove all "
"for you. Here are the list of HMM sources that need to be removed: '%s'." % (', '.join(sources_need_to_be_removed)))
def hmmpress_sources(self, sources, tmp_dir):
"""This function runs hmmpress on the hmm profiles.
It returns the location of each hmmpressed file in a dictionary keyed by the source.
"""
hmmpressed_file_paths = {}
for source in sources:
model_file = sources[source]['model']
hmm_file_path = os.path.join(tmp_dir, source + '.hmm')
hmm_file = open(hmm_file_path, 'wb')
hmm_file.write(gzip.open(model_file, 'rb').read())
hmm_file.close()
log_file_path = os.path.join(tmp_dir, 'hmmpress.log')
cmd_line = ['hmmpress', hmm_file_path]
ret_val = utils.run_command(cmd_line, log_file_path)
hmmpressed_file_paths[source] = hmm_file_path
if ret_val:
raise ConfigError("Sadly, anvi'o failed while attempting to compress the HMM model for source %s. You can check out the log file (%s) for "
"more detailed information on why this happened." % (source, log_file_path))
return hmmpressed_file_paths
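# Illustrative return value (source names and temporary paths are made up):
#   {'Bacteria_71': '/tmp/tmpXYZ/Bacteria_71.hmm',
#    'Ribosomal_RNA_16S': '/tmp/tmpXYZ/Ribosomal_RNA_16S.hmm'}
# hmmpress writes its auxiliary .h3f/.h3i/.h3m/.h3p files next to each
# .hmm file; only the .hmm path itself is returned per source.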
def populate_search_tables(self, sources={}):
# make sure the output file is OK to write.
filesnpaths.is_output_file_writable(self.db_path, ok_if_exists=True)
# if we end up generating a temporary file for amino acid sequences:
if not len(sources):
import anvio.data.hmm
sources = anvio.data.hmm.sources
if not sources:
return
self.check_sources(sources)
target_files_dict = {}
tmp_directory_path = filesnpaths.get_temp_directory_path()
hmmpressed_files = self.hmmpress_sources(sources, tmp_directory_path)
self.run.info("Contigs DB", self.db_path)
self.run.info("HMM sources", ', '.join(sources.keys()))
# here we will go through targets and populate target_files_dict based on what we find among them.
targets = set([s['target'] for s in list(sources.values())])
have_hmm_sources_with_non_RNA_contig_context = False
for target in targets:
alphabet, context = utils.anvio_hmm_target_term_to_alphabet_and_context(target)
if not self.genes_are_called and context != "CONTIG":
raise ConfigError(f"You are in trouble. The gene calling was skipped for this contigs database, yet anvi'o asked to run an "
f"HMM profile that wishes to operate on {context} context using the {alphabet} alphabet. It is not OK. You "
f"still could run HMM profiles that does not require gene calls to be present (such as the HMM profile that "
f"identifies Ribosomal RNAs in contigs, but for that you would have to explicitly ask for it by using the "
f"additional parameter '--installed-hmm-profile PROFILE_NAME_HERE').")
self.run.info('Alphabet/context target found', '%s:%s' % (alphabet, context))
if context == 'CONTIG' and alphabet != 'RNA':
have_hmm_sources_with_non_RNA_contig_context = True
class Args: pass
args = Args()
args.contigs_db = self.db_path
contigs_db = ContigsSuperclass(args, r=terminal.Run(verbose=False))
if context == 'GENE':
target_file_path = os.path.join(tmp_directory_path, f'{alphabet}_gene_sequences.fa')
contigs_db.get_sequences_for_gene_callers_ids(output_file_path=target_file_path,
simple_headers=True,
rna_alphabet=True if alphabet=='RNA' else False,
report_aa_sequences=True if alphabet=='AA' else False)
target_files_dict[f'{alphabet}:GENE'] = target_file_path
elif context == 'CONTIG':
if alphabet == 'AA':
raise ConfigError("You are somewhere you shouldn't be. You came here because you thought it would be OK "
"to ask for AA sequences in the CONTIG context. The answer to that is 'no, thanks'. If "
"you think this is dumb, please let us know.")
else:
target_file_path = os.path.join(tmp_directory_path, f'{alphabet}_contig_sequences.fa')
utils.export_sequences_from_contigs_db(self.db_path,
target_file_path,
rna_alphabet=True if alphabet=='RNA' else False)
target_files_dict[f'{alphabet}:CONTIG'] = target_file_path
if have_hmm_sources_with_non_RNA_contig_context:
# in that case, we should remind people what's up.
self.run.warning("The HMM profiles that are about to be run includes at least one HMM profile that runs on "
"contigs and not genes. Thus, this HMM operation will not be working with gene calls anvi'o "
"already knows about. Which means, the resulting hits will need to be added as 'new gene calls' "
"into the contigs database. So far so good. But because we are in the realm of contigs rather "
"than genes, the resulting HMM hits will unlikely correspond to open reading frames that are "
"supposed to be translated (such as ribosomal RNAs). While anvi'o adds new gene calls to your "
"contigs database for these hits, it will NOT report amino acid sequences for the "
"new gene calls that will emerge from these HMMs, expecting you to judge whether this will "
"influence your pangenomic analyses or other things you thought you would be doing with the "
"result of this HMM search downstream. If you do not feel like being the judge of anything today "
"you can move on yet remember to remember this if things look somewhat weird later on.",
header="THE MORE YOU KNOW 🌈", lc="green")
commander = HMMer(target_files_dict, num_threads_to_use=self.num_threads_to_use, program_to_use=self.hmm_program)
for source in sources:
alphabet, context = utils.anvio_hmm_target_term_to_alphabet_and_context(sources[source]['target'])
if alphabet in ['DNA', 'RNA'] and 'domtable' in self.hmmer_desired_output:
raise ConfigError(f"Domain table output was requested (probably with the --get-domtable-output flag, "
f"does that look familiar?) but unfortunately this option is incompatible with the "
f"current source of HMM profiles, {source}, because this source uses a nucleotide "
f"alphabet.")
kind_of_search = sources[source]['kind']
domain = sources[source]['domain']
all_genes_searched_against = sources[source]['genes']
hmm_model = hmmpressed_files[source]
reference = sources[source]['ref']
noise_cutoff_terms = sources[source]['noise_cutoff_terms']
hmmer_output = commander.run_hmmer(source,
alphabet,
context,
kind_of_search,
domain,
len(all_genes_searched_against),
hmm_model,
reference,
noise_cutoff_terms,
desired_output=self.hmmer_desired_output,
hmmer_output_dir=self.hmmer_output_dir)
if self.hmmer_output_dir:
self.run.info("HMMER output directory", self.hmmer_output_dir)
if not isinstance(hmmer_output, tuple):
hmm_scan_hits_txt = hmmer_output
else:
hmm_scan_hits_txt, domain_hits_txt = hmmer_output
self.run.info("Domain table output", domain_hits_txt)
if not hmm_scan_hits_txt:
search_results_dict = {}
else:
try:
parser = parser_modules['search']['hmmer_table_output'](hmm_scan_hits_txt, alphabet=alphabet, context=context, program=self.hmm_program)
except StupidHMMError as e:
raise ConfigError(f"Unfortunately something went wrong while anvi'o was trying to parse some HMM output for your data. "
f"This error is typically due to contig names that are long and variable in length, which that "
f"confuses HMMER and so it generates output tables that are simply unparseable. Anvi'o does its best, "
f"but occasionally fails, which leads to this error. If you are curious why is this happening, you can take a "
f"look at this issue where this issue is described: https://github.com/merenlab/anvio/issues/1564. "
f"Solution to this is relatively easy: use `anvi-script-reformat-fasta` with `--simplify-names` flag "
f"BEFORE generating your contigs database as we advice you to. Sorry you came all this way just to "
f"find out about this :/ Here is the origial error message anvi'o produced from the code beneath: {e}.")
search_results_dict = parser.get_search_results()
if not len(search_results_dict):
run.info_single("The HMM source '%s' returned 0 hits. SAD (but it's stil OK)." % source, nl_before=1)
if context == 'CONTIG':
# we are in trouble here. because our search results dictionary contains no gene calls, but contig
# names contain our hits. on the other hand, the rest of the code outside of this if statement
# expects a `search_results_dict` with gene caller ids in it. so there are two things we need to do.
# one is to come up with some new gene calls and add them to the contigs database. so things
# will go smoothly downstream. two, we will need to update our `search_results_dict` so it looks
# like a dictionary the rest of the code expects, with `gene_callers_id` fields. both of these
# steps are going to be taken care of in the following function. magic.
num_hits_before = len(search_results_dict)
search_results_dict = utils.get_pruned_HMM_hits_dict(search_results_dict)
num_hits_after = len(search_results_dict)
if num_hits_before != num_hits_after:
self.run.info('Pruned', '%d out of %d hits were removed due to redundancy' % (num_hits_before - num_hits_after, num_hits_before))
search_results_dict = self.add_new_gene_calls_to_contigs_db_and_update_search_results_dict(kind_of_search,
search_results_dict,
skip_amino_acid_sequences=True)
self.append(source, reference, kind_of_search, domain, all_genes_searched_against, search_results_dict)
# FIXME: I have no clue why importing the anvio module is necessary at this point,
# but without this, mini test fails because "`anvio.DEBUG` is being used
# before initialization". nonsense.
import anvio
if not anvio.DEBUG:
commander.clean_tmp_dirs()
for v in list(target_files_dict.values()):
os.remove(v)
shutil.rmtree(tmp_directory_path)
def add_new_gene_calls_to_contigs_db_and_update_search_results_dict(self, source, search_results_dict, skip_amino_acid_sequences=False):
"""Add new gene calls to the contigs database and update the HMM `search_results_dict`.
When we are looking for HMM hits in the context of CONTIGS, our hits are not
related to the gene calls we already have in a given contigs database. One
solution is to add additional gene calls for a given set of HMM hits to keep
them in the database."""
if not len(search_results_dict):
return search_results_dict
# we will first learn the next available id in the gene callers table
database = db.DB(self.db_path, utils.get_required_version_for_db(self.db_path))
next_id = database.get_max_value_in_column('genes_in_contigs', 'gene_callers_id', value_if_empty=0) + 1
database.disconnect()
additional_gene_calls = {}
for e in search_results_dict.values():
start = e['start']
stop = e['stop']
if stop > start:
direction = 'f'
else:
direction = 'r'
stop, start = start, stop
partial = 0 if ((stop - start) % 3 == 0) else 1
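# The check above flags the call as partial when the hit length is not a
# multiple of three, i.e. when it is unlikely to span a complete open
# reading frame.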
# add a new gene call into the dictionary
additional_gene_calls[next_id] = {'contig': e['contig_name'],
'start': start,
'stop': stop,
'call_type': constants.gene_call_types['NONCODING'] if skip_amino_acid_sequences else constants.gene_call_types['CODING'],
'direction': direction,
'partial': partial,
'source': source,
'version': 'unknown'
}
# update the search results dictionary with gene callers id:
e['gene_callers_id'] = next_id
# update the next available gene callers id:
next_id += 1
if not len(additional_gene_calls):
return search_results_dict
# update the contigs db with the gene calls in `additional_gene_calls` dict.
gene_calls_table = TablesForGeneCalls(self.db_path, run=terminal.Run(verbose=False))
gene_calls_table.use_external_gene_calls_to_populate_genes_in_contigs_table(input_file_path=None,
gene_calls_dict=additional_gene_calls,
ignore_internal_stop_codons=True,
skip_amino_acid_sequences=skip_amino_acid_sequences)
gene_calls_table.populate_genes_in_splits_tables(gene_calls_dict=additional_gene_calls)
# refresh the gene calls dict
self.init_gene_calls_dict()
self.run.info('Gene calls added to db', '%d (from source "%s")' % (len(additional_gene_calls), source))
return search_results_dict
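# Rough shape of one entry in the `additional_gene_calls` dict built above
# (gene callers id, contig name, coordinates and source are made up for
# illustration):
#   {42: {'contig': 'c_000000000001', 'start': 100, 'stop': 1600,
#         'call_type': constants.gene_call_types['NONCODING'],
#         'direction': 'f', 'partial': 0,
#         'source': 'Ribosomal_RNA_16S', 'version': 'unknown'}}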
def remove_source(self, source):
"""Remove an HMM source from the database."""
tables_with_source = [
t.hmm_hits_info_table_name,
t.hmm_hits_table_name,
t.hmm_hits_splits_table_name,
t.genes_in_contigs_table_name,
t.gene_function_calls_table_name,
]
tables_with_gene_callers_id = [
t.gene_amino_acid_sequences_table_name,
t.genes_taxonomy_table_name,
t.genes_in_splits_table_name
]
# delete entries from tables with 'source' column
self.delete_entries_for_key('source', source, tables_with_source)
# collect gene caller ids that were added to the db via the HMM source
gene_caller_ids_to_remove = set(key for key, val in self.gene_calls_dict.items() if val['source'] == source)
# if there are any, remove them from tables with 'gene_callers_id' column
if len(gene_caller_ids_to_remove):
database = db.DB(self.db_path, utils.get_required_version_for_db(self.db_path))
CLAUSE = "gene_callers_id in (%s)" % (','.join([str(x) for x in gene_caller_ids_to_remove]))
for table in tables_with_gene_callers_id:
database.remove_some_rows_from_table(table, CLAUSE)
database.disconnect()
run.warning("%d gene caller ids that were added via the HMM source have been removed from \"%s\"" \
% (len(gene_caller_ids_to_remove), ', '.join(tables_with_gene_callers_id)))
def append(self, source, reference, kind_of_search, domain, all_genes, search_results_dict):
"""Append a new HMM source in the contigs database."""
# just to make 100% sure.
if source in list(hmmops.SequencesForHMMHits(self.db_path).hmm_hits_info.keys()):
raise ConfigError("The source '%s' you're trying to append is already in the database :( "
"You should have never been able to come here in the code unless you "
"have passed the `check_sources` sanity check. Very good but not "
"good really. Bad. Bad you." % source)
# we want to define unique identifiers for each gene first. this information will be used to track genes that will
# break into multiple pieces due to arbitrary split boundaries. while doing that, we will add the 'source' info
# into the dictionary, so it perfectly matches to the table structure
for entry_id in search_results_dict:
hit = search_results_dict[entry_id]
gene_call = self.gene_calls_dict[hit['gene_callers_id']]
hit['gene_unique_identifier'] = hashlib.sha224('_'.join([str(self.contigs_db_hash),
gene_call['contig'],
hit['gene_name'],
str(gene_call['start']),
str(gene_call['stop'])]).encode('utf-8')).hexdigest()
hit['source'] = source
database = db.DB(self.db_path, utils.get_required_version_for_db(self.db_path))
# push information about this search result into the hmm hits info table.
db_entries = [source, reference, kind_of_search, domain, ', '.join(all_genes)]
database._exec('''INSERT INTO %s VALUES (?,?,?,?,?)''' % t.hmm_hits_info_table_name, db_entries)
# if our search results were empty, we can return from here.
if not len(search_results_dict):
database.disconnect()
return
# then populate the hmm hits table with one entry per hit.
db_entries = []
for hit in list(search_results_dict.values()):
entry_id = self.next_id(t.hmm_hits_table_name)
db_entries.append(tuple([entry_id] + [hit[h] for h in t.hmm_hits_table_structure[1:]]))
# tiny hack here: for each hit, we are generating a unique id (`entry_id`), and feeding that information
# back into the dictionary to pass it to processing of splits, so each split-level
# entry knows who is their parent.
hit['hmm_hit_entry_id'] = entry_id
database._exec_many('''INSERT INTO %s VALUES (?,?,?,?,?,?,?)''' % t.hmm_hits_table_name, db_entries)
db_entries = self.process_splits(search_results_dict)
database._exec_many('''INSERT INTO %s VALUES (?,?,?,?)''' % t.hmm_hits_splits_table_name, db_entries)
database.disconnect()
def process_splits(self, search_results_dict):
hits_per_contig = {}
for hit in list(search_results_dict.values()):
contig_name = self.gene_calls_dict[hit['gene_callers_id']]['contig']
if contig_name in hits_per_contig:
hits_per_contig[contig_name].append(hit)
else:
hits_per_contig[contig_name] = [hit]
db_entries_for_splits = []
for contig in self.contigs_info:
if contig not in hits_per_contig:
# no hits for this contig. pity!
continue
for split_name in self.contig_name_to_splits[contig]:
split_start = self.splits_info[split_name]['start']
split_stop = self.splits_info[split_name]['end']
# FIXME: this really needs some explanation.
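# (Sketch of the logic below:) a gene call overlaps a split whenever the
# gene ends after the split starts and starts before the split ends. For
# such hits the gene coordinates are clamped to the split boundaries and
# converted to split-relative positions, and the hit is stored together
# with the percentage of the gene that falls into this split. A gene
# spanning several splits therefore produces one entry per split, with the
# percentages adding up to roughly 100.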
for hit in hits_per_contig[contig]:
hit_start = self.gene_calls_dict[hit['gene_callers_id']]['start']
hit_stop = self.gene_calls_dict[hit['gene_callers_id']]['stop']
if hit_stop > split_start and hit_start < split_stop:
gene_length = hit_stop - hit_start
# if only a part of the gene is in the split:
start_in_split = (split_start if hit_start < split_start else hit_start) - split_start
stop_in_split = (split_stop if hit_stop > split_stop else hit_stop) - split_start
percentage_in_split = (stop_in_split - start_in_split) * 100.0 / gene_length
db_entry = tuple([hit['hmm_hit_entry_id'], split_name, percentage_in_split, hit['source']])
db_entries_for_splits.append(db_entry)
return db_entries_for_splits
|
meren/anvio
|
anvio/tables/hmmhits.py
|
Python
|
gpl-3.0
| 27,874
|
import subprocess
import textwrap
import socket
import vim
import sys
import os
import imp
from ui import DebugUI
from dbgp import DBGP
def vim_init():
'''put DBG specific keybindings here -- e.g F1, whatever'''
vim.command('ca dbg Dbg')
def vim_quit():
'''remove DBG specific keybindings'''
vim.command('cuna dbg')
def get_vim(name, default, fn=str):
if vim.eval('exists("%s")' % name) == '1':
return fn(vim.eval(name))
return default
import types
class Registrar:
def __init__(self, args=(), kwds=(), named=True):
if named:
self.reg = {}
else:
self.reg = []
self.args = args
self.kwds = kwds
self.named = named
def register(self, *args, **kwds):
def meta(func):
self.add(func, args, kwds)
return meta
def add(self, func, args, kwds):
if self.named:
self.reg[args[0]] = {'function':func, 'args':args[1:], 'kwds':kwds}
else:
self.reg.append({'function':func, 'args':args, 'kwds':kwds})
return func
def bind(self, inst):
res = {}
for key, value in self.reg.iteritems():
value = value.copy()
res[key] = value
if callable(value['function']):
value['function'] = types.MethodType(value['function'], inst, inst.__class__)
return res
__call__ = register
class CmdRegistrar(Registrar):
def add(self, func, args, kwds):
lead = kwds.get('lead', '')
disabled_mappings = False
if vim.eval("exists('g:vim_debug_disable_mappings')") != "0":
disabled_mappings = vim.eval("g:vim_debug_disable_mappings") != "0"
if lead and not disabled_mappings:
vim.command('map <Leader>%s :Dbg %s<cr>' % (lead, args[0]))
dct = {'function':func, 'options':kwds}
for name in args:
self.reg[name] = dct
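# Rough sketch of how these registrars are used further down in this
# module: calling an instance returns a decorator, so
#   cmd('over', help='step over next function call', lead='o')('step_over')
# registers the string 'step_over' under the command name 'over' (and, via
# `lead`, maps <Leader>o to ':Dbg over'), while
#   @cmd('up', help='go up the stack', lead='u')
#   def up(self): ...
# stores the decorated function itself. Registrar.bind() later turns the
# stored functions into methods bound to a given instance; string entries
# are left as-is and are resolved against the instance elsewhere (see
# Debugger.start below for the `handle` registrar).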
class Debugger:
''' This is the main debugger class... '''
options = {'port':9000, 'max_children':32, 'max_data':'1024', 'minbufexpl':0, 'max_depth':1}
def __init__(self):
self.started = False
self.watching = {}
self._type = None
def init_vim(self):
self.ui = DebugUI()
self.settings = {}
for k,v in self.options.iteritems():
self.settings[k] = get_vim(k, v, type(v))
vim_init()
def start_url(self, url):
if '?' in url:
url += '&'
else:
url += '?'
url += 'XDEBUG_SESSION_START=vim_phpdebug'
self._type = 'php'
# only linux and mac supported atm
command = 'xdg-open' if sys.platform.startswith('linux') else 'open'
try:
subprocess.Popen((command, url), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
print 'failed to start a browser. aborting debug session'
return
return self.start()
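    # Illustrative example (added, not part of the original source): calling
    # start_url('http://localhost/index.php') opens
    # 'http://localhost/index.php?XDEBUG_SESSION_START=vim_phpdebug' in the default
    # browser (xdg-open on Linux, open otherwise) before starting the debug session.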
def start_py(self, fname):
if os.name == 'nt':
_,PYDBGP,_ = imp.find_module('dbgp')
PYDBGP = PYDBGP + '/../EGG-INFO/scripts/pydbgp.py'
subprocess.Popen(('python.exe',PYDBGP, '-d', 'localhost:9000', fname), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
subprocess.Popen(('pydbgp.py', '-d', 'localhost:9000', fname), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self._type = 'python'
return self.start()
def start(self):
## self.breaks = BreakPointManager()
self.started = True
self.bend = DBGP(self.settings, self.ui.windows['log'].write, self._type)
for key, value in self.handle.bind(self).iteritems():
if callable(value['function']):
fn = value['function']
else:
tmp = self
for item in value['function'].split('.'):
tmp = getattr(tmp, item)
fn = tmp
self.bend.addCommandHandler(key, fn)
self.bend.addCommandHandler('<stream>', self.ui.windows['output'].add)
if not self.bend.connect():
print textwrap.dedent('''\
Unable to connect to debug server. Things to check:
- you refreshed the page during the 5 second
period
- you have the xdebug extension installed (apt-get
install php5-xdebug on ubuntu)
- you set the XDEBUG_SESSION_START cookie
- "xdebug.remote_enable = 1" is in php.ini (not
enabled by default)
If you have any questions, look at
http://tech.blog.box.net/2007/06/20/how-to-debug-php-with-vim-and-xdebug-on-linux/
''')
return False
self.ui.startup()
self.bend.get_packets(1)
self.bend.command('feature_set', 'n', 'max_children', 'v', self.settings['max_children'])
self.bend.command('feature_set', 'n', 'max_data', 'v', self.settings['max_data'])
self.bend.command('feature_set', 'n', 'max_depth', 'v', self.settings['max_depth'])
self.bend.command('stdout', 'c', '1')
self.bend.command('stderr', 'c', '1')
for name in ('max_children', 'max_data', 'max_depth'):
self.bend.command('feature_set', 'n', name, 'v', self.settings[name], suppress=True)
self.bend.command('step_into')
self.bend.command('context_get')
self.bend.command('stack_get')
self.bend.command('status')
self.ui.go_srcview()
def set_status(self, status):
self.status = status
# self.party
''' setup + register vim commands '''
cmd = CmdRegistrar()
cmd('over', help='step over next function call', lead='o')('step_over')
cmd('into', help='step into next function call', lead='i')('step_into')
cmd('out', help='step out of current function call', lead='t')('step_out')
cmd('run', help='continue execution until a breakpoint is reached or the program ends', lead='r')('run')
@cmd('eval', help='eval some code', plain=True)
def eval_(self, code):
self.bend.command('eval', data=code)
self.bend.command('context_get')
@cmd('quit', 'stop', 'exit', help='exit the debugger')
def quit(self):
self.bend.close()
self.ui.close()
vim_quit()
@cmd('up', help='go up the stack', lead='u')
def up(self):
self.ui.stack_up()
@cmd('down', help='go down the stack', lead='d')
def down(self):
self.ui.stack_down()
@cmd('watch', help='execute watch functions', lead='w')
def watch(self):
lines = self.ui.windows['watch'].expressions.buffer
self.watching = {}
for i, line in enumerate(lines[1:]):
if not line.strip():continue
# self.ui.windows['log'].write('evalling:' + line)
tid = self.bend.command('eval', data=line, suppress=True)
self.watching[tid] = i+1
self.bend.get_packets()
@cmd('break', help='set a breakpoint', lead='b')
def break_(self):
(row, col) = vim.current.window.cursor
file = os.path.abspath(vim.current.buffer.name)
if not os.path.exists(file):
print 'Not in a file'
return
bid = self.ui.break_at(file, row)
if bid == -1:
tid = self.bend.cid + 1
self.ui.queue_break(tid, file, row)
self.bend.command('breakpoint_set', 't', 'line', 'f', 'file://' + file, 'n', row, data='')
else:
tid = self.bend.cid + 1
self.ui.queue_break_remove(tid, bid)
self.bend.command('breakpoint_remove', 'd', bid)
@cmd('here', help='continue execution until the cursor (tmp breakpoint)', lead='h')
def here(self):
(row, col) = vim.current.window.cursor
file = os.path.abspath(vim.current.buffer.name)
if not os.path.exists(file):
print 'Not in a file'
return
tid = self.bend.cid + 1
# self.ui.queue_break(tid, file, row)
self.bend.command('breakpoint_set', 't', 'line', 'r', '1', 'f', 'file://' + file, 'n', row, data='')
self.bend.command('run')
def commands(self):
self._commands = self.cmd.bind(self)
return self._commands
handle = Registrar()
@handle('stack_get')
def _stack_get(self, node):
line = self.ui.windows['stack'].refresh(node)
self.ui.set_srcview(line[2], line[3])
@handle('breakpoint_set')
def _breakpoint_set(self, node):
self.ui.set_break(int(node.getAttribute('transaction_id')), node.getAttribute('id'))
self.ui.go_srcview()
@handle('breakpoint_remove')
def _breakpoint_remove(self, node):
self.ui.clear_break(int(node.getAttribute('transaction_id')))
self.ui.go_srcview()
def _status(self, node):
if node.getAttribute('reason') == 'ok':
self.set_status(node.getAttribute('status'))
def _change(self, node):
if node.getAttribute('reason') == 'ok':
self.set_status(node.getAttribute('status'))
if self.status != 'stopping':
try:
self.bend.command('context_get')
self.bend.command('stack_get')
except (EOFError, socket.error):
self.disable()
else:
self.disable()
def disable(self):
print 'Execution has ended; connection closed. type :Dbg quit to exit debugger'
self.ui.unhighlight()
for cmd in self._commands.keys():
if cmd not in ('quit', 'close'):
self._commands.pop(cmd)
@handle('<init>')
def _init(self, node):
file = node.getAttribute('fileuri')
self.ui.set_srcview(file, 1)
handle('status')(_status)
handle('stdout')(_status)
handle('stderr')(_status)
handle('step_into')(_change)
handle('step_out')(_change)
handle('step_over')(_change)
handle('run')(_change)
def _log(self, node):
self.ui.windows['log'].write(node.toprettyxml(indent=' '))
pass # print node
@handle('eval')
def _eval(self, node):
id = int(node.getAttribute('transaction_id'))
if id in self.watching:
self.ui.windows['watch'].set_result(self.watching.pop(id), node)
self.ui.windows['watch'].expressions.focus()
handle('property_get')(_log)
handle('property_set')(_log)
@handle('context_get')
def _context_get(self, node):
self.ui.windows['scope'].refresh(node)
handle('feature_set')(_log)
# vim: et sw=4 sts=4
| elsdrm/vim-debug | plugin/vim_debug/new_debugger.py | Python | gpl-3.0 | 10,718 |
from flask import Flask
# from flask.ext.mail import Mail
# from flask.ext.sqlalchemy import SQLAlchemy
# from flask.ext.security import Security
# from flask.ext.babel import Babel
# from flask.ext.assets import Environment
# mail = Mail()
# db = SQLAlchemy()
# security = Security()
# assets = Environment()
# babel = Babel()
| jkur/snmp-manager | app/__init__.py | Python | gpl-3.0 | 330 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 - 2021 Pytroll developers
#
# Author(s):
#
# Martin Raspaud <martin.raspaud@smhi.se>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test suite for the scisys receiver."""
# Test cases.
import datetime
import os
import unittest
from pytroll_collectors.scisys import MessageReceiver, TwoMetMessage
hostname = 'localhost'
input_stoprc = '<message timestamp="2013-02-18T09:21:35" sequence="7482" severity="INFO" messageID="0" type="2met.message" sourcePU="SMHI-Linux" sourceSU="POESAcquisition" sourceModule="POES" sourceInstance="1"><body>STOPRC Stop reception: Satellite: NPP, Orbit number: 6796, Risetime: 2013-02-18 09:08:09, Falltime: 2013-02-18 09:21:33</body></message>' # noqa
input_dispatch_viirs = '<message timestamp="2013-02-18T09:24:20" sequence="27098" severity="INFO" messageID="8250" type="2met.filehandler.sink.success" sourcePU="SMHI-Linux" sourceSU="GMCSERVER" sourceModule="GMCSERVER" sourceInstance="1"><body>FILDIS File Dispatch: /data/npp/RNSCA-RVIRS_npp_d20130218_t0908103_e0921256_b00001_c20130218092411165000_nfts_drl.h5 ftp://{hostname}:21/tmp/RNSCA-RVIRS_npp_d20130218_t0908103_e0921256_b00001_c20130218092411165000_nfts_drl.h5</body></message>'.format( # noqa
hostname=hostname)
input_dispatch_atms = '<message timestamp="2013-02-18T09:24:21" sequence="27100" severity="INFO" messageID="8250" type="2met.filehandler.sink.success" sourcePU="SMHI-Linux" sourceSU="GMCSERVER" sourceModule="GMCSERVER" sourceInstance="1"><body>FILDIS File Dispatch: /data/npp/RATMS-RNSCA_npp_d20130218_t0908194_e0921055_b00001_c20130218092411244000_nfts_drl.h5 ftp://{hostname}:21/tmp/RATMS-RNSCA_npp_d20130218_t0908194_e0921055_b00001_c20130218092411244000_nfts_drl.h5</body></message>'.format( # noqa
hostname=hostname)
viirs = {'platform_name': 'Suomi-NPP', 'format': 'RDR',
'start_time': datetime.datetime(2013, 2, 18, 9, 8, 10, 300000),
'data_processing_level': '0', 'orbit_number': 6796,
'uri': 'ssh://{hostname}/tmp/RNSCA-RVIRS_npp_d20130218_t0908103_e0921256_b00001_c20130218092411165000_nfts_drl.h5'.format(hostname=hostname), # noqa
'uid': 'RNSCA-RVIRS_npp_d20130218_t0908103_e0921256_b00001_c20130218092411165000_nfts_drl.h5',
'sensor': 'viirs',
'end_time': datetime.datetime(2013, 2, 18, 9, 21, 25, 600000),
'type': 'HDF5', 'variant': 'DR'}
atms = {'platform_name': 'Suomi-NPP', 'format': 'RDR', 'start_time':
datetime.datetime(2013, 2, 18, 9, 8, 19, 400000),
'data_processing_level': '0', 'orbit_number': 6796, 'uri':
'ssh://{hostname}/tmp/RATMS-RNSCA_npp_d20130218_t0908194_e0921055_b00001_c20130218092411244000_nfts_drl.h5'.format( # noqa
hostname=hostname),
'uid':
'RATMS-RNSCA_npp_d20130218_t0908194_e0921055_b00001_c20130218092411244000_nfts_drl.h5',
'sensor': 'atms',
'end_time': datetime.datetime(2013, 2, 18, 9, 21, 5, 500000),
'type': 'HDF5', 'variant': 'DR'}
stoprc_terra = '<message timestamp="2014-10-30T21:03:50" sequence="6153" severity="INFO" messageID="0" type="2met.message" sourcePU="SMHI-Linux" sourceSU="POESAcquisition" sourceModule="POES" sourceInstance="1"><body>STOPRC Stop reception: Satellite: TERRA, Orbit number: 79082, Risetime: 2014-10-30 20:49:50, Falltime: 2014-10-30 21:03:50</body></message>' # noqa
fildis_terra = '<message timestamp="2014-10-30T21:03:57" sequence="213208" severity="INFO" messageID="8250" type="2met.filehandler.sink.success" sourcePU="SMHI-Linux" sourceSU="GMCSERVER" sourceModule="GMCSERVER" sourceInstance="1"><body>FILDIS File Dispatch: /data/modis/P0420064AAAAAAAAAAAAAA14303204950001.PDS ftp://{hostname}:21/tmp/P0420064AAAAAAAAAAAAAA14303204950001.PDS</body></message>'.format( # noqa
hostname=hostname)
msg_terra = {"platform_name": "EOS-Terra", "uri":
"ssh://{hostname}/tmp/P0420064AAAAAAAAAAAAAA14303204950001.PDS".format(hostname=hostname), "format": "PDS",
"start_time": datetime.datetime(2014, 10, 30, 20, 49, 50),
"data_processing_level": "0", "orbit_number": 79082, "uid":
"P0420064AAAAAAAAAAAAAA14303204950001.PDS",
"sensor": "modis",
"end_time": datetime.datetime(2014, 10, 30, 21, 3, 50),
"type": "binary", 'variant': 'DR'}
stoprc_n19 = '<message timestamp="2014-10-28T07:25:37" sequence="472" severity="INFO" messageID="0" type="2met.message" sourcePU="SMHI-Linux" sourceSU="HRPTAcquisition" sourceModule="FSSRVC" sourceInstance="1"><body>STOPRC Stop reception: Satellite: NOAA 19, Orbit number: 29477, Risetime: 2014-10-28 07:16:01, Falltime: 2014-10-28 07:25:37</body></message>' # noqa
fildis_n19 = '<message timestamp="2014-10-28T07:25:43" sequence="203257" severity="INFO" messageID="8250" type="2met.filehandler.sink.success" sourcePU="SMHI-Linux" sourceSU="GMCSERVER" sourceModule="GMCSERVER" sourceInstance="1"><body>FILDIS File Dispatch: /data/hrpt/20141028071601_NOAA_19.hmf ftp://{hostname}:21/tmp/20141028071601_NOAA_19.hmf</body></message>'.format( # noqa
hostname=hostname)
msg_n19 = {"platform_name": "NOAA-19", "format": "HRPT",
"start_time": datetime.datetime(2014, 10, 28, 7, 16, 1),
"data_processing_level": "0", "orbit_number": 29477,
"uri": "ssh://{hostname}/tmp/20141028071601_NOAA_19.hmf".format(hostname=hostname),
"uid": "20141028071601_NOAA_19.hmf",
"sensor": ("avhrr/3", "mhs", "amsu-a", "hirs/4"),
"end_time": datetime.datetime(2014, 10, 28, 7, 25, 37),
"type": "binary", 'variant': 'DR'}
stoprc_m01 = '<message timestamp="2014-10-28T08:45:22" sequence="1157" severity="INFO" messageID="0" type="2met.message" sourcePU="SMHI-Linux" sourceSU="HRPTAcquisition" sourceModule="FSSRVC" sourceInstance="1"><body>STOPRC Stop reception: Satellite: METOP-B, Orbit number: 10948, Risetime: 2014-10-28 08:30:10, Falltime: 2014-10-28 08:45:22</body></message>' # noqa
fildis_m01 = '<message timestamp="2014-10-28T08:45:27" sequence="203535" severity="INFO" messageID="8250" type="2met.filehandler.sink.success" sourcePU="SMHI-Linux" sourceSU="GMCSERVER" sourceModule="GMCSERVER" sourceInstance="1"><body>FILDIS File Dispatch: /data/metop/MHSx_HRP_00_M01_20141028083003Z_20141028084510Z_N_O_20141028083010Z ftp://{hostname}:21/tmp/MHSx_HRP_00_M01_20141028083003Z_20141028084510Z_N_O_20141028083010Z</body></message>'.format( # noqa
hostname=hostname)
msg_m01 = {"platform_name": "Metop-B", "format": "EPS",
"start_time": datetime.datetime(2014, 10, 28, 8, 30, 3),
"data_processing_level": "0", "orbit_number": 10948,
"uri": "ssh://{hostname}/tmp/MHSx_HRP_00_M01_20141028083003Z_20141028084510Z_N_O_20141028083010Z".format(hostname=hostname), # noqa
"uid": "MHSx_HRP_00_M01_20141028083003Z_20141028084510Z_N_O_20141028083010Z",
"sensor": "mhs",
"end_time": datetime.datetime(2014, 10, 28, 8, 45, 10),
"type": "binary", 'variant': 'DR'}
startrc_npp2 = '<message timestamp="2014-10-31T08:53:52" sequence="9096" severity="INFO" messageID="0" type="2met.message" sourcePU="SMHI-Linux" sourceSU="POESAcquisition" sourceModule="POES" sourceInstance="1"><body>STRTRC Start reception: Satellite: NPP, Orbit number: 15591, Risetime: 2014-10-31 08:53:52, Falltime: 2014-10-31 09:06:28</body></message>' # noqa
stoprc_npp2 = '<message timestamp="2014-10-31T09:06:28" sequence="9340" severity="INFO" messageID="0" type="2met.message" sourcePU="SMHI-Linux" sourceSU="POESAcquisition" sourceModule="POES" sourceInstance="1"><body>STOPRC Stop reception: Satellite: NPP, Orbit number: 15591, Risetime: 2014-10-31 08:53:52, Falltime: 2014-10-31 09:06:28</body></message>' # noqa
fildis_npp2 = '<message timestamp="2014-10-31T09:06:25" sequence="216010" severity="INFO" messageID="8250" type="2met.filehandler.sink.success" sourcePU="SMHI-Linux" sourceSU="GMCSERVER" sourceModule="GMCSERVER" sourceInstance="1"><body>FILDIS File Dispatch: /data/npp/RCRIS-RNSCA_npp_d20141031_t0905166_e0905484_b00001_c20141031090623200000_nfts_drl.h5 ftp://{hostname}:21//tmp</body></message>'.format( # noqa
hostname=hostname)
msg_npp2 = {"orbit_number": 15591,
"uid": "RCRIS-RNSCA_npp_d20141031_t0905166_e0905484_b00001_c20141031090623200000_nfts_drl.h5",
"format": "RDR", "sensor": "cris",
"start_time": datetime.datetime(2014, 10, 31, 9, 5, 16, 600000),
"uri": "ssh://{hostname}//tmp/RCRIS-RNSCA_npp_d20141031_t0905166_e0905484_b00001_c20141031090623200000_nfts_drl.h5".format(hostname=hostname), # noqa
"platform_name": "Suomi-NPP",
"end_time": datetime.datetime(2014, 10, 31, 9, 5, 48, 400000),
"type": "HDF5", "data_processing_level": "0", 'variant': 'DR'}
def touch(fname):
"""Create an empty file."""
open(fname, 'a').close()
class ScisysReceiverTest(unittest.TestCase):
"""Testing the Scisys receiver."""
def test_reception(self):
"""Test the reception."""
msg_rec = MessageReceiver("nimbus")
# NPP
string = TwoMetMessage(input_stoprc)
to_send = msg_rec.receive(string)
self.assertTrue(to_send is None)
filename = os.path.join('/tmp', viirs['uid'])
touch(filename)
string = TwoMetMessage(input_dispatch_viirs)
to_send = msg_rec.receive(string)
self.assertDictEqual(to_send, viirs)
os.remove(filename)
filename = os.path.join('/tmp', atms['uid'])
touch(filename)
string = TwoMetMessage(input_dispatch_atms)
to_send = msg_rec.receive(string)
self.assertDictEqual(to_send, atms)
os.remove(filename)
# NPP with start
string = TwoMetMessage(startrc_npp2)
to_send = msg_rec.receive(string)
self.assertTrue(to_send is None)
filename = os.path.join('/tmp', msg_npp2['uid'])
touch(filename)
string = TwoMetMessage(fildis_npp2)
to_send = msg_rec.receive(string)
self.assertDictEqual(to_send, msg_npp2)
os.remove(filename)
string = TwoMetMessage(stoprc_npp2)
to_send = msg_rec.receive(string)
self.assertTrue(to_send is None)
filename = os.path.join('/tmp', msg_npp2['uid'])
touch(filename)
string = TwoMetMessage(fildis_npp2)
to_send = msg_rec.receive(string)
self.assertDictEqual(to_send, msg_npp2)
os.remove(filename)
# Terra
string = TwoMetMessage(stoprc_terra)
to_send = msg_rec.receive(string)
self.assertTrue(to_send is None)
filename = os.path.join('/tmp', msg_terra['uid'])
touch(filename)
string = TwoMetMessage(fildis_terra)
to_send = msg_rec.receive(string)
self.assertDictEqual(to_send, msg_terra)
os.remove(filename)
# NOAA-19
string = TwoMetMessage(stoprc_n19)
to_send = msg_rec.receive(string)
self.assertTrue(to_send is None)
filename = os.path.join('/tmp', msg_n19['uid'])
touch(filename)
string = TwoMetMessage(fildis_n19)
to_send = msg_rec.receive(string)
self.assertDictEqual(to_send, msg_n19)
os.remove(filename)
# Metop-B
string = TwoMetMessage(stoprc_m01)
to_send = msg_rec.receive(string)
self.assertTrue(to_send is None)
filename = os.path.join('/tmp', msg_m01['uid'])
touch(filename)
string = TwoMetMessage(fildis_m01)
to_send = msg_rec.receive(string)
self.assertDictEqual(to_send, msg_m01)
os.remove(filename)
def suite():
"""Test suite for test_scisys."""
loader = unittest.TestLoader()
mysuite = unittest.TestSuite()
mysuite.addTest(loader.loadTestsFromTestCase(ScisysReceiverTest))
return mysuite
if __name__ == '__main__':
unittest.main()
| pytroll/pytroll-collectors | pytroll_collectors/tests/test_scisys.py | Python | gpl-3.0 | 12,537 |
#!/usr/bin/python
import ldap
import ldap.modlist as modlist
l = ldap.initialize("ldap://192.168.1.5")
l.simple_bind_s("cn=admin,dc=pyatt,dc=lan","qwerty")
try:
#l.search_s("cn=admin,dc=pyatt,dc=lan", ldap.SCOPE_SUBTREE, "objectclass=*")
print l.search_s('ou=Group,dc=pyatt,dc=lan', ldap.SCOPE_SUBTREE,'(cn=kpyatt*)',['cn','objectClass','Password'])
except:
print "Didn't find that user"
import ldif
"""
dn = "cn=foobar,ou=Group,dc=pyatt,dc=lan"
insertLDIF = {}
#insertLDIF['ou'] = ['People','Group']
insertLDIF['objectClass'] = ['posixGroup','top']
insertLDIF['gidNumber'] = ['550']
insertLDIF['memberUid'] = ['foobar']
insertLDIF['userpassword'] = ['barfoo']
#myLDIF = ldif.CreateLDIF(dn, insertLDIF)
#print myLDIF
# Send the add LDIF
newldif = modlist.addModlist(insertLDIF)
#l.add_s(dn, newldif)
print "Added to group: Group"
"""
# This adds a new user to Docuwiki and SSH
dn = "cn=Foo Bar,ou=People,dc=pyatt,dc=lan"
insertLDIF = {}
insertLDIF['cn'] = ['Foo Bar']
insertLDIF['gidNumber'] = ['500']
insertLDIF['givenName'] = ['Foo']
insertLDIF['homeDirectory'] = ['/home/foobar']
insertLDIF['objectClass'] = ['inetOrgPerson','posixAccount','top']
insertLDIF['userpassword'] = ['barfoo']
insertLDIF['sn'] = ['Bar']
insertLDIF['userid'] = ['foobar']
insertLDIF['uidNumber'] = ['1002']
insertLDIF['ou'] = ['People','Group']
insertLDIF['loginShell'] = ['/bin/bash']
# This prints an LDIF file
myLDIF = ldif.CreateLDIF(dn, insertLDIF)
print myLDIF
# Send the add LDIF
newldif = modlist.addModlist(insertLDIF)
l.add_s(dn, newldif)
print "Added to group: People"
# All done here
l.unbind_s()
| roguefalcon/rpi_docker_images | user-registration/ldap_test.py | Python | gpl-3.0 | 1,607 |
"""The game of life commands"""
from GOL_Sim.GOL_Simulation import GOL_Simulation
from src.CommandSystem import CommandSystem
GOL_COMMANDS = CommandSystem(help_summary='Game of life genetic algorithm commands.')
GOL_INSTANCES = {}
GOL_MAX_PROCESSING = 50 ** 50 * (7 * 7)
GOL_MAX_CYCLES = 25
def _numberify(terms):
num_terms = []
for index, term in enumerate(terms):
try:
if '.' in term:
num_terms.append(float(term))
else:
num_terms.append(int(term))
except ValueError:
return index
return num_terms
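# Illustrative examples (added, not part of the original source):
#   _numberify(['50', '5', '0.025'])  returns [50, 5, 0.025]
#   _numberify(['50', 'five'])        returns 1, the index of the first term
#                                     that is not a number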
def _gol_new_validate(args):
default_vals = [50, 5, 5, 30]
intensive_args = [args[i] if len(args) > i else val for i, val in enumerate(default_vals)]
accumulator = intensive_args[0] ** intensive_args[3] * (intensive_args[1] * intensive_args[2])
if accumulator <= GOL_MAX_PROCESSING:
return True
async def _gol_new(client, command_terms, message):
string_args = command_terms.split(' ')[2:]
args = _numberify(string_args)
response = ''
if not message.server:
response = 'GOL commands will only work on a server.'
elif isinstance(args, int):
response += 'The term at index ' + str(args + 2) + ' has to be a number.\n'
elif len(args) > 6:
response += 'Expecting 6 or less terms.'
if not response and str(message.server):
if _gol_new_validate(args):
if str(message.server) in GOL_INSTANCES:
del GOL_INSTANCES[str(message.server)]
try:
GOL_INSTANCES[str(message.server)] = GOL_Simulation(*args)
response = 'Successfully created a new game of life genetic algorithm.'
except TypeError:
response = 'All arguments have to be integers except for mutation chance which is a float.'
else:
response = 'Max processing exceeded. Please choose smaller input arguments.'
await client.send_message(message.channel, response)
GOL_COMMANDS.add_command(
'new',
cmd_func=_gol_new,
help_summary='Create a new game of life genetic algorithm.',
help_full='Where all arguments are optional and all are numbers.\nDefaults: `size=50, width=5, height=5, iterations=30, mutation_chance=0.025, creatures_to_remain=5`\nUsage: `gol new size width height iterations mutation_chance creatures_to_remain`'
)
def _validate_gol_instance(server):
if server and server in GOL_INSTANCES:
return ''
else:
return 'Game of life instance does not exist. To create, use `gol new`'
def _cycle_instance(instance):
instance.evaluate()
response = instance.stats()
instance.evolve_population()
return response
async def _gol_next_cycle(client, command_terms, message):
response = _validate_gol_instance(str(message.server))
if not response:
response = _cycle_instance(GOL_INSTANCES[str(message.server)])
await client.send_message(message.channel, response)
GOL_COMMANDS.add_command(
'next_cycle',
cmd_func=_gol_next_cycle,
help_summary='Evolves the population and gives stats for the population.',
help_full='Receives no arguments.\nFirst it will evaluate and then it will get the stats after evaluation and finally evolve the population.'
)
PROGRESS_MESSAGE = {}
PROGRESS_MESSAGE['format'] = lambda curr, limit: 'Progress: ' + str(curr / limit * 100)[:5] + '%'
async def _send_progress(client, message, curr, limit):
if PROGRESS_MESSAGE['new_message']:
PROGRESS_MESSAGE['new_message'] = False
PROGRESS_MESSAGE['message'] = await client.send_message(message.channel, PROGRESS_MESSAGE['format'](curr, limit))
else:
PROGRESS_MESSAGE['message'] = await client.edit_message(PROGRESS_MESSAGE['message'], PROGRESS_MESSAGE['format'](curr, limit))
async def _gol_cycle(client, command_terms, message):
response = ''
response = _validate_gol_instance(str(message.server))
PROGRESS_MESSAGE['new_message'] = True
if not response:
try:
max_iterations = int(command_terms.split(' ')[2])
if 1 <= max_iterations <= GOL_MAX_CYCLES:
for i in range(max_iterations):
response = _cycle_instance(GOL_INSTANCES[str(message.server)])
if i < max_iterations - 1:
await _send_progress(client, message, i + 1, max_iterations)
                if not PROGRESS_MESSAGE['new_message']:
await client.delete_message(PROGRESS_MESSAGE['message'])
else:
response = 'Limit out of range. Choose an integer between 1-' + str(GOL_MAX_CYCLES) + '.'
except ValueError:
response = 'The second argument has to be an integer between 1-' + str(GOL_MAX_CYCLES) + '.'
await client.send_message(message.channel, response)
GOL_COMMANDS.add_command(
'cycle',
cmd_func=_gol_cycle,
help_summary='Evolves the population a number of times and gives stats for the population afterwards.',
help_full='Receives 1 argument; a number from 1-' + str(GOL_MAX_CYCLES) + ' to cycle through the simulation.\n Usage: `gol cycle limit`'
)
| eniallator/Discord-EniBot | src/CommandSystems/GOL.py | Python | gpl-3.0 | 5,184 |
#task_6
import matplotlib.pyplot as plt
import networkx as nx
# reading from file
input_file = open('data.txt', 'r')
list_of_lines = input_file.readlines()
# creating nodes and edges of the graph
graph = nx.Graph()
for s in list_of_lines:
a, b, w = tuple(s.split())
graph.add_edge(a, b, weight = float(w))
# find and print the path
pos = nx.circular_layout(graph)
s, t = graph.nodes()[0], graph.nodes()[6]
print('From ', s, 'to ', t)
path = list()
if nx.has_path(graph, s, t):
path = nx.shortest_path(graph, s, t, 'weight')
print(path)
else:
    print('no path!')
nx.draw_networkx_nodes(graph, pos,
nodelist = [node for node in graph if node not in path],
node_color='blue',
node_size=400,
alpha=0.5)
nx.draw_networkx_edges(graph, pos,
edgelist = [edge for edge in graph.edges() if edge not in [(path[i - 1], path[i]) for i in range(1, len(path))]],
alpha=0.5,
edge_color='green')
nx.draw_networkx_nodes(graph, pos, nodelist = path,
node_color='red',
node_size=300,
alpha=1)
nx.draw_networkx_edges(nx.DiGraph(graph), pos,
edgelist = [(path[i - 1], path[i]) for i in range(1, len(path))],
alpha=1,
edge_color='orange',
arrows = True)
nx.draw_networkx_labels(graph, pos,
labels = {s: '1', t: '2'})
plt.show()
# PATH=C:\Program Files\Common Files\Microsoft Shared\Windows Live;C:\Users\Semjon\AppData\Local\Programs\Python\Python35-32\Scripts;C:\Users\Semjon\AppData\Local\Programs\Python\Python35-32;C:\ProgramData\Oracle\Java\javapath;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files (x86)\ATI Technologies\ATI.ACE\Core-Static;C:\Program Files (x86)\Skype\Phone\;C:\Program Files\Git\cmd;C:\Program Files\Common Files\Microsoft Shared\Windows Live;C:\Users\Semjon\LaTeX\miktex\bin\x64\;
| Senbjorn/mipt_lab_2016 | lab_20/task_6.py | Python | gpl-3.0 | 1,869 |
from django.contrib import admin
from dream.core.models import MatchTeam
@admin.register(MatchTeam)
class MatchTeamAdmin(admin.ModelAdmin):
list_display = (
'team',
'role',
'tactics',
'tactics_ref'
)
| alumarcu/dream-framework | dream/core/admin/match_team.py | Python | gpl-3.0 | 243 |
#!/usr/bin/python
'''\
Kickstart Debugger, written by Schlomo Schapiro.
Licensed under the <a href="http://www.gnu.org/licenses/gpl.html">GNU General Public License</a>.
The favicon.ico is taken from the XFCE project.
'''
__version__ = "$Id$"
import base64
# remember to use %% and \\ instead of % and \ !
builtinPages = {
"/" : u'''\
<html><head><title>%(host)s - Kickstart Debugger</title>
<script language="javascript" type="text/javascript">
function scrollToBottom(el) {
var l=document.getElementById(el);
l.contentWindow.scrollTo(0,10000000000000);
l.height=window.innerHeight-l.offsetTop-20;
}
function loadInContentPanel(uri, legend) {
var el = document.getElementById('contentpanel');
if (el != null) {
var h = window.innerHeight - 100;
el.innerHTML = "<legend>" + legend + "</legend><iframe id='contentframe' onload='scrollToBottom(\\"contentframe\\")' frameborder='0' height='" + h + "' width='100%%' src='" + uri + "'/>";
}
return(false);
}
</script>
</head>
<body>
<h1>%(host)s - Kickstart Debugger</h1>
<div style="float: left; width: 20%%;">
<fieldset><legend>What do you want to see?</legend>
<p><ul>
%(screenshotLink)s
<li><a onclick="return loadInContentPanel('/fs/%(ks_file)s','Kickstart File')" href="/fs/%(ks_file)s">kickstart file</a></li>
<li><a onclick="return loadInContentPanel('/fs/mnt/sysimage/root/ks-post.log','Kickstart %%post Script Log')" href="/fs/mnt/sysimage/root/ks-post.log">%%post log</a></li>
<li><a onclick="return loadInContentPanel('/fs/tmp/anaconda.log','Anaconda log file')" href="/fs/tmp/anaconda.log">anaconda.log</a></li>
<li><a onclick="return loadInContentPanel('/fs/tmp/','Browse Installation System (<code>/tmp</code>)')" href="/fs/tmp/">/tmp of Installation System</a></li>
<li><a onclick="return loadInContentPanel('/fs/mnt/sysimage/','Browse Installed System (<code>/mnt/sysimage</code>)')" href="/fs/mnt/sysimage/">Installed Root Filesystem</a></li>
</ul></p>
</fieldset>
<fieldset><legend>Downloads:</legend>
<p><ul>
<li><a href="/download/tmp/ks.cfg">kickstart file</a></li>
<li><a href="/download/mnt/sysimage/root/ks-post.log">Kickstart %%post log</a></li>
<li><a href="/download/tmp/anaconda.log">anaconda.log</a></li>
<li><a href="/download/tmp">/tmp of Installation System</a></li>
<li><a href="/download/mnt/sysimage/root">Installed Root Filesystem (/root)</a></li>
</ul></p>
</fieldset>
</div>
<div id="logframe" style="float: right; width: 80%%;">
<fieldset id="contentpanel" style="border: 2px solid grey;"><legend>Content</legend>
<h2 style="color:red">%(error)s</h2>
<p>Click on a link at the left to display the content here</p>
</fieldset>
</div>
<hr/>
<i style="font-size: 80%%">Kickstart Debugger, written by Schlomo Schapiro. Licensed under the <a href="http://www.gnu.org/licenses/gpl.html">GNU General Public License</a>. See my <a href="http://blog.schlomo.schapiro.org">BLOG</a> for news and updates.</i>
</body>
</html>
''',
"/screenshot" : u'''\
<html>
<head><title>%(host)s - Kickstart Debugger Screenshot</title>
</head>
<body>
<img style="cursor:pointer;" onclick="this.src=this.src;" src="%(screenshotUrl)s"/>
</body>
</html>
''',
"/favicon.ico.mimetype":"image/icon",
"/favicon.ico":base64.decodestring('''\
AAABAAEAGBgAAAEAIACICQAAFgAAACgAAAAYAAAAMAAAAAEAIAAAAAAAAAkAANYNAADWDQAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYA
AAATAAAAIgAAAC4AAAArAAAAHgAAABAAAAABAAAAAQAAAAEAAAABAAAAAAAAAAAAAAAGAAAAEwAA
ACIAAAAuAAAAKwAAAB4AAAAQAAAAAQAAAAAAAAAAAAAAAAAAAA1eMhVsZjgZ2UsqEosAAAA4AAAA
MQAAACcAAAAYAAAADwAAABAAAAAQAAAADwAAAA4AAAAYOz4+Y1ZaV+FXWlnaIyYmUAAAACoAAAAd
AAAACwAAAAAAAAAAAAAAAH5GHGOBTCn0kFYq/28/HPNhNhfFSSQPIwAAAA0AAAAPAAAAEAAAABIA
AAASAAAAEQAAABBKT0xhhoqJ+NLY1f9obGr8T09PLQAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAIxQ
KNvNqYr/tn1O/6VoOP+ETSb7ZDgZ4TkcHAkAAAAFAAAABQAAAAYAAAAGAAAABjxLSxFrb27v3+Ph
/8PIxfxma2iRAAAAAGNqYyRma2n4ZWtoVgAAAAAAAAAAAAAAAIlNI4S0i2331rCM/7V8Tf+qb0H/
ekYg9WQ2F4UAAAAAAAAAAAAAAAAAAAAAAAAAAGlubIvLz8376e3r/56ioPlobmgsAAAAAG91c7zO
0tD+eX586QAAAAAAAAAAAAAAAKpVAAOLTyTL17yn/dizkP+2fU7/soBZ/2w8HPViNhYvAAAAAAAA
AAAAAAAAAAAAAHR4duz7+/v/6Ozq/9zg3/5/g4HzdXp4rqitqvn5+vr/gYWD4gAAAAAAAAAAAAAA
AAAAAACETBwbjlMp4cSiifnjzbj/w5Ru/7OKav5uPx/2ZDgZkQAAAAAAAAAAAAAAAH2CgO3/////
8fPy/+fr6P/j6Ob/2t/d/+7y8P/o6+n+f4WCmwAAAAAAAAAAAAAAAAAAAAAAAAAAmTMzBYlNI3uU
Xzn24c28/cqhf//GpY3/ajwd/F0uFwsAAAAAeYJ9PYmOjfXx8vL/+/v7//Lz8v/f4+H/197b//j5
+P+Wm5n2gIyMFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACDTR8hkFcx9fHo4P+ZcVP/cGFR
9VtfXYNpbGxJgYWD9eXm5v/a3tz/9vf3//n5+f/8/fz/+vv7/8jLyvuIjIptAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiU0jbodQJ/l3ZVT1tLu4/3J3dfRoa2r7vL69/93d3f/4
+fj/rbGw94iMi7uXnJrtnaCe+YaNiGkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAABbX12Dcnh287S7uP93fHr/jpCP/8vLy/+cnp34h42KYgAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACDi4dE
dXl3+nl+fP+0u7j/eH17/3J2dPxvc3NqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIeKh0aSlZX12dzb/5+ioP97gH7/tLu4/3J3
dfZbX12DAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAhYyI
SY6TkN2LkI/Nh4yJl5GVlPXU19b/3eDf//f49/+ChYT7cnh29bS7uP9yd3XvW19dgwAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACEiodVpKmn+eXo5//u7+//2t3c/M7T0f/V
2tf/9vb2/6mtq/eIi4tYW19dg3J3de+0u7j/cnd171tfXYMAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAICAgAyNkZDv3N/d/9ba2f/i5eT/8/Tz//Lz8//09vX/qayq94iLi1YAAAAAAAAA
AFtfXYNyd3XvtLu4/3J3de9bX12DAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAImOjIrGy8j8
8/Tz//n5+f/6+vr/9PX1//39/f/+/v7/ipCO6gAAAAAAAAAAAAAAAAAAAABbX12Dcnd177S7uP9y
d3XvXWNfgwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAI+Tkdzu8O//1NbW+4qPjs+orKr5+/v7//z8
/P//////jJCO8QAAAAAAAAAAAAAAAAAAAAAAAAAAW19dg3J3de/O0tH/goaF72FlY4MAAAAAAAAA
AAAAAAAAAAAAAAAAAI2Tke7Z3Nv/i4+O0P///wGGi4Y5zM7N+/7+/v/y8/P+iI2MnQAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAF1jX4OChoXv+vr6/4mMi+9hZWODAAAAAAAAAAAAAAAAAAAAAImOi2yI
jYv+h4yHMwAAAACHjYt/6Onp/P7+/v+eoqH1hoaGFQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AABhZWODiYyL7/r6+v9obGrzUldXMgAAAAAAAAAAAAAAAP///wGKiooYAAAAAISOhBuRlJL3+Pn5
/9HT0vuJjYtuAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYWVjg2hsavNjZmSl
AAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIaLhjeLkI74jJCP9omPjYYAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFJXVzIAAAABAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD///8AwAYBAMAAAQDAAAcAwAARAMB8EQDAPAEA
4BwBAPAIAQD8AAMA/gAHAP+AfwD/gP8A/wD/APAAfwDgAD8AwAwfAMAeDwDAHwcAwB+DAMQfwQDI
P+EA+H/zAP///wA=''',
)}
# ignore popen2 deprecation warning
import warnings
warnings.filterwarnings("ignore")
import SimpleHTTPServer
import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import shutil
import string
import mimetypes
import popen2
import socket
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class KickstartDebuggerRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
server_name= socket.gethostname()
extensions_map = {
'' : 'application/octet-stream', # default mime type
'.log': 'text/plain',
'.cfg': 'text/plain',
'.ks': 'text/plain',
'.txt': 'text/plain',
}
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
f.write("<hr>\n<table border=0>\n<tr><td colspan='2'><a href='../'>../<a></td></tr>\n")
for name in list:
linkHtml=""
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
linkHtml = "<br/> → <small>%s</small>" % os.path.realpath(fullname)
# Note: a link to a directory displays with @ and links with /
f.write('<tr valign="top"><td><a href="%s">%s</a>%s</td><td align="right">%d</td></tr>\n'
% (urllib.quote(linkname), cgi.escape(displayname), linkHtml, os.path.getsize(fullname)))
f.write("</table>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def getReplacements(self):
'''Build a dictionary with some data that can be used in builtinPages '''
replacements = {}
replacements["host"] = KickstartDebuggerRequestHandler.server_name
replacements["screenshotUrl"] = options.screenshotUrl
ks_file_candidates = [
'/tmp/ks.cfg',
'/run/install/ks.cfg'
]
ks_file = None
for file in ks_file_candidates:
if os.path.isfile(file):
ks_file = file
if ks_file is not None:
replacements["ks_file"] = file
else:
replacements["ks_file"] = "/tmp/false"
if options.screenshotUrl:
replacements["screenshotLink"] = '''<li><a onclick="return loadInContentPanel('/screenshot','Screenshot')" href="/screenshot">Screenshot</a></li>'''
else:
replacements["screenshotLink"] = ""
replacements["error"] = ""
error_file = "/dev/kickstart_debugger_error.txt"
if os.path.isfile(error_file):
f = None
try:
f = open(error_file)
replacements["error"] = f.read()
finally:
if f:
f.close()
else:
replacements["error"] = "Could not read from <code>%s</code>!" % error_file
return replacements
def do_GET(self,onlyHeaders = False):
'''Serve a GET request'''
f = None
f_in = None
# disable caching, found in http://stackoverflow.com/questions/49547/making-sure-a-web-page-is-not-cached-across-all-browsers
if self.path in builtinPages:
f = StringIO()
f.write(builtinPages[self.path] % self.getReplacements())
length = f.tell()
f.seek(0)
self.send_response(200)
if self.path+".mimetype" in builtinPages:
self.send_header("Content-type",builtinPages[self.path+".mimetype"])
else:
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
elif self.path.startswith("/fs"):
self.path=self.path[3:] # strip leading /fs to map /fs to real /
elif self.path.startswith("/download"):
self.path=self.path[9:] # strip leading /download to map /download to real /
try:
isFile=False
downloadSuffix = ""
if os.path.isfile(self.path):
isFile=True
f = open(self.path)
elif os.path.isdir(self.path):
downloadSuffix = ".tar.gz"
# not using builtin tarfile module because python 2.4 tarfile does not support add with exclusions
# also, I am not sure if the tarfile module would do real streaming of the resulting tar.gz or assemble it in memory
(f,f_in) = popen2.popen2("tar -C / -cz --ignore-failed-read --exclude \"*.img\" \"%s\"" % self.path[1:]) # skip leading /
f_in.close()
else:
raise IOError("not a file and not a dir")
self.send_response(200)
self.send_header("Content-type", "application/octet-stream")
# set filename to be like HOSTNAME_path_to_file.txt
self.send_header("Content-Disposition:", "attachment; filename=\"%s%s\"" % (KickstartDebuggerRequestHandler.server_name, (string.replace(self.path,"/","_") + downloadSuffix)))
if isFile:
self.send_header("Content-Length", str(os.path.getsize(self.path)))
self.end_headers()
            except IOError, e:
self.send_error(404,"No permission or error while opening " + self.path + ": " + str(e))
return None
if not f:
# default action from super class if no file set
f = self.send_head()
if f:
if not onlyHeaders:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
        self.do_GET(True)
def end_headers(self):
"""Send standard headers and end header sending"""
self.send_header("Cache-Control","no-cache, no-store, must-revalidate")
self.send_header("Pragma","no-cache")
self.send_header("Expires:","0")
SimpleHTTPServer.SimpleHTTPRequestHandler.end_headers(self)
if __name__ == "__main__":
usage='''Kickstart Debugger is a simple web tool meant to run inside a kickstart/anaconda
installation system.'''
import optparse
import sys
import SocketServer
parser = optparse.OptionParser(usage=usage,version=__version__)
parser.add_option("-p","--port",dest="listenPort",default="80",type="int",metavar="PORT",help="Listening port for web server [%default]")
parser.add_option("-s","--screenshot",dest="screenshotUrl",default="",type="string",metavar="SCREENSHOT",help="URL to screenshot")
options, arguments = parser.parse_args()
os.chdir("/")
httpd = SocketServer.TCPServer(("", options.listenPort), KickstartDebuggerRequestHandler)
#KickstartDebuggerRequestHandler.server_name = socket.gethostname()
print "Starting Kickstart Debugger on port %s" % options.listenPort
httpd.serve_forever()
| ImmobilienScout24/kickstart-debugger | kickstart-debugger.py | Python | gpl-3.0 | 15,275 |
#!/usr/bin/env python
# encoding: utf-8
"""Not really a lexer in the classical sense, but code to convert snippet
definitions into logical units called Tokens."""
import string
import re
from UltiSnips.position import Position
from UltiSnips.text import unescape
class _TextIterator:
"""Helper class to make iterating over text easier."""
def __init__(self, text, offset):
self._text = text
self._line = offset.line
self._col = offset.col
self._idx = 0
def __iter__(self):
"""Iterator interface."""
return self
def __next__(self):
"""Returns the next character."""
if self._idx >= len(self._text):
raise StopIteration
rv = self._text[self._idx]
if self._text[self._idx] in ("\n", "\r\n"):
self._line += 1
self._col = 0
else:
self._col += 1
self._idx += 1
return rv
def peek(self, count=1):
"""Returns the next 'count' characters without advancing the stream."""
if count > 1: # This might return '' if nothing is found
return self._text[self._idx : self._idx + count]
try:
return self._text[self._idx]
except IndexError:
return None
@property
def pos(self):
"""Current position in the text."""
return Position(self._line, self._col)
def _parse_number(stream):
"""Expects the stream to contain a number next, returns the number without
consuming any more bytes."""
rv = ""
while stream.peek() and stream.peek() in string.digits:
rv += next(stream)
return int(rv)
def _parse_till_closing_brace(stream):
"""
Returns all chars till a non-escaped } is found. Other
    non-escaped { are taken into account and skipped over.
    Will also consume the closing }, but will not return it.
"""
rv = ""
in_braces = 1
while True:
if EscapeCharToken.starts_here(stream, "{}"):
rv += next(stream) + next(stream)
else:
char = next(stream)
if char == "{":
in_braces += 1
elif char == "}":
in_braces -= 1
if in_braces == 0:
break
rv += char
return rv
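# Illustrative example (added, not part of the original source): if the remaining
# stream text is 'foo {bar} baz} tail', this returns 'foo {bar} baz', consumes the
# matching closing brace, and leaves the iterator positioned at ' tail'.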
def _parse_till_unescaped_char(stream, chars):
"""
Returns all chars till a non-escaped char is found.
    Will also consume the closing char and return it as the second
    return value.
"""
rv = ""
while True:
escaped = False
for char in chars:
if EscapeCharToken.starts_here(stream, char):
rv += next(stream) + next(stream)
escaped = True
if not escaped:
char = next(stream)
if char in chars:
break
rv += char
return rv, char
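# Illustrative example (added, not part of the original source): with the remaining
# stream text 'up\/down/rest' and chars='/', the first return value is 'up\/down'
# (the escaped slash is kept, still escaped) and the second is the terminating
# unescaped '/', which is consumed from the stream.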
class Token:
"""Represents a Token as parsed from a snippet definition."""
def __init__(self, gen, indent):
self.initial_text = ""
self.start = gen.pos
self._parse(gen, indent)
self.end = gen.pos
def _parse(self, stream, indent):
"""Parses the token from 'stream' with the current 'indent'."""
pass # Does nothing
class TabStopToken(Token):
"""${1:blub}"""
CHECK = re.compile(r"^\${\d+[:}]")
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return cls.CHECK.match(stream.peek(10)) is not None
def _parse(self, stream, indent):
next(stream) # $
next(stream) # {
self.number = _parse_number(stream)
if stream.peek() == ":":
next(stream)
self.initial_text = _parse_till_closing_brace(stream)
def __repr__(self):
return "TabStopToken(%r,%r,%r,%r)" % (
self.start,
self.end,
self.number,
self.initial_text,
)
class VisualToken(Token):
"""${VISUAL}"""
CHECK = re.compile(r"^\${VISUAL[:}/]")
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return cls.CHECK.match(stream.peek(10)) is not None
def _parse(self, stream, indent):
for _ in range(8): # ${VISUAL
next(stream)
if stream.peek() == ":":
next(stream)
self.alternative_text, char = _parse_till_unescaped_char(stream, "/}")
self.alternative_text = unescape(self.alternative_text)
if char == "/": # Transformation going on
try:
self.search = _parse_till_unescaped_char(stream, "/")[0]
self.replace = _parse_till_unescaped_char(stream, "/")[0]
self.options = _parse_till_closing_brace(stream)
except StopIteration:
raise RuntimeError(
"Invalid ${VISUAL} transformation! Forgot to escape a '/'?"
)
else:
self.search = None
self.replace = None
self.options = None
def __repr__(self):
return "VisualToken(%r,%r)" % (self.start, self.end)
class TransformationToken(Token):
"""${1/match/replace/options}"""
CHECK = re.compile(r"^\${\d+\/")
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return cls.CHECK.match(stream.peek(10)) is not None
def _parse(self, stream, indent):
next(stream) # $
next(stream) # {
self.number = _parse_number(stream)
next(stream) # /
self.search = _parse_till_unescaped_char(stream, "/")[0]
self.replace = _parse_till_unescaped_char(stream, "/")[0]
self.options = _parse_till_closing_brace(stream)
def __repr__(self):
return "TransformationToken(%r,%r,%r,%r,%r)" % (
self.start,
self.end,
self.number,
self.search,
self.replace,
)
class MirrorToken(Token):
"""$1."""
CHECK = re.compile(r"^\$\d+")
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return cls.CHECK.match(stream.peek(10)) is not None
def _parse(self, stream, indent):
next(stream) # $
self.number = _parse_number(stream)
def __repr__(self):
return "MirrorToken(%r,%r,%r)" % (self.start, self.end, self.number)
class ChoicesToken(Token):
"""${1|o1,o2,o3|}
P.S. This is not a subclass of TabStop,
so its content will not be parsed recursively.
"""
CHECK = re.compile(r"^\${\d+\|")
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return cls.CHECK.match(stream.peek(10)) is not None
def _parse(self, stream, indent):
next(stream) # $
next(stream) # {
self.number = _parse_number(stream)
if self.number == 0:
raise RuntimeError(
"Choices selection is not supported on $0"
)
next(stream) # |
choices_text = _parse_till_unescaped_char(stream, "|")[0]
choice_list = []
# inside choice item, comma can be escaped by "\,"
# we need to do a little bit smarter parsing than simply splitting
choice_stream = _TextIterator(choices_text, Position(0, 0))
while True:
cur_col = choice_stream.pos.col
try:
result = _parse_till_unescaped_char(choice_stream, ",")[0]
if not result:
continue
choice_list.append(self._get_unescaped_choice_item(result))
except:
last_choice_item = self._get_unescaped_choice_item(choices_text[cur_col:])
if last_choice_item:
choice_list.append(last_choice_item)
break
self.choice_list = choice_list
self.initial_text = "|{0}|".format(",".join(choice_list))
_parse_till_closing_brace(stream)
def _get_unescaped_choice_item(self, escaped_choice_item):
"""unescape common inside choice item"""
return escaped_choice_item.replace(r"\,", ",")
def __repr__(self):
return "ChoicesToken(%r,%r,%r,|%r|)" % (
self.start,
self.end,
self.number,
self.initial_text,
)
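# Illustrative example (added, not part of the original source): a ChoicesToken
# parsed from the snippet text '${1|yes,no,maybe|}' ends up with number == 1,
# choice_list == ['yes', 'no', 'maybe'] and initial_text == '|yes,no,maybe|'.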
class EscapeCharToken(Token):
"""\\n."""
@classmethod
def starts_here(cls, stream, chars=r"{}\$`"):
"""Returns true if this token starts at the current position in
'stream'."""
cs = stream.peek(2)
if len(cs) == 2 and cs[0] == "\\" and cs[1] in chars:
return True
def _parse(self, stream, indent):
next(stream) # \
self.initial_text = next(stream)
def __repr__(self):
return "EscapeCharToken(%r,%r,%r)" % (self.start, self.end, self.initial_text)
class ShellCodeToken(Token):
"""`echo "hi"`"""
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return stream.peek(1) == "`"
def _parse(self, stream, indent):
next(stream) # `
self.code = _parse_till_unescaped_char(stream, "`")[0]
def __repr__(self):
return "ShellCodeToken(%r,%r,%r)" % (self.start, self.end, self.code)
class PythonCodeToken(Token):
"""`!p snip.rv = "Hi"`"""
CHECK = re.compile(r"^`!p\s")
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return cls.CHECK.match(stream.peek(4)) is not None
def _parse(self, stream, indent):
for _ in range(3):
next(stream) # `!p
if stream.peek() in "\t ":
next(stream)
code = _parse_till_unescaped_char(stream, "`")[0]
# Strip the indent if any
if len(indent):
lines = code.splitlines()
self.code = lines[0] + "\n"
self.code += "\n".join([l[len(indent) :] for l in lines[1:]])
else:
self.code = code
self.indent = indent
def __repr__(self):
return "PythonCodeToken(%r,%r,%r)" % (self.start, self.end, self.code)
class VimLCodeToken(Token):
"""`!v g:hi`"""
CHECK = re.compile(r"^`!v\s")
@classmethod
def starts_here(cls, stream):
"""Returns true if this token starts at the current position in
'stream'."""
return cls.CHECK.match(stream.peek(4)) is not None
def _parse(self, stream, indent):
for _ in range(4):
next(stream) # `!v
self.code = _parse_till_unescaped_char(stream, "`")[0]
def __repr__(self):
return "VimLCodeToken(%r,%r,%r)" % (self.start, self.end, self.code)
class EndOfTextToken(Token):
"""Appears at the end of the text."""
def __repr__(self):
return "EndOfText(%r)" % self.end
def tokenize(text, indent, offset, allowed_tokens):
"""Returns an iterator of tokens of 'text'['offset':] which is assumed to
    have 'indent' as the whitespace at the beginning of the lines. Only
'allowed_tokens' are considered to be valid tokens."""
stream = _TextIterator(text, offset)
try:
while True:
done_something = False
for token in allowed_tokens:
if token.starts_here(stream):
yield token(stream, indent)
done_something = True
break
if not done_something:
next(stream)
except StopIteration:
yield EndOfTextToken(stream, indent)
| kzlin129/ultisnips | pythonx/UltiSnips/snippet/parsing/lexer.py | Python | gpl-3.0 | 11,949 |
def is_left_obstacle(lbot, threshold = 200):
sonarsValue = lbot.getSonars()["left"]
if min(sonarsValue) < threshold:
return True
return False
| ibarbech/learnbot | learnbot_dsl/functions/perceptual/distancesensors/is_left_obstacle.py | Python | gpl-3.0 | 147 |
# -*- coding: utf-8 -*-
#
# This file is part of the shibboleth-authenticator module for Invenio.
# Copyright (C) 2017 Helmholtz-Zentrum Dresden-Rossendorf
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utility methods to help find, authenticate or register a remote user."""
from __future__ import absolute_import, print_function
import uritools
from flask import current_app, request
from werkzeug.local import LocalProxy
from wtforms.fields.core import FormField
_security = LocalProxy(lambda: current_app.extensions['security'])
_datastore = LocalProxy(lambda: _security.datastore)
def get_account_info(attributes, remote_app):
"""Return account info for remote user."""
mappings = current_app.config['SHIBBOLETH_REMOTE_APPS'][
remote_app]['mappings']
email = attributes[mappings['email']][0]
external_id = attributes[mappings['user_unique_id']][0]
full_name = attributes[mappings['full_name']][0]
return dict(
user=dict(
email=email,
profile=dict(
full_name=full_name,
username=external_id.split('@')[0],
),
),
external_id=external_id,
external_method=remote_app,
)
def get_safe_redirect_target(arg='next'):
"""Get URL to redirect to and ensure that it is local.
:param arg: URL argument.
:returns: The redirect target or ``None``.
"""
for target in request.args.get(arg), request.referrer:
if target:
redirect_uri = uritools.urisplit(target)
allowed_hosts = current_app.config.get('APP_ALLOWED_HOSTS', [])
if redirect_uri.host in allowed_hosts:
return target
elif redirect_uri.path:
return uritools.uricompose(
path=redirect_uri.path,
query=redirect_uri.query
)
return None
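# Illustrative example (added, not part of the original source), assuming
# APP_ALLOWED_HOSTS = ['example.org']:
#   ?next=https://example.org/deposit      -> returned unchanged (host is allowed)
#   ?next=https://evil.example.com/deposit -> only '/deposit' (path and query) is returned
#   no usable ?next and no referrer        -> None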
| tobiasfrust/shibboleth-authenticator | shibboleth_authenticator/utils.py | Python | gpl-3.0 | 2,488 |
import os
from unittest import TestCase
from unittest.mock import PropertyMock, patch
from libres_utils import tmpdir
from job_runner.job import Job
from job_runner.reporting.message import Exited, Running, Start
class JobTests(TestCase):
@patch("job_runner.job.assert_file_executable")
@patch("job_runner.job.Popen")
@patch("job_runner.job.Process")
@tmpdir(None)
def test_run_with_process_failing(
self, mock_process, mock_popen, mock_assert_file_executable
):
job = Job({}, 0)
type(mock_process.return_value.memory_info.return_value).rss = PropertyMock(
return_value=10
)
mock_process.return_value.wait.return_value = 9
run = job.run()
self.assertIsInstance(next(run), Start, "run did not yield Start message")
self.assertIsInstance(next(run), Running, "run did not yield Running message")
exited = next(run)
self.assertIsInstance(exited, Exited, "run did not yield Exited message")
self.assertEqual(9, exited.exit_code, "Exited message had unexpected exit code")
with self.assertRaises(StopIteration):
next(run)
@tmpdir(None)
def test_run_fails_using_exit_bash_builtin(self):
job = Job(
{
"name": "exit 1",
"executable": "/bin/bash",
"stdout": "exit_out",
"stderr": "exit_err",
"argList": ["-c", 'echo "failed with {}" 1>&2 ; exit {}'.format(1, 1)],
},
0,
)
statuses = list(job.run())
self.assertEqual(3, len(statuses), "Wrong statuses count")
self.assertEqual(1, statuses[2].exit_code, "Exited status wrong exit_code")
self.assertEqual(
"Process exited with status code 1",
statuses[2].error_message,
"Exited status wrong error_message",
)
@tmpdir(None)
def test_run_with_defined_executable_but_missing(self):
executable = os.path.join(os.getcwd(), "this/is/not/a/file")
job = Job(
{
"name": "TEST_EXECUTABLE_NOT_FOUND",
"executable": executable,
"stdout": "mkdir_out",
"stderr": "mkdir_err",
},
0,
)
with self.assertRaises(IOError):
for _ in job.run():
pass
@tmpdir(None)
def test_run_with_defined_executable_no_exec_bit(self):
non_executable = os.path.join(os.getcwd(), "foo")
with open(non_executable, "a"):
pass
job = Job(
{
"name": "TEST_EXECUTABLE_NOT_EXECUTABLE",
"executable": non_executable,
"stdout": "mkdir_out",
"stderr": "mkdir_err",
},
0,
)
with self.assertRaises(IOError):
for _ in job.run():
pass
def test_init_job_no_std():
job = Job(
{},
0,
)
assert job.std_err is None
assert job.std_out is None
def test_init_job_with_std():
job = Job(
{
"stdout": "exit_out",
"stderr": "exit_err",
},
0,
)
assert job.std_err == "exit_err"
assert job.std_out == "exit_out"
| joakim-hove/ert | tests/libres_tests/job_runner/test_job.py | Python | gpl-3.0 | 3,312 |
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2011> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from nose.tools import with_setup
from os.path import dirname, join, abspath
from lettuce import Runner
import lettuce
from lettuce.core import fs, StepDefinition
from tests.asserts import prepare_stdout
from tests.asserts import assert_stdout_lines
from tests.asserts import assert_stdout_lines_with_traceback
current_dir = abspath(dirname(__file__))
lettuce_dir = abspath(dirname(lettuce.__file__))
lettuce_path = lambda *x: fs.relpath(join(lettuce_dir, *x))
call_line = StepDefinition.__call__.im_func.func_code.co_firstlineno + 5
def path_to_feature(name):
return join(abspath(dirname(__file__)), 'behave_as_features', name, "%s.feature" % name)
@with_setup(prepare_stdout)
def test_simple_behave_as_feature():
"Basic step.behave_as behaviour is working"
Runner(path_to_feature('1st_normal_steps'), verbosity=3).run()
assert_stdout_lines(
"\n"
"Feature: Multiplication # tests/functional/behave_as_features/1st_normal_steps/1st_normal_steps.feature:2\n"
" In order to avoid silly mistakes # tests/functional/behave_as_features/1st_normal_steps/1st_normal_steps.feature:3\n"
" Cashiers must be able to multiplicate numbers :) # tests/functional/behave_as_features/1st_normal_steps/1st_normal_steps.feature:4\n"
"\n"
" Scenario: Regular numbers # tests/functional/behave_as_features/1st_normal_steps/1st_normal_steps.feature:6\n"
" Given I have entered 10 into the calculator # tests/functional/behave_as_features/1st_normal_steps/simple_step_definitions.py:11\n"
" And I have entered 4 into the calculator # tests/functional/behave_as_features/1st_normal_steps/simple_step_definitions.py:11\n"
" When I press multiply # tests/functional/behave_as_features/1st_normal_steps/simple_step_definitions.py:15\n"
" Then the result should be 40 on the screen # tests/functional/behave_as_features/1st_normal_steps/simple_step_definitions.py:19\n"
"\n"
" Scenario: Shorter version of the scenario above # tests/functional/behave_as_features/1st_normal_steps/1st_normal_steps.feature:12\n"
" Given I multiply 10 and 4 into the calculator # tests/functional/behave_as_features/1st_normal_steps/simple_step_definitions.py:23\n"
" Then the result should be 40 on the screen # tests/functional/behave_as_features/1st_normal_steps/simple_step_definitions.py:19\n"
"\n"
"1 feature (1 passed)\n"
"2 scenarios (2 passed)\n"
"6 steps (6 passed)\n"
)
@with_setup(prepare_stdout)
def test_simple_tables_behave_as_feature():
"Basic step.behave_as behaviour is working"
Runner(path_to_feature('2nd_table_steps'), verbosity=3).run()
assert_stdout_lines(
"\n"
"Feature: Multiplication # tests/functional/behave_as_features/2nd_table_steps/2nd_table_steps.feature:2\n"
" In order to avoid silly mistakes # tests/functional/behave_as_features/2nd_table_steps/2nd_table_steps.feature:3\n"
" Cashiers must be able to multiplicate numbers :) # tests/functional/behave_as_features/2nd_table_steps/2nd_table_steps.feature:4\n"
"\n"
" Scenario: Regular numbers # tests/functional/behave_as_features/2nd_table_steps/2nd_table_steps.feature:6\n"
" Given I multiply these numbers: # tests/functional/behave_as_features/2nd_table_steps/simple_tables_step_definitions.py:31\n"
" | number |\n"
" | 55 |\n"
" | 2 |\n"
" Then the result should be 110 on the screen # tests/functional/behave_as_features/2nd_table_steps/simple_tables_step_definitions.py:19\n"
"\n"
" Scenario: Shorter version of the scenario above # tests/functional/behave_as_features/2nd_table_steps/2nd_table_steps.feature:13\n"
" Given I multiply 55 and 2 into the calculator # tests/functional/behave_as_features/2nd_table_steps/simple_tables_step_definitions.py:23\n"
" Then the result should be 110 on the screen # tests/functional/behave_as_features/2nd_table_steps/simple_tables_step_definitions.py:19\n"
"\n"
"1 feature (1 passed)\n"
"2 scenarios (2 passed)\n"
"4 steps (4 passed)\n"
)
@with_setup(prepare_stdout)
def test_failing_tables_behave_as_feature():
"Basic step.behave_as behaviour is working"
Runner(path_to_feature('3rd_failing_steps'), verbosity=3).run()
assert_stdout_lines_with_traceback(
'\n'
'Feature: Multiplication # tests/functional/behave_as_features/3rd_failing_steps/3rd_failing_steps.feature:2\n'
' In order to avoid silly mistakes # tests/functional/behave_as_features/3rd_failing_steps/3rd_failing_steps.feature:3\n'
' Cashiers must be able to multiplicate numbers :) # tests/functional/behave_as_features/3rd_failing_steps/3rd_failing_steps.feature:4\n'
'\n'
' Scenario: Regular numbers # tests/functional/behave_as_features/3rd_failing_steps/3rd_failing_steps.feature:6\n'
' Given I have entered 10 into the calculator # tests/functional/behave_as_features/3rd_failing_steps/failing_step_definitions.py:11\n'
' Traceback (most recent call last):\n'
' File "%(lettuce_core_file)s", line %(call_line)d, in __call__\n'
' ret = self.function(self.step, *args, **kw)\n'
' File "%(step_file)s", line 13, in i_have_entered_NUM_into_the_calculator\n'
' assert False, \'Die, die, die my darling!\'\n'
' AssertionError: Die, die, die my darling!\n'
' And I have entered 4 into the calculator # tests/functional/behave_as_features/3rd_failing_steps/failing_step_definitions.py:11\n'
' When I press multiply # tests/functional/behave_as_features/3rd_failing_steps/failing_step_definitions.py:16\n'
' Then the result should be 40 on the screen # tests/functional/behave_as_features/3rd_failing_steps/failing_step_definitions.py:20\n'
'\n'
' Scenario: Shorter version of the scenario above # tests/functional/behave_as_features/3rd_failing_steps/3rd_failing_steps.feature:12\n'
' Given I multiply 10 and 4 into the calculator # tests/functional/behave_as_features/3rd_failing_steps/failing_step_definitions.py:24\n'
' Traceback (most recent call last):\n'
' File "%(lettuce_core_file)s", line %(call_line)d, in __call__\n'
' ret = self.function(self.step, *args, **kw)\n'
' File "%(step_file)s", line 29, in multiply_X_and_Y_into_the_calculator\n'
' \'\'\'.format(x, y))\n'
' File "%(lettuce_core_file)s", line %(call_line)d, in __call__\n'
' assert not steps_failed, steps_failed[0].why.exception\n'
' AssertionError: Die, die, die my darling!\n'
' Then the result should be 40 on the screen # tests/functional/behave_as_features/3rd_failing_steps/failing_step_definitions.py:20\n'
'\n'
'1 feature (0 passed)\n'
'2 scenarios (2 failed, 0 passed)\n'
'6 steps (2 failed, 4 skipped, 0 passed)\n' % {
'lettuce_core_file': lettuce_path('core.py'),
'step_file': abspath(lettuce_path('..', 'tests', 'functional', 'behave_as_features', '3rd_failing_steps', 'failing_step_definitions.py')),
'call_line':call_line,
}
)
|
scaphe/lettuce-dirty
|
tests/functional/test_behave_as_handling.py
|
Python
|
gpl-3.0
| 8,334
|
""" Integral Transforms """
from __future__ import print_function, division
from sympy.core import S
from sympy.core.compatibility import reduce
from sympy.core.function import Function
from sympy.core.numbers import oo
from sympy.core.symbol import Dummy
from sympy.integrals import integrate, Integral
from sympy.integrals.meijerint import _dummy
from sympy.logic.boolalg import to_cnf, conjuncts, disjuncts, Or, And
from sympy.simplify import simplify
from sympy.utilities import default_sort_key
##########################################################################
# Helpers / Utilities
##########################################################################
class IntegralTransformError(NotImplementedError):
"""
Exception raised in relation to problems computing transforms.
    This class is mostly used internally; if integrals cannot be computed,
objects representing unevaluated transforms are usually returned.
The hint ``needeval=True`` can be used to disable returning transform
objects, and instead raise this exception if an integral cannot be
computed.
"""
def __init__(self, transform, function, msg):
super(IntegralTransformError, self).__init__(
"%s Transform could not be computed: %s." % (transform, msg))
self.function = function
class IntegralTransform(Function):
"""
Base class for integral transforms.
This class represents unevaluated transforms.
To implement a concrete transform, derive from this class and implement
the _compute_transform(f, x, s, **hints) and _as_integral(f, x, s)
functions. If the transform cannot be computed, raise IntegralTransformError.
Also set cls._name.
Implement self._collapse_extra if your function returns more than just a
number and possibly a convergence condition.
"""
@property
def function(self):
""" The function to be transformed. """
return self.args[0]
@property
def function_variable(self):
""" The dependent variable of the function to be transformed. """
return self.args[1]
@property
def transform_variable(self):
""" The independent transform variable. """
return self.args[2]
@property
def free_symbols(self):
"""
This method returns the symbols that will exist when the transform
is evaluated.
"""
return self.function.free_symbols.union(set([self.transform_variable])) \
- set([self.function_variable])
def _compute_transform(self, f, x, s, **hints):
raise NotImplementedError
def _as_integral(self, f, x, s):
raise NotImplementedError
def _collapse_extra(self, extra):
from sympy import And
cond = And(*extra)
if cond == False:
            raise IntegralTransformError(self.__class__._name, None, '')
def doit(self, **hints):
"""
Try to evaluate the transform in closed form.
This general function handles linearity, but apart from that leaves
pretty much everything to _compute_transform.
Standard hints are the following:
- ``simplify``: whether or not to simplify the result
- ``noconds``: if True, don't return convergence conditions
- ``needeval``: if True, raise IntegralTransformError instead of
returning IntegralTransform objects
The default values of these hints depend on the concrete transform,
usually the default is
``(simplify, noconds, needeval) = (True, False, False)``.
"""
from sympy import Add, expand_mul, Mul
from sympy.core.function import AppliedUndef
needeval = hints.pop('needeval', False)
try_directly = not any(func.has(self.function_variable)
for func in self.function.atoms(AppliedUndef))
if try_directly:
try:
return self._compute_transform(self.function,
self.function_variable, self.transform_variable, **hints)
except IntegralTransformError:
pass
fn = self.function
if not fn.is_Add:
fn = expand_mul(fn)
if fn.is_Add:
hints['needeval'] = needeval
res = [self.__class__(*([x] + list(self.args[1:]))).doit(**hints)
for x in fn.args]
extra = []
ress = []
for x in res:
if not isinstance(x, tuple):
x = [x]
ress.append(x[0])
if len(x) > 1:
extra += [x[1:]]
res = Add(*ress)
if not extra:
return res
try:
extra = self._collapse_extra(extra)
return tuple([res]) + tuple(extra)
except IntegralTransformError:
pass
if needeval:
raise IntegralTransformError(
self.__class__._name, self.function, 'needeval')
# TODO handle derivatives etc
# pull out constant coefficients
coeff, rest = fn.as_coeff_mul(self.function_variable)
return coeff*self.__class__(*([Mul(*rest)] + list(self.args[1:])))
@property
def as_integral(self):
return self._as_integral(self.function, self.function_variable,
self.transform_variable)
def _eval_rewrite_as_Integral(self, *args):
return self.as_integral
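# A minimal subclassing sketch (editor's addition, not part of sympy): the
# contract described in the IntegralTransform docstring is to implement
# _compute_transform and _as_integral and to set cls._name.  ``MyTransform``
# and its exponential kernel are hypothetical; the concrete classes defined
# further down (MellinTransform, LaplaceTransform, ...) are the authoritative
# examples.
#
#     class MyTransform(IntegralTransform):
#         _name = 'My'
#
#         def _compute_transform(self, f, x, s, **hints):
#             # return the closed form here, or raise IntegralTransformError
#             raise IntegralTransformError(self._name, f, 'not implemented')
#
#         def _as_integral(self, f, x, s):
#             from sympy import Integral, exp
#             return Integral(f*exp(-s*x), (x, 0, oo))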
from sympy.solvers.inequalities import _solve_inequality
def _simplify(expr, doit):
from sympy import powdenest, piecewise_fold
if doit:
return simplify(powdenest(piecewise_fold(expr), polar=True))
return expr
def _noconds_(default):
"""
This is a decorator generator for dropping convergence conditions.
Suppose you define a function ``transform(*args)`` which returns a tuple of
the form ``(result, cond1, cond2, ...)``.
Decorating it ``@_noconds_(default)`` will add a new keyword argument
``noconds`` to it. If ``noconds=True``, the return value will be altered to
be only ``result``, whereas if ``noconds=False`` the return value will not
be altered.
The default value of the ``noconds`` keyword will be ``default`` (i.e. the
argument of this function).
"""
def make_wrapper(func):
from sympy.core.decorators import wraps
@wraps(func)
def wrapper(*args, **kwargs):
noconds = kwargs.pop('noconds', default)
res = func(*args, **kwargs)
if noconds:
return res[0]
return res
return wrapper
return make_wrapper
_noconds = _noconds_(False)
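# Illustrative sketch (editor's addition): how ``_noconds_`` is typically used.
# The backend name and the ``compute`` helper are hypothetical; the decorated
# backends below (_mellin_transform, _laplace_transform, ...) are the real
# usage sites.
#
#     @_noconds_(True)
#     def _my_backend(f, x, s):
#         result, cond = compute(f, x, s)    # hypothetical helper
#         return result, cond
#
#     _my_backend(f, x, s)                   # noconds defaults to True -> result
#     _my_backend(f, x, s, noconds=False)    # -> (result, cond)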
##########################################################################
# Mellin Transform
##########################################################################
def _default_integrator(f, x):
return integrate(f, (x, 0, oo))
@_noconds
def _mellin_transform(f, x, s_, integrator=_default_integrator, simplify=True):
""" Backend function to compute Mellin transforms. """
from sympy import re, Max, Min, count_ops
# We use a fresh dummy, because assumptions on s might drop conditions on
# convergence of the integral.
s = _dummy('s', 'mellin-transform', f)
F = integrator(x**(s - 1) * f, x)
if not F.has(Integral):
return _simplify(F.subs(s, s_), simplify), (-oo, oo), True
if not F.is_Piecewise:
raise IntegralTransformError('Mellin', f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(
'Mellin', f, 'integral in unexpected form')
def process_conds(cond):
"""
Turn ``cond`` into a strip (a, b), and auxiliary conditions.
"""
a = -oo
b = oo
aux = True
conds = conjuncts(to_cnf(cond))
t = Dummy('t', real=True)
for c in conds:
a_ = oo
b_ = -oo
aux_ = []
for d in disjuncts(c):
d_ = d.replace(
re, lambda x: x.as_real_imag()[0]).subs(re(s), t)
if not d.is_Relational or \
d.rel_op not in ('>', '>=', '<', '<=') \
or d_.has(s) or not d_.has(t):
aux_ += [d]
continue
soln = _solve_inequality(d_, t)
if not soln.is_Relational or \
soln.rel_op not in ('>', '>=', '<', '<='):
aux_ += [d]
continue
if soln.lts == t:
b_ = Max(soln.gts, b_)
else:
a_ = Min(soln.lts, a_)
if a_ != oo and a_ != b:
a = Max(a_, a)
elif b_ != -oo and b_ != a:
b = Min(b_, b)
else:
aux = And(aux, Or(*aux_))
return a, b, aux
conds = [process_conds(c) for c in disjuncts(cond)]
conds = [x for x in conds if x[2] != False]
conds.sort(key=lambda x: (x[0] - x[1], count_ops(x[2])))
if not conds:
raise IntegralTransformError('Mellin', f, 'no convergence found')
a, b, aux = conds[0]
return _simplify(F.subs(s, s_), simplify), (a, b), aux
class MellinTransform(IntegralTransform):
"""
Class representing unevaluated Mellin transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Mellin transforms, see the :func:`mellin_transform`
docstring.
"""
_name = 'Mellin'
def _compute_transform(self, f, x, s, **hints):
return _mellin_transform(f, x, s, **hints)
def _as_integral(self, f, x, s):
from sympy import Integral
return Integral(f*x**(s - 1), (x, 0, oo))
def _collapse_extra(self, extra):
from sympy import And, Max, Min
a = []
b = []
cond = []
for (sa, sb), c in extra:
a += [sa]
b += [sb]
cond += [c]
res = (Max(*a), Min(*b)), And(*cond)
if (res[0][0] >= res[0][1]) == True or res[1] == False:
raise IntegralTransformError(
'Mellin', None, 'no combined convergence.')
return res
def mellin_transform(f, x, s, **hints):
r"""
Compute the Mellin transform `F(s)` of `f(x)`,
.. math :: F(s) = \int_0^\infty x^{s-1} f(x) \mathrm{d}x.
For all "sensible" functions, this converges absolutely in a strip
`a < \operatorname{Re}(s) < b`.
The Mellin transform is related via change of variables to the Fourier
transform, and also to the (bilateral) Laplace transform.
This function returns ``(F, (a, b), cond)``
where ``F`` is the Mellin transform of ``f``, ``(a, b)`` is the fundamental strip
(as above), and ``cond`` are auxiliary convergence conditions.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`MellinTransform` object.
For a description of possible hints, refer to the docstring of
    :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``,
then only `F` will be returned (i.e. not ``cond``, and also not the strip
``(a, b)``).
>>> from sympy.integrals.transforms import mellin_transform
>>> from sympy import exp
>>> from sympy.abc import x, s
>>> mellin_transform(exp(-x), x, s)
(gamma(s), (0, oo), True)
See Also
========
inverse_mellin_transform, laplace_transform, fourier_transform
hankel_transform, inverse_hankel_transform
"""
return MellinTransform(f, x, s).doit(**hints)
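# Editor's note: combining the doctest above with the ``noconds`` hint,
# mellin_transform(exp(-x), x, s, noconds=True) should return just gamma(s)
# instead of the full (F, (a, b), cond) tuple.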
def _rewrite_sin(m_n, s, a, b):
"""
Re-write the sine function ``sin(m*s + n)`` as gamma functions, compatible
with the strip (a, b).
Return ``(gamma1, gamma2, fac)`` so that ``f == fac/(gamma1 * gamma2)``.
>>> from sympy.integrals.transforms import _rewrite_sin
>>> from sympy import pi, S
>>> from sympy.abc import s
>>> _rewrite_sin((pi, 0), s, 0, 1)
(gamma(s), gamma(-s + 1), pi)
>>> _rewrite_sin((pi, 0), s, 1, 0)
(gamma(s - 1), gamma(-s + 2), -pi)
>>> _rewrite_sin((pi, 0), s, -1, 0)
(gamma(s + 1), gamma(-s), -pi)
>>> _rewrite_sin((pi, pi/2), s, S(1)/2, S(3)/2)
(gamma(s - 1/2), gamma(-s + 3/2), -pi)
>>> _rewrite_sin((pi, pi), s, 0, 1)
(gamma(s), gamma(-s + 1), -pi)
>>> _rewrite_sin((2*pi, 0), s, 0, S(1)/2)
(gamma(2*s), gamma(-2*s + 1), pi)
>>> _rewrite_sin((2*pi, 0), s, S(1)/2, 1)
(gamma(2*s - 1), gamma(-2*s + 2), -pi)
"""
# (This is a separate function because it is moderately complicated,
# and I want to doctest it.)
# We want to use pi/sin(pi*x) = gamma(x)*gamma(1-x).
    # But there is one complication: the gamma functions determine the
    # integration contour in the definition of the G-function. Usually
# it would not matter if this is slightly shifted, unless this way
# we create an undefined function!
# So we try to write this in such a way that the gammas are
# eminently on the right side of the strip.
from sympy import expand_mul, pi, ceiling, gamma, re
m, n = m_n
m = expand_mul(m/pi)
n = expand_mul(n/pi)
r = ceiling(-m*a - n.as_real_imag()[0]) # Don't use re(n), does not expand
return gamma(m*s + n + r), gamma(1 - n - r - m*s), (-1)**r*pi
class MellinTransformStripError(ValueError):
"""
Exception raised by _rewrite_gamma. Mainly for internal use.
"""
pass
def _rewrite_gamma(f, s, a, b):
"""
Try to rewrite the product f(s) as a product of gamma functions,
so that the inverse Mellin transform of f can be expressed as a meijer
G function.
Return (an, ap), (bm, bq), arg, exp, fac such that
G((an, ap), (bm, bq), arg/z**exp)*fac is the inverse Mellin transform of f(s).
Raises IntegralTransformError or MellinTransformStripError on failure.
It is asserted that f has no poles in the fundamental strip designated by
(a, b). One of a and b is allowed to be None. The fundamental strip is
important, because it determines the inversion contour.
This function can handle exponentials, linear factors, trigonometric
functions.
This is a helper function for inverse_mellin_transform that will not
attempt any transformations on f.
>>> from sympy.integrals.transforms import _rewrite_gamma
>>> from sympy.abc import s
>>> from sympy import oo
>>> _rewrite_gamma(s*(s+3)*(s-1), s, -oo, oo)
(([], [-3, 0, 1]), ([-2, 1, 2], []), 1, 1, -1)
>>> _rewrite_gamma((s-1)**2, s, -oo, oo)
(([], [1, 1]), ([2, 2], []), 1, 1, 1)
Importance of the fundamental strip:
>>> _rewrite_gamma(1/s, s, 0, oo)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, None, oo)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, 0, None)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, -oo, 0)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(1/s, s, None, 0)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(1/s, s, -oo, None)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(2**(-s+3), s, -oo, oo)
(([], []), ([], []), 1/2, 1, 8)
"""
from itertools import repeat
from sympy import (Poly, gamma, Mul, re, RootOf, exp as exp_, E, expand,
roots, ilcm, pi, sin, cos, tan, cot, igcd, exp_polar)
# Our strategy will be as follows:
# 1) Guess a constant c such that the inversion integral should be
# performed wrt s'=c*s (instead of plain s). Write s for s'.
# 2) Process all factors, rewrite them independently as gamma functions in
# argument s, or exponentials of s.
# 3) Try to transform all gamma functions s.t. they have argument
# a+s or a-s.
# 4) Check that the resulting G function parameters are valid.
# 5) Combine all the exponentials.
a_, b_ = S([a, b])
def left(c, is_numer):
"""
Decide whether pole at c lies to the left of the fundamental strip.
"""
# heuristically, this is the best chance for us to solve the inequalities
c = expand(re(c))
if a_ is None:
return c < b_
if b_ is None:
return c <= a_
if (c >= b_) is True:
return False
if (c <= a_) is True:
return True
if is_numer:
return None
if a_.free_symbols or b_.free_symbols or c.free_symbols:
return None # XXX
#raise IntegralTransformError('Inverse Mellin', f,
# 'Could not determine position of singularity %s'
# ' relative to fundamental strip' % c)
raise MellinTransformStripError('Pole inside critical strip?')
# 1)
s_multipliers = []
for g in f.atoms(gamma):
if not g.has(s):
continue
arg = g.args[0]
if arg.is_Add:
arg = arg.as_independent(s)[1]
coeff, _ = arg.as_coeff_mul(s)
s_multipliers += [coeff]
for g in f.atoms(sin, cos, tan, cot):
if not g.has(s):
continue
arg = g.args[0]
if arg.is_Add:
arg = arg.as_independent(s)[1]
coeff, _ = arg.as_coeff_mul(s)
s_multipliers += [coeff/pi]
s_multipliers = [abs(x) for x in s_multipliers if x.is_real]
common_coefficient = S(1)
for x in s_multipliers:
if not x.is_Rational:
common_coefficient = x
break
s_multipliers = [x/common_coefficient for x in s_multipliers]
if any(not x.is_Rational for x in s_multipliers):
raise NotImplementedError
s_multiplier = common_coefficient/reduce(ilcm, [S(x.q)
for x in s_multipliers], S(1))
if s_multiplier == common_coefficient:
if len(s_multipliers) == 0:
s_multiplier = common_coefficient
else:
s_multiplier = common_coefficient \
*reduce(igcd, [S(x.p) for x in s_multipliers])
exponent = S(1)
fac = S(1)
f = f.subs(s, s/s_multiplier)
fac /= s_multiplier
exponent = 1/s_multiplier
if a_ is not None:
a_ *= s_multiplier
if b_ is not None:
b_ *= s_multiplier
# 2)
numer, denom = f.as_numer_denom()
numer = Mul.make_args(numer)
denom = Mul.make_args(denom)
args = list(zip(numer, repeat(True))) + list(zip(denom, repeat(False)))
facs = []
dfacs = []
# *_gammas will contain pairs (a, c) representing Gamma(a*s + c)
numer_gammas = []
denom_gammas = []
# exponentials will contain bases for exponentials of s
exponentials = []
def exception(fact):
return IntegralTransformError("Inverse Mellin", f, "Unrecognised form '%s'." % fact)
while args:
fact, is_numer = args.pop()
if is_numer:
ugammas, lgammas = numer_gammas, denom_gammas
ufacs, lfacs = facs, dfacs
else:
ugammas, lgammas = denom_gammas, numer_gammas
ufacs, lfacs = dfacs, facs
def linear_arg(arg):
""" Test if arg is of form a*s+b, raise exception if not. """
if not arg.is_polynomial(s):
raise exception(fact)
p = Poly(arg, s)
if p.degree() != 1:
raise exception(fact)
return p.all_coeffs()
# constants
if not fact.has(s):
ufacs += [fact]
# exponentials
elif fact.is_Pow or isinstance(fact, exp_):
if fact.is_Pow:
base = fact.base
exp = fact.exp
else:
base = exp_polar(1)
exp = fact.args[0]
if exp.is_Integer:
cond = is_numer
if exp < 0:
cond = not cond
args += [(base, cond)]*abs(exp)
continue
elif not base.has(s):
a, b = linear_arg(exp)
if not is_numer:
base = 1/base
exponentials += [base**a]
facs += [base**b]
else:
raise exception(fact)
# linear factors
elif fact.is_polynomial(s):
p = Poly(fact, s)
if p.degree() != 1:
# We completely factor the poly. For this we need the roots.
# Now roots() only works in some cases (low degree), and RootOf
# only works without parameters. So try both...
coeff = p.LT()[1]
rs = roots(p, s)
if len(rs) != p.degree():
rs = RootOf.all_roots(p)
ufacs += [coeff]
args += [(s - c, is_numer) for c in rs]
continue
a, c = p.all_coeffs()
ufacs += [a]
c /= -a
# Now need to convert s - c
if left(c, is_numer):
ugammas += [(S(1), -c + 1)]
lgammas += [(S(1), -c)]
else:
ufacs += [-1]
ugammas += [(S(-1), c + 1)]
lgammas += [(S(-1), c)]
elif isinstance(fact, gamma):
a, b = linear_arg(fact.args[0])
if is_numer:
if (a > 0 and (left(-b/a, is_numer) is False)) or \
(a < 0 and (left(-b/a, is_numer) is True)):
raise NotImplementedError(
'Gammas partially over the strip.')
ugammas += [(a, b)]
elif isinstance(fact, sin):
# We try to re-write all trigs as gammas. This is not in
# general the best strategy, since sometimes this is impossible,
# but rewriting as exponentials would work. However trig functions
# in inverse mellin transforms usually all come from simplifying
# gamma terms, so this should work.
a = fact.args[0]
if is_numer:
# No problem with the poles.
gamma1, gamma2, fac_ = gamma(a/pi), gamma(1 - a/pi), pi
else:
gamma1, gamma2, fac_ = _rewrite_sin(linear_arg(a), s, a_, b_)
args += [(gamma1, not is_numer), (gamma2, not is_numer)]
ufacs += [fac_]
elif isinstance(fact, tan):
a = fact.args[0]
args += [(sin(a, evaluate=False), is_numer),
(sin(pi/2 - a, evaluate=False), not is_numer)]
elif isinstance(fact, cos):
a = fact.args[0]
args += [(sin(pi/2 - a, evaluate=False), is_numer)]
elif isinstance(fact, cot):
a = fact.args[0]
args += [(sin(pi/2 - a, evaluate=False), is_numer),
(sin(a, evaluate=False), not is_numer)]
else:
raise exception(fact)
fac *= Mul(*facs)/Mul(*dfacs)
# 3)
an, ap, bm, bq = [], [], [], []
for gammas, plus, minus, is_numer in [(numer_gammas, an, bm, True),
(denom_gammas, bq, ap, False)]:
while gammas:
a, c = gammas.pop()
if a != -1 and a != +1:
# We use the gamma function multiplication theorem.
p = abs(S(a))
newa = a/p
newc = c/p
if not a.is_Integer:
raise TypeError("a is not an integer")
for k in range(p):
gammas += [(newa, newc + k/p)]
if is_numer:
fac *= (2*pi)**((1 - p)/2) * p**(c - S(1)/2)
exponentials += [p**a]
else:
fac /= (2*pi)**((1 - p)/2) * p**(c - S(1)/2)
exponentials += [p**(-a)]
continue
if a == +1:
plus.append(1 - c)
else:
minus.append(c)
# 4)
# TODO
# 5)
arg = Mul(*exponentials)
# for testability, sort the arguments
an.sort(key=default_sort_key)
ap.sort(key=default_sort_key)
bm.sort(key=default_sort_key)
bq.sort(key=default_sort_key)
return (an, ap), (bm, bq), arg, exponent, fac
@_noconds_(True)
def _inverse_mellin_transform(F, s, x_, strip, as_meijerg=False):
""" A helper for the real inverse_mellin_transform function, this one here
assumes x to be real and positive. """
from sympy import (expand, expand_mul, hyperexpand, meijerg, And, Or,
arg, pi, re, factor, Heaviside, gamma, Add)
x = _dummy('t', 'inverse-mellin-transform', F, positive=True)
# Actually, we won't try integration at all. Instead we use the definition
# of the Meijer G function as a fairly general inverse mellin transform.
F = F.rewrite(gamma)
for g in [factor(F), expand_mul(F), expand(F)]:
if g.is_Add:
# do all terms separately
ress = [_inverse_mellin_transform(G, s, x, strip, as_meijerg,
noconds=False)
for G in g.args]
conds = [p[1] for p in ress]
ress = [p[0] for p in ress]
res = Add(*ress)
if not as_meijerg:
res = factor(res, gens=res.atoms(Heaviside))
return res.subs(x, x_), And(*conds)
try:
a, b, C, e, fac = _rewrite_gamma(g, s, strip[0], strip[1])
except IntegralTransformError:
continue
G = meijerg(a, b, C/x**e)
if as_meijerg:
h = G
else:
try:
h = hyperexpand(G)
except NotImplementedError as detail:
raise IntegralTransformError(
'Inverse Mellin', F, 'Could not calculate integral')
if h.is_Piecewise and len(h.args) == 3:
# XXX we break modularity here!
h = Heaviside(x - abs(C))*h.args[0].args[0] \
+ Heaviside(abs(C) - x)*h.args[1].args[0]
        # We must ensure that the integral along the line we want converges,
# and return that value.
# See [L], 5.2
cond = [abs(arg(G.argument)) < G.delta*pi]
# Note: we allow ">=" here, this corresponds to convergence if we let
        # limits go to oo symmetrically. ">" corresponds to absolute convergence.
cond += [And(Or(len(G.ap) != len(G.bq), 0 >= re(G.nu) + 1),
abs(arg(G.argument)) == G.delta*pi)]
cond = Or(*cond)
if cond == False:
raise IntegralTransformError(
'Inverse Mellin', F, 'does not converge')
return (h*fac).subs(x, x_), cond
raise IntegralTransformError('Inverse Mellin', F, '')
_allowed = None
class InverseMellinTransform(IntegralTransform):
"""
Class representing unevaluated inverse Mellin transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Mellin transforms, see the
:func:`inverse_mellin_transform` docstring.
"""
_name = 'Inverse Mellin'
_none_sentinel = Dummy('None')
_c = Dummy('c')
def __new__(cls, F, s, x, a, b, **opts):
if a is None:
a = InverseMellinTransform._none_sentinel
if b is None:
b = InverseMellinTransform._none_sentinel
return IntegralTransform.__new__(cls, F, s, x, a, b, **opts)
@property
def fundamental_strip(self):
a, b = self.args[3], self.args[4]
if a is InverseMellinTransform._none_sentinel:
a = None
if b is InverseMellinTransform._none_sentinel:
b = None
return a, b
def _compute_transform(self, F, s, x, **hints):
from sympy import postorder_traversal
global _allowed
if _allowed is None:
from sympy import (
exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh,
coth, factorial, rf)
_allowed = set(
[exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh, coth,
factorial, rf])
for f in postorder_traversal(F):
if f.is_Function and f.has(s) and f.func not in _allowed:
raise IntegralTransformError('Inverse Mellin', F,
'Component %s not recognised.' % f)
strip = self.fundamental_strip
return _inverse_mellin_transform(F, s, x, strip, **hints)
def _as_integral(self, F, s, x):
from sympy import Integral, I, oo
c = self.__class__._c
return Integral(F*x**(-s), (s, c - I*oo, c + I*oo))
def inverse_mellin_transform(F, s, x, strip, **hints):
r"""
Compute the inverse Mellin transform of `F(s)` over the fundamental
strip given by ``strip=(a, b)``.
This can be defined as
.. math:: f(x) = \int_{c - i\infty}^{c + i\infty} x^{-s} F(s) \mathrm{d}s,
for any `c` in the fundamental strip. Under certain regularity
conditions on `F` and/or `f`,
this recovers `f` from its Mellin transform `F`
(and vice versa), for positive real `x`.
One of `a` or `b` may be passed as ``None``; a suitable `c` will be
inferred.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`InverseMellinTransform` object.
Note that this function will assume x to be positive and real, regardless
of the sympy assumptions!
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
>>> from sympy.integrals.transforms import inverse_mellin_transform
>>> from sympy import oo, gamma
>>> from sympy.abc import x, s
>>> inverse_mellin_transform(gamma(s), s, x, (0, oo))
exp(-x)
The fundamental strip matters:
>>> f = 1/(s**2 - 1)
>>> inverse_mellin_transform(f, s, x, (-oo, -1))
(x/2 - 1/(2*x))*Heaviside(x - 1)
>>> inverse_mellin_transform(f, s, x, (-1, 1))
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
>>> inverse_mellin_transform(f, s, x, (1, oo))
(-x/2 + 1/(2*x))*Heaviside(-x + 1)
See Also
========
mellin_transform
hankel_transform, inverse_hankel_transform
"""
return InverseMellinTransform(F, s, x, strip[0], strip[1]).doit(**hints)
##########################################################################
# Laplace Transform
##########################################################################
def _simplifyconds(expr, s, a):
r"""
    Naively simplify some conditions occurring in ``expr``, given that `\operatorname{Re}(s) > a`.
>>> from sympy.integrals.transforms import _simplifyconds as simp
>>> from sympy.abc import x
>>> from sympy import sympify as S
>>> simp(abs(x**2) < 1, x, 1)
False
>>> simp(abs(x**2) < 1, x, 2)
False
>>> simp(abs(x**2) < 1, x, 0)
Abs(x**2) < 1
>>> simp(abs(1/x**2) < 1, x, 1)
True
>>> simp(S(1) < abs(x), x, 1)
True
>>> simp(S(1) < abs(1/x), x, 1)
False
>>> from sympy import Ne
>>> simp(Ne(1, x**3), x, 1)
True
>>> simp(Ne(1, x**3), x, 2)
True
>>> simp(Ne(1, x**3), x, 0)
1 != x**3
"""
from sympy.core.relational import ( StrictGreaterThan, StrictLessThan,
Unequality )
from sympy import Abs
def power(ex):
if ex == s:
return 1
if ex.is_Pow and ex.base == s:
return ex.exp
return None
def bigger(ex1, ex2):
""" Return True only if |ex1| > |ex2|, False only if |ex1| < |ex2|.
Else return None. """
if ex1.has(s) and ex2.has(s):
return None
if ex1.func is Abs:
ex1 = ex1.args[0]
if ex2.func is Abs:
ex2 = ex2.args[0]
if ex1.has(s):
return bigger(1/ex2, 1/ex1)
n = power(ex2)
if n is None:
return None
if n > 0 and (abs(ex1) <= abs(a)**n) is True:
return False
if n < 0 and (abs(ex1) >= abs(a)**n) is True:
return True
def replie(x, y):
""" simplify x < y """
if not (x.is_positive or x.func is Abs) \
or not (y.is_positive or y.func is Abs):
return (x < y)
r = bigger(x, y)
if r is not None:
return not r
return (x < y)
def replue(x, y):
if bigger(x, y) in (True, False):
return True
return Unequality(x, y)
def repl(ex, *args):
if isinstance(ex, bool):
return ex
return ex.replace(*args)
expr = repl(expr, StrictLessThan, replie)
expr = repl(expr, StrictGreaterThan, lambda x, y: replie(y, x))
expr = repl(expr, Unequality, replue)
return expr
@_noconds
def _laplace_transform(f, t, s_, simplify=True):
""" The backend function for Laplace transforms. """
from sympy import (re, Max, exp, pi, Abs, Min, periodic_argument as arg,
cos, Wild, symbols, polar_lift)
s = Dummy('s')
F = integrate(exp(-s*t) * f, (t, 0, oo))
if not F.has(Integral):
return _simplify(F.subs(s, s_), simplify), -oo, True
if not F.is_Piecewise:
raise IntegralTransformError(
'Laplace', f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(
'Laplace', f, 'integral in unexpected form')
def process_conds(conds):
""" Turn ``conds`` into a strip and auxiliary conditions. """
a = -oo
aux = True
conds = conjuncts(to_cnf(conds))
u = Dummy('u', real=True)
p, q, w1, w2, w3, w4, w5 = symbols(
'p q w1 w2 w3 w4 w5', cls=Wild, exclude=[s])
for c in conds:
a_ = oo
aux_ = []
for d in disjuncts(c):
m = d.match(abs(arg((s + w3)**p*q, w1)) < w2)
if not m:
m = d.match(abs(arg((s + w3)**p*q, w1)) <= w2)
if not m:
m = d.match(abs(arg((polar_lift(s + w3))**p*q, w1)) < w2)
if not m:
m = d.match(abs(arg((polar_lift(s + w3))**p*q, w1)) <= w2)
if m:
if m[q] > 0 and m[w2]/m[p] == pi/2:
d = re(s + m[w3]) > 0
m = d.match(
0 < cos(abs(arg(s**w1*w5, q))*w2)*abs(s**w3)**w4 - p)
if not m:
m = d.match(0 < cos(abs(
arg(polar_lift(s)**w1*w5, q))*w2)*abs(s**w3)**w4 - p)
if m and all(m[wild] > 0 for wild in [w1, w2, w3, w4, w5]):
d = re(s) > m[p]
d_ = d.replace(
re, lambda x: x.expand().as_real_imag()[0]).subs(re(s), t)
if not d.is_Relational or \
d.rel_op not in ('>', '>=', '<', '<=') \
or d_.has(s) or not d_.has(t):
aux_ += [d]
continue
soln = _solve_inequality(d_, t)
if not soln.is_Relational or \
soln.rel_op not in ('>', '>=', '<', '<='):
aux_ += [d]
continue
if soln.lts == t:
raise IntegralTransformError('Laplace', f,
'convergence not in half-plane?')
else:
a_ = Min(soln.lts, a_)
if a_ != oo:
a = Max(a_, a)
else:
aux = And(aux, Or(*aux_))
return a, aux
conds = [process_conds(c) for c in disjuncts(cond)]
conds2 = [x for x in conds if x[1] != False and x[0] != -oo]
if not conds2:
conds2 = [x for x in conds if x[1] != False]
conds = conds2
def cnt(expr):
if isinstance(expr, bool):
return 0
return expr.count_ops()
conds.sort(key=lambda x: (-x[0], cnt(x[1])))
if not conds:
raise IntegralTransformError('Laplace', f, 'no convergence found')
a, aux = conds[0]
def sbs(expr):
if isinstance(expr, bool):
return expr
return expr.subs(s, s_)
if simplify:
F = _simplifyconds(F, s, a)
aux = _simplifyconds(aux, s, a)
return _simplify(F.subs(s, s_), simplify), sbs(a), sbs(aux)
class LaplaceTransform(IntegralTransform):
"""
Class representing unevaluated Laplace transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Laplace transforms, see the :func:`laplace_transform`
docstring.
"""
_name = 'Laplace'
def _compute_transform(self, f, t, s, **hints):
return _laplace_transform(f, t, s, **hints)
def _as_integral(self, f, t, s):
from sympy import Integral, exp
return Integral(f*exp(-s*t), (t, 0, oo))
def _collapse_extra(self, extra):
from sympy import And, Max
conds = []
planes = []
for plane, cond in extra:
conds.append(cond)
planes.append(plane)
cond = And(*conds)
plane = Max(*planes)
if cond == False:
raise IntegralTransformError(
'Laplace', None, 'No combined convergence.')
return plane, cond
def laplace_transform(f, t, s, **hints):
r"""
Compute the Laplace Transform `F(s)` of `f(t)`,
.. math :: F(s) = \int_0^\infty e^{-st} f(t) \mathrm{d}t.
For all "sensible" functions, this converges absolutely in a
half plane `a < \operatorname{Re}(s)`.
This function returns ``(F, a, cond)``
where ``F`` is the Laplace transform of ``f``, `\operatorname{Re}(s) > a` is the half-plane
of convergence, and ``cond`` are auxiliary convergence conditions.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`LaplaceTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``,
only `F` will be returned (i.e. not ``cond``, and also not the plane ``a``).
>>> from sympy.integrals import laplace_transform
>>> from sympy.abc import t, s, a
>>> laplace_transform(t**a, t, s)
(s**(-a)*gamma(a + 1)/s, 0, -re(a) < 1)
See Also
========
inverse_laplace_transform, mellin_transform, fourier_transform
hankel_transform, inverse_hankel_transform
"""
return LaplaceTransform(f, t, s).doit(**hints)
@_noconds_(True)
def _inverse_laplace_transform(F, s, t_, plane, simplify=True):
""" The backend function for inverse Laplace transforms. """
from sympy import exp, Heaviside, log, expand_complex, Integral, Piecewise
from sympy.integrals.meijerint import meijerint_inversion, _get_coeff_exp
# There are two strategies we can try:
# 1) Use inverse mellin transforms - related by a simple change of variables.
# 2) Use the inversion integral.
t = Dummy('t', real=True)
def pw_simp(*args):
""" Simplify a piecewise expression from hyperexpand. """
# XXX we break modularity here!
if len(args) != 3:
return Piecewise(*args)
arg = args[2].args[0].argument
coeff, exponent = _get_coeff_exp(arg, t)
e1 = args[0].args[0]
e2 = args[1].args[0]
return Heaviside(1/abs(coeff) - t**exponent)*e1 \
+ Heaviside(t**exponent - 1/abs(coeff))*e2
try:
f, cond = inverse_mellin_transform(F, s, exp(-t), (None, oo),
needeval=True, noconds=False)
except IntegralTransformError:
f = None
if f is None:
f = meijerint_inversion(F, s, t)
if f is None:
raise IntegralTransformError('Inverse Laplace', f, '')
if f.is_Piecewise:
f, cond = f.args[0]
if f.has(Integral):
raise IntegralTransformError('Inverse Laplace', f,
'inversion integral of unrecognised form.')
else:
cond = True
f = f.replace(Piecewise, pw_simp)
if f.is_Piecewise:
# many of the functions called below can't work with piecewise
# (b/c it has a bool in args)
return f.subs(t, t_), cond
u = Dummy('u')
def simp_heaviside(arg):
a = arg.subs(exp(-t), u)
if a.has(t):
return Heaviside(arg)
rel = _solve_inequality(a > 0, u)
if rel.lts == u:
k = log(rel.gts)
return Heaviside(t + k)
else:
k = log(rel.lts)
return Heaviside(-(t + k))
f = f.replace(Heaviside, simp_heaviside)
def simp_exp(arg):
return expand_complex(exp(arg))
f = f.replace(exp, simp_exp)
# TODO it would be nice to fix cosh and sinh ... simplify messes these
# exponentials up
return _simplify(f.subs(t, t_), simplify), cond
class InverseLaplaceTransform(IntegralTransform):
"""
Class representing unevaluated inverse Laplace transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Laplace transforms, see the
:func:`inverse_laplace_transform` docstring.
"""
_name = 'Inverse Laplace'
_none_sentinel = Dummy('None')
_c = Dummy('c')
def __new__(cls, F, s, x, plane, **opts):
if plane is None:
plane = InverseLaplaceTransform._none_sentinel
return IntegralTransform.__new__(cls, F, s, x, plane, **opts)
@property
def fundamental_plane(self):
plane = self.args[3]
if plane is InverseLaplaceTransform._none_sentinel:
plane = None
return plane
def _compute_transform(self, F, s, t, **hints):
return _inverse_laplace_transform(F, s, t, self.fundamental_plane, **hints)
def _as_integral(self, F, s, t):
from sympy import I, Integral, exp
c = self.__class__._c
return Integral(exp(s*t)*F, (s, c - I*oo, c + I*oo))
def inverse_laplace_transform(F, s, t, plane=None, **hints):
r"""
Compute the inverse Laplace transform of `F(s)`, defined as
.. math :: f(t) = \int_{c-i\infty}^{c+i\infty} e^{st} F(s) \mathrm{d}s,
    for `c` so large that `F(s)` has no singularities in the
half-plane `\operatorname{Re}(s) > c-\epsilon`.
The plane can be specified by
argument ``plane``, but will be inferred if passed as None.
Under certain regularity conditions, this recovers `f(t)` from its
Laplace Transform `F(s)`, for non-negative `t`, and vice
versa.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`InverseLaplaceTransform` object.
Note that this function will always assume `t` to be real,
regardless of the sympy assumption on `t`.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
>>> from sympy.integrals.transforms import inverse_laplace_transform
>>> from sympy import exp, Symbol
>>> from sympy.abc import s, t
>>> a = Symbol('a', positive=True)
>>> inverse_laplace_transform(exp(-a*s)/s, s, t)
Heaviside(-a + t)
See Also
========
laplace_transform
hankel_transform, inverse_hankel_transform
"""
return InverseLaplaceTransform(F, s, t, plane).doit(**hints)
##########################################################################
# Fourier Transform
##########################################################################
@_noconds_(True)
def _fourier_transform(f, x, k, a, b, name, simplify=True):
"""
Compute a general Fourier-type transform
F(k) = a int_-oo^oo exp(b*I*x*k) f(x) dx.
For suitable choice of a and b, this reduces to the standard Fourier
and inverse Fourier transforms.
"""
from sympy import exp, I, oo
F = integrate(a*f*exp(b*I*x*k), (x, -oo, oo))
if not F.has(Integral):
return _simplify(F, simplify), True
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
class FourierTypeTransform(IntegralTransform):
""" Base class for Fourier transforms.
Specify cls._a and cls._b.
"""
def _compute_transform(self, f, x, k, **hints):
return _fourier_transform(f, x, k,
self.__class__._a, self.__class__._b,
self.__class__._name, **hints)
def _as_integral(self, f, x, k):
from sympy import Integral, exp, I
a = self.__class__._a
b = self.__class__._b
return Integral(a*f*exp(b*I*x*k), (x, -oo, oo))
class FourierTransform(FourierTypeTransform):
"""
Class representing unevaluated Fourier transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Fourier transforms, see the :func:`fourier_transform`
docstring.
"""
_name = 'Fourier'
_a = 1
_b = -2*S.Pi
def fourier_transform(f, x, k, **hints):
r"""
Compute the unitary, ordinary-frequency Fourier transform of `f`, defined
as
.. math:: F(k) = \int_{-\infty}^\infty f(x) e^{-2\pi i x k} \mathrm{d} x.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`FourierTransform` object.
For other Fourier transform conventions, see the function
:func:`sympy.integrals.transforms._fourier_transform`.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import fourier_transform, exp
>>> from sympy.abc import x, k
>>> fourier_transform(exp(-x**2), x, k)
sqrt(pi)*exp(-pi**2*k**2)
>>> fourier_transform(exp(-x**2), x, k, noconds=False)
(sqrt(pi)*exp(-pi**2*k**2), True)
See Also
========
inverse_fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return FourierTransform(f, x, k).doit(**hints)
class InverseFourierTransform(FourierTypeTransform):
"""
Class representing unevaluated inverse Fourier transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Fourier transforms, see the
:func:`inverse_fourier_transform` docstring.
"""
_name = 'Inverse Fourier'
_a = 1
_b = 2*S.Pi
def inverse_fourier_transform(F, k, x, **hints):
r"""
Compute the unitary, ordinary-frequency inverse Fourier transform of `F`,
defined as
.. math:: f(x) = \int_{-\infty}^\infty F(k) e^{2\pi i x k} \mathrm{d} k.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseFourierTransform` object.
For other Fourier transform conventions, see the function
:func:`sympy.integrals.transforms._fourier_transform`.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import inverse_fourier_transform, exp, sqrt, pi
>>> from sympy.abc import x, k
>>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x)
exp(-x**2)
>>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x, noconds=False)
(exp(-x**2), True)
See Also
========
fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return InverseFourierTransform(F, k, x).doit(**hints)
##########################################################################
# Fourier Sine and Cosine Transform
##########################################################################
from sympy import sin, cos, sqrt, pi, I, oo
@_noconds_(True)
def _sine_cosine_transform(f, x, k, a, b, K, name, simplify=True):
"""
Compute a general sine or cosine-type transform
        F(k) = a int_0^oo f(x) sin(b*x*k) dx.
        F(k) = a int_0^oo f(x) cos(b*x*k) dx.
For suitable choice of a and b, this reduces to the standard sine/cosine
and inverse sine/cosine transforms.
"""
F = integrate(a*f*K(b*x*k), (x, 0, oo))
if not F.has(Integral):
return _simplify(F, simplify), True
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
class SineCosineTypeTransform(IntegralTransform):
"""
Base class for sine and cosine transforms.
Specify cls._a and cls._b and cls._kern.
"""
def _compute_transform(self, f, x, k, **hints):
return _sine_cosine_transform(f, x, k,
self.__class__._a, self.__class__._b,
self.__class__._kern,
self.__class__._name, **hints)
def _as_integral(self, f, x, k):
from sympy import Integral, exp, I
a = self.__class__._a
b = self.__class__._b
K = self.__class__._kern
return Integral(a*f*K(b*x*k), (x, 0, oo))
class SineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated sine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute sine transforms, see the :func:`sine_transform`
docstring.
"""
_name = 'Sine'
_kern = sin
_a = sqrt(2)/sqrt(pi)
_b = 1
def sine_transform(f, x, k, **hints):
r"""
Compute the unitary, ordinary-frequency sine transform of `f`, defined
as
.. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \sin(2\pi x k) \mathrm{d} x.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`SineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import sine_transform, exp
>>> from sympy.abc import x, k, a
>>> sine_transform(x*exp(-a*x**2), x, k)
sqrt(2)*k*exp(-k**2/(4*a))/(4*a**(3/2))
>>> sine_transform(x**(-a), x, k)
2**(-a + 1/2)*k**(a - 1)*gamma(-a/2 + 1)/gamma(a/2 + 1/2)
See Also
========
fourier_transform, inverse_fourier_transform
inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return SineTransform(f, x, k).doit(**hints)
class InverseSineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated inverse sine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse sine transforms, see the
:func:`inverse_sine_transform` docstring.
"""
_name = 'Inverse Sine'
_kern = sin
_a = sqrt(2)/sqrt(pi)
_b = 1
def inverse_sine_transform(F, k, x, **hints):
r"""
Compute the unitary, ordinary-frequency inverse sine transform of `F`,
defined as
.. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \sin(2\pi x k) \mathrm{d} k.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseSineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import inverse_sine_transform, exp, sqrt, gamma, pi
>>> from sympy.abc import x, k, a
>>> inverse_sine_transform(2**((1-2*a)/2)*k**(a - 1)*
... gamma(-a/2 + 1)/gamma((a+1)/2), k, x)
x**(-a)
>>> inverse_sine_transform(sqrt(2)*k*exp(-k**2/(4*a))/(4*sqrt(a)**3), k, x)
x*exp(-a*x**2)
See Also
========
fourier_transform, inverse_fourier_transform
sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return InverseSineTransform(F, k, x).doit(**hints)
class CosineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated cosine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute cosine transforms, see the :func:`cosine_transform`
docstring.
"""
_name = 'Cosine'
_kern = cos
_a = sqrt(2)/sqrt(pi)
_b = 1
def cosine_transform(f, x, k, **hints):
r"""
Compute the unitary, ordinary-frequency cosine transform of `f`, defined
as
.. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \cos(2\pi x k) \mathrm{d} x.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`CosineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import cosine_transform, exp, sqrt, cos
>>> from sympy.abc import x, k, a
>>> cosine_transform(exp(-a*x), x, k)
sqrt(2)*a/(sqrt(pi)*(a**2 + k**2))
>>> cosine_transform(exp(-a*sqrt(x))*cos(a*sqrt(x)), x, k)
a*exp(-a**2/(2*k))/(2*k**(3/2))
See Also
========
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform
inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return CosineTransform(f, x, k).doit(**hints)
class InverseCosineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated inverse cosine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse cosine transforms, see the
:func:`inverse_cosine_transform` docstring.
"""
_name = 'Inverse Cosine'
_kern = cos
_a = sqrt(2)/sqrt(pi)
_b = 1
def inverse_cosine_transform(F, k, x, **hints):
r"""
Compute the unitary, ordinary-frequency inverse cosine transform of `F`,
defined as
.. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \cos(2\pi x k) \mathrm{d} k.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseCosineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import inverse_cosine_transform, exp, sqrt, pi
>>> from sympy.abc import x, k, a
>>> inverse_cosine_transform(sqrt(2)*a/(sqrt(pi)*(a**2 + k**2)), k, x)
exp(-a*x)
>>> inverse_cosine_transform(1/sqrt(k), k, x)
1/sqrt(x)
See Also
========
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform
cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return InverseCosineTransform(F, k, x).doit(**hints)
##########################################################################
# Hankel Transform
##########################################################################
@_noconds_(True)
def _hankel_transform(f, r, k, nu, name, simplify=True):
"""
Compute a general Hankel transform
.. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r.
"""
from sympy import besselj, oo
F = integrate(f*besselj(nu, k*r)*r, (r, 0, oo))
if not F.has(Integral):
return _simplify(F, simplify), True
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
class HankelTypeTransform(IntegralTransform):
"""
Base class for Hankel transforms.
"""
def doit(self, **hints):
return self._compute_transform(self.function,
self.function_variable,
self.transform_variable,
self.args[3],
**hints)
def _compute_transform(self, f, r, k, nu, **hints):
return _hankel_transform(f, r, k, nu, self._name, **hints)
def _as_integral(self, f, r, k, nu):
from sympy import Integral, besselj, oo
return Integral(f*besselj(nu, k*r)*r, (r, 0, oo))
@property
def as_integral(self):
return self._as_integral(self.function,
self.function_variable,
self.transform_variable,
self.args[3])
class HankelTransform(HankelTypeTransform):
"""
Class representing unevaluated Hankel transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Hankel transforms, see the :func:`hankel_transform`
docstring.
"""
_name = 'Hankel'
def hankel_transform(f, r, k, nu, **hints):
r"""
Compute the Hankel transform of `f`, defined as
.. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`HankelTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import hankel_transform, inverse_hankel_transform
>>> from sympy import gamma, exp, sinh, cosh
>>> from sympy.abc import r, k, m, nu, a
>>> ht = hankel_transform(1/r**m, r, k, nu)
>>> ht
2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2)
>>> inverse_hankel_transform(ht, k, r, nu)
r**(-m)
>>> ht = hankel_transform(exp(-a*r), r, k, 0)
>>> ht
a/(k**3*(a**2/k**2 + 1)**(3/2))
>>> inverse_hankel_transform(ht, k, r, 0)
exp(-a*r)
See Also
========
fourier_transform, inverse_fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
inverse_hankel_transform
mellin_transform, laplace_transform
"""
return HankelTransform(f, r, k, nu).doit(**hints)
class InverseHankelTransform(HankelTypeTransform):
"""
Class representing unevaluated inverse Hankel transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Hankel transforms, see the
:func:`inverse_hankel_transform` docstring.
"""
_name = 'Inverse Hankel'
def inverse_hankel_transform(F, k, r, nu, **hints):
r"""
Compute the inverse Hankel transform of `F` defined as
.. math:: f(r) = \int_{0}^\infty F_\nu(k) J_\nu(k r) k \mathrm{d} k.
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseHankelTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
>>> from sympy import hankel_transform, inverse_hankel_transform, gamma
>>> from sympy import gamma, exp, sinh, cosh
>>> from sympy.abc import r, k, m, nu, a
>>> ht = hankel_transform(1/r**m, r, k, nu)
>>> ht
2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2)
>>> inverse_hankel_transform(ht, k, r, nu)
r**(-m)
>>> ht = hankel_transform(exp(-a*r), r, k, 0)
>>> ht
a/(k**3*(a**2/k**2 + 1)**(3/2))
>>> inverse_hankel_transform(ht, k, r, 0)
exp(-a*r)
See Also
========
fourier_transform, inverse_fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform
mellin_transform, laplace_transform
"""
return InverseHankelTransform(F, k, r, nu).doit(**hints)
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/sympy/integrals/transforms.py
|
Python
|
gpl-3.0
| 61,082
|
from __future__ import absolute_import, print_function, division
def MonObuVar(L_m, siteConf):
"""Redirects to stabilityParam()"""
return stabilityParam(L_m, siteConf)
def stabilityParam(L_m, siteConf):
"""
Calculates the Monin-Obukhov Similarity Variable
defined as
    zeta = (z-d)/L_m
where d is the displacement height or zero-plane displacement
and L_m is the Monin-Obukhov Length.
"""
z=siteConf.measurement_height
d=siteConf.displacement_height
zeta = (z-d)/L_m
return zeta
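# A minimal usage sketch (editor's addition, not part of pymicra), assuming a
# siteConfig-like object that exposes the two attributes read above, with a
# measurement height of 10 m, a displacement height of 2 m and L_m = -50 m:
#
#     class _Site(object):
#         measurement_height = 10.0
#         displacement_height = 2.0
#
#     stabilityParam(-50.0, _Site())   # -> (10.0 - 2.0)/(-50.0) = -0.16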
def MonObuLen(theta_v_star, theta_v_mean, u_star, g=None):
"""Redirects to obukhovLen()"""
return obukhovLen(theta_v_star, theta_v_mean, u_star)
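# Editor's note: this legacy wrapper still forwards the old positional
# arguments (theta_v_star, theta_v_mean, u_star), while obukhovLen() below now
# expects (data, units, ...); callers of the old interface will likely need to
# be updated rather than relying on this redirect.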
def obukhovLen(data, units, theta_v_mean=None, theta_v_mean_unit=None, notation=None, inplace_units=True):
"""
Calculates the Monin-Obukhov Length
according to:
GARRAT, The atmospheric boundary layer, 1992 (eq. 1.11, p. 10)
L = ( u_star^2 * theta_v ) / ( kappa * g * theta_v_star )
KUNDU, Fluid mechanics, 1990 (eq 71, chap. 12, p. 462)
L_M = - u_star^3 / ( kappa * alpha * g * cov(w,T') )
ARYA, Introduction to micrometeorology (eq. 11.1, p. 214)
L = - u_star^3 / (kappa * (g/T_0) * (H_0/(rho*c_p)) )
STULL, An introduction to Boundary layer meteorology, 1988 (eq. 5.7b, p. 181)
L = - ( theta_v * u_star^3 ) / ( kappa *g* cov(w',theta_v') )
"""
from .. import constants
from .. import algs
defs = algs.get_notation(notation)
data = data.copy()
g = constants.gravity
kappa = constants.kappa
    if theta_v_mean is not None:
        if not theta_v_mean_unit:
            raise ValueError('Must provide theta_v_mean_unit keyword if theta_v_mean is provided')
    else:
        theta_v_mean = data[ defs.mean_virtual_temperature ]
        # Editor's assumption: the unit of the in-dataset mean virtual
        # temperature is stored in the units dict under the same notation key.
        theta_v_mean_unit = units[ defs.mean_virtual_temperature ]
num = (data[ defs.u_star ]**2.)*theta_v_mean
num_unit = (units[ defs.u_star ]**2.)*theta_v_mean_unit
denom = kappa*g*data[ defs.virtual_temperature_star ]
denom_unit = constants.units['gravity']*units[ defs.virtual_temperature_star ]
Lo = - num/denom
Lo_unit = num_unit/denom_unit
if inplace_units:
units.update({defs.obukhov_length:Lo_unit})
return Lo
else:
return Lo, Lo_unit
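# --- Hedged usage sketch (editorial, not part of the original module) --------
# obukhovLen() expects a Series/DataFrame-like `data` indexable by the notation
# names for u_star and theta_v_star, plus a matching `units` dict of pint
# quantities. The notation attributes and numbers below are illustrative only.
#
#   import pandas as pd
#   from pymicra import algs, ureg
#   defs = algs.get_notation(None)
#   scales = pd.Series({defs.u_star: 0.35,
#                       defs.virtual_temperature_star: -0.05})
#   scale_units = {defs.u_star: ureg('m/s'),
#                  defs.virtual_temperature_star: ureg('kelvin')}
#   Lo = obukhovLen(scales, scale_units,
#                   theta_v_mean=300., theta_v_mean_unit=ureg('kelvin'))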
def turbulentScales(data, siteConf, units, notation=None, theta_v_mean=None, theta_v_mean_unit=None,
theta_fluct_from_theta_v=True, solutes=[], output_as_df=True, inplace_units=True):
"""
Calculates characteristic lengths for data
    The names of the variables are retrieved from the notation dictionary. You can update the
    dictionary and change the names by using the notation keyword, which is a notation object
Parameters
----------
data: pandas.DataFrame
dataset to be used. It must either be the raw and turbulent data, or the covariances of such data
siteConf: pymicra.siteConfig object
has the site configurations to calculate the obukhovLen
units: dict
dict units for the input data
output_as_df: boolean
True if you want the output to be a one-line pandas.DataFrame. A pd.Series
will be output if False.
inplace_units: bool
whether or not to update the units dict in place
Returns
-------
    pandas.Series or pandas.DataFrame
        depending on output_as_df
"""
from .. import constants
from .. import algs
from .. import ureg
import pandas as pd
import numpy as np
defs = algs.get_notation(notation)
defsdic = defs.__dict__
data = data.copy()
outunits = {}
cunits = constants.units
print('Beginning to extract turbulent scales...')
#---------
# First we define the names of the columns according to notation
u_fluc = defs.u_fluctuations
w_fluc = defs.w_fluctuations
mrho_h2o_fluc = defs.h2o_molar_density_fluctuations
rho_h2o_fluc = defs.h2o_mass_density_fluctuations
theta_fluc = defs.thermodyn_temp_fluctuations
theta_v_fluc = defs.virtual_temp_fluctuations
q_fluc = defs.specific_humidity_fluctuations
solutesf = [ defsdic['%s_molar_density_fluctuations' % solute] for solute in solutes ]
solutestars = [ defsdic['%s_molar_density_star' % solute] for solute in solutes ]
concsolutesf = [ defsdic['%s_mass_concentration_fluctuations' % solute] for solute in solutes ]
#---------
#---------
# If data is already covariances we go from there
if (data.shape[0] == data.shape[1]) and all(data.index == data.columns):
        print('Data seems to be covariances. Will use it as covariances ...')
cov = data.copy()
outname = None
#---------
#---------
# If data is raw data, calculate covariances.
else:
print('Data seems to be raw data. Will calculate covariances ...')
#---------
# Now we try to calculate or identify the fluctuations of theta
outname = data.index[0]
theta_mean = data[ defs.thermodyn_temp ].mean()
if (theta_fluc not in data.columns) or theta_fluct_from_theta_v:
print('Fluctuations of theta not found. Will try to calculate it ... ', end='')
#---------
# We need the mean of the specific humidity and temperature
if not (units[ theta_v_fluc ]==ureg('kelvin') and units[ defs.thermodyn_temp ]==ureg('kelvin')):
                raise TypeError('\nUnits for both the virtual temperature fluctuations and the thermodynamic temperature must be Kelvin')
data_q_mean = data[ defs.specific_humidity ].mean()
data[ theta_fluc ] = (data[theta_v_fluc] - 0.61*theta_mean*data[q_fluc])/(1. + 0.61*data_q_mean)
theta_fluc_unit = units[ theta_v_fluc ]
print('done!')
#---------
#---------
#-----------
# First we construct the covariance matrix (slower but more readable than doing it separately)
# maybe figure out later a way that is both faster and more readable
print('Calculating the covariances ... ', end='')
cov = data[[u_fluc, w_fluc, theta_v_fluc, theta_fluc, q_fluc, mrho_h2o_fluc] + solutesf ].cov()
print('done!')
#-----------
#---------
#---------
# Now to calculate the characteristic lengths, scales and etc
print('Calculating the turbulent scales of wind, temperature and humidity ... ', end='')
out = pd.Series(name=outname)
u_star = np.sqrt(-cov.loc[u_fluc, w_fluc])
out[ defs.u_star ] = u_star
theta_v_star = cov.loc[theta_v_fluc, w_fluc] / u_star
out[ defs.virtual_temp_star ] = theta_v_star
out[ defs.thermodyn_temp_star ] = cov.loc[theta_fluc, w_fluc] / u_star
out[ defs.h2o_molar_density_star ] = cov.loc[ mrho_h2o_fluc, w_fluc ] / u_star
print('done!')
#---------
#---------
# Now we set the units of the legths
outunits = {}
outunits[ defs.u_star ] = units[ u_fluc ]
outunits[ defs.virtual_temp_star ] = units[ theta_v_fluc ]
outunits[ defs.thermodyn_temp_star ] = units[ theta_v_fluc ]
outunits[ defs.h2o_molar_density_star ] = units[ mrho_h2o_fluc ]
#---------
#---------
# The solutes have to be calculated separately
for sol_star, sol_fluc, sol in zip(solutestars, solutesf, solutes):
print('Calculating the turbulent scale of %s ... ' % sol, end='')
out[ sol_star ] = cov.loc[sol_fluc, w_fluc] / u_star
outunits[ sol_star ] = units[ sol_fluc ]
print('done!')
#---------
#---------
# We check for the mean virtual temperature
if not theta_v_mean:
if defs.mean_virtual_temperature in data.columns:
theta_v_mean = data[ defs.mean_virtual_temperature ].mean()
theta_v_mean_unit = units[defs.mean_virtual_temperature]
else:
theta_v_mean = data[ defs.virtual_temperature ].mean()
theta_v_mean_unit = units[defs.virtual_temperature]
#---------
#---------
# Now we calculate the obukhov length and the similarity variable
print('Calculating Obukhov length and stability parameter ... ', end='')
Lo = obukhovLen(out, outunits, theta_v_mean=theta_v_mean, theta_v_mean_unit=theta_v_mean_unit, inplace_units=True)
out[ defs.obukhov_length ] = Lo
out[ defs.stability_parameter ] = stabilityParam(Lo, siteConf)
outunits[ defs.obukhov_length ] = (outunits[ defs.u_star ]**2.)/cunits[ 'gravity' ]
outunits[ defs.stability_parameter ] = ureg.meter/outunits[ defs.obukhov_length ]
print('done!')
#---------
#---------
# Create a one-row dataframe if output_as_df is True
if output_as_df:
out = out.to_frame().T
#---------
#---------
# Finally we construct the output dataframe
if inplace_units:
units.update(outunits)
return out
else:
return out, outunits
#---------
|
tomchor/pymicra
|
pymicra/micro/scales.py
|
Python
|
gpl-3.0
| 8,923
|
import logging
from .base import AbstractHandler
logger = logging.getLogger(__name__)
class SearchHandler(AbstractHandler):
"""
Supported query parameters:
search keyword
search keyword [type=playlist]
TODO:
search keyword [type=all,source=netease,limit=10]
"""
cmds = 'search'
def handle(self, cmd):
return self.search(cmd.args[0], cmd.options)
def search(self, keyword, options=None):
"""
        :param string keyword: search keyword
:param dict options: search options
:return:
"""
providers = self.library.list()
source_in = [provd.identifier for provd in providers
if provd.Song.meta.allow_get]
params = {}
if options is not None:
type_in = options.pop('type', None)
source_in = options.pop('source', None)
source_in_list = []
if source_in is None:
source_in_list = source_in
elif isinstance(source_in, str):
source_in_list = source_in.split(',')
else:
assert isinstance(source_in, list)
source_in_list = source_in
if type_in is not None:
params['type_in'] = type_in
params['source_in'] = source_in_list
if options:
logger.warning('Unknown cmd options: %s', options)
# TODO: limit output lines
return list(self.library.search(keyword, **params))
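# Hedged illustration (editorial, not part of the original handler): for a call
# like
#   search('hello', options={'type': 'playlist', 'source': 'netease,xiami'})
# the params built above end up as
#   {'type_in': 'playlist', 'source_in': ['netease', 'xiami']}
# before being passed on to self.library.search().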
|
cosven/FeelUOwn
|
feeluown/server/handlers/search.py
|
Python
|
gpl-3.0
| 1,516
|
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.module_utils import basic
from library import mysql_query
from tests.fixtures import MYSQL_CONNECTION_PARAMS, Fixture
from tests.utils import set_module_args, AnsibleFailJson, exit_json, fail_json, AnsibleExitJson
class MysqlQueryMultiCheckTest(unittest.TestCase):
def setUp(self):
self.module = mysql_query
self.mock_exit_fail = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
self.mock_exit_fail.start()
self.addCleanup(self.mock_exit_fail.stop)
self.f = Fixture()
self.f.create_database()
self.f.create_multicolumn_example()
def tearDown(self):
self.f.close()
def test_insert_required(self):
set_module_args(
login_user=MYSQL_CONNECTION_PARAMS['user'],
name=MYSQL_CONNECTION_PARAMS['db'],
login_password=MYSQL_CONNECTION_PARAMS['passwd'],
login_host=MYSQL_CONNECTION_PARAMS['host'],
table='multicolumn_example',
identifiers=dict(identifier1='elmar@athmer.org', identifier2='4', identifier3='testInsert'),
values=dict(value1='8', value2='admin', value3="made up"),
_ansible_check_mode=True,
)
with self.assertRaises(AnsibleExitJson) as e:
self.module.main()
result = e.exception.args[0]
self.assertTrue(result['changed'], 'a required change is detected')
self.assertRegexpMatches(result['msg'], 'insert')
self.assertEquals(self.f.count_multicolumn_example(), 0, 'no row has been inserted in check-mode')
def test_no_change_required(self):
# insert a row that does not need to be updated
self.f.insert_into_multicolumn_example(['elmar@athmer.org', 4, 'testNoChangeRequired'], [8, 'admin', 'made up'])
set_module_args(
login_user=MYSQL_CONNECTION_PARAMS['user'],
name=MYSQL_CONNECTION_PARAMS['db'],
login_password=MYSQL_CONNECTION_PARAMS['passwd'],
login_host=MYSQL_CONNECTION_PARAMS['host'],
table='multicolumn_example',
identifiers=dict(identifier1='elmar@athmer.org', identifier2='4', identifier3='testNoChangeRequired'),
values={'value1': '8', 'value2': 'admin', 'value3': "made up"},
)
with self.assertRaises(AnsibleExitJson) as e:
self.module.main()
result = e.exception.args[0]
self.assertIn('changed', result)
self.assertFalse(result['changed'], 'no changed required is detected')
        self.assertEquals(self.f.count_multicolumn_example(), 1, 'no additional row has been inserted')
def test_change_detection_for_digits_in_strings(self):
# insert a row that does not need to be updated
self.f.insert_into_multicolumn_example(['elmar@athmer.org', 4, '5'], [8, '15', '16'])
set_module_args(
login_user=MYSQL_CONNECTION_PARAMS['user'],
name=MYSQL_CONNECTION_PARAMS['db'],
login_password=MYSQL_CONNECTION_PARAMS['passwd'],
login_host=MYSQL_CONNECTION_PARAMS['host'],
table='multicolumn_example',
identifiers=dict(identifier1='elmar@athmer.org', identifier2='4', identifier3='5'),
values={'value1': '8', 'value2': '15', 'value3': "16"},
)
with self.assertRaises(AnsibleExitJson) as e:
self.module.main()
result = e.exception.args[0]
self.assertIn('changed', result)
self.assertFalse(result['changed'], 'no changed required is detected')
        self.assertEquals(self.f.count_multicolumn_example(), 1, 'no additional row has been inserted')
|
zauberpony/ansible-mysql-query
|
tests/test_mysql_query_multi_check.py
|
Python
|
gpl-3.0
| 3,789
|
"""
You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
"""
from datetime import date, timedelta
start = date(1901, 1, 1)
end = date(2000, 12, 31)
def daterange(start_date, end_date):
    # include end_date itself, so the range really covers 1 Jan 1901 to 31 Dec 2000
    for n in range(int((end_date - start_date).days) + 1):
        yield start_date + timedelta(n)
sundays = 0
for d in daterange(start, end):
if d.weekday() == 6 and d.day == 1:
sundays += 1
print(d)
print(sundays)
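# Quicker cross-check (editorial sketch, same date range as above): only the
# first day of each month needs testing, so iterate months instead of days.
sundays_by_month = 0
for year in range(1901, 2001):
    for month in range(1, 13):
        if date(year, month, 1).weekday() == 6:  # Monday == 0 ... Sunday == 6
            sundays_by_month += 1
print(sundays_by_month)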
|
adiultra/pysick
|
projecteuler/p19.py
|
Python
|
gpl-3.0
| 911
|
# (c) Copyright 2015, University of Manchester
#
# This file is part of the Pyomo Plugin Demo Suite.
#
# The Pyomo Plugin Demo Suite is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The Pyomo Plugin Demo Suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Pyomo Plugin Demo Suite. If not, see <http://www.gnu.org/licenses/>.
#Author: Majed Khadem, Silvia Padula, Khaled Mohamed, Stephen Knox, Julien Harou
# Importing needed Packages
from xml.dom import ValidationErr
from pyomo.environ import *
from pyomo.opt import SolverFactory
class PyMode():
# Declaring the model
def __init__(self):
model = AbstractModel()
# Declaring model indexes using sets
model.nodes = Set()
model.links = Set(within=model.nodes*model.nodes)
model.river_section = Set(within=model.nodes*model.nodes) #==>links
model.agricultural=Set()
model.urban=Set()
model.junction=Set()
model.surface_reservoir=Set()
#model.demand_nodes = Set() #==>urban and agricultural
#model.nonstorage_nodes = Set() #=>junction, urban and agricultural
model.time_step = Set()
# Declaring model parameters
model.inflow = Param(model.surface_reservoir, model.time_step)
model.current_time_step = Set()
model.consumption_coefficient = Param(model.nodes)
#model.consumption_coefficient = Param(model.urban)
model.initial_storage = Param(model.surface_reservoir, mutable=True)
model.cost = Param(model.river_section, model.time_step)
model.flow_multiplier = Param(model.river_section, model.time_step)
model.min_flow = Param(model.river_section, model.time_step)
model.max_flow = Param(model.river_section, model.time_step)
model.storagelower= Param(model.surface_reservoir, model.time_step)
model.storageupper = Param(model.surface_reservoir, model.time_step)
model.Q = Var(model.river_section, domain=NonNegativeReals, bounds=flow_capacity_constraint) #1e6 m^3 mon^-1
model.Z = Objective(rule=objective_function, sense=minimize) #1e6 m^3 mon^-1
#Declaring delivery
model.delivery=Var(model.nodes, domain=NonNegativeReals) #1e6 m^3 mon^-1
# Declaring state variable S
model.S = Var(model.surface_reservoir, domain=NonNegativeReals, bounds=storage_capacity_constraint) #1e6 m^3 mon^-1
model.mass_balance_const_agr = Constraint(model.agricultural, rule=mass_balance_agricultural)
model.mass_balance_const_ur = Constraint(model.urban, rule=mass_balance_urban)
model.mass_balance_const_jun = Constraint(model.junction, rule=mass_balance_junction)
model.storage_mass_balance_const = Constraint(model.surface_reservoir, rule=storage_mass_balance)
self.model=model
def run(self, input_file):
opt = SolverFactory("glpk")
list=[]
list_=[]
instances=[]
self.model.current_time_step.add(1)
instance=self.model.create_instance(input_file)
for comp in instance.component_objects():
if str(comp) == "time_step":
parmobject = getattr(instance, str(comp))
for vv in parmobject.value:
list_.append(vv)
instance =self.model.create_instance(input_file)
storage={}
demand_nodes=get_demand_nodes_list(instance)
for vv in list_:
##################
self.cu_timp=vv
self.model.current_time_step.clear()
#self.model.preprocess()
self.model.current_time_step.add(vv)
#self.model.preprocess()
instance=self.model.create_instance(input_file)
if(len(storage)>0):
set_initial_storage(instance, storage)
self.model.preprocess()
instance.preprocess()
else:
instance.preprocess()
res=opt.solve(instance)
instance.solutions.load_from(res)
instance.preprocess()
storage=get_storage(instance)
set_delivery(instance, demand_nodes, vv)
instance.solutions.load_from(res)
instances.append(instance)
list.append(res)
count=1
for res in instances:
print " ========= Time step: %s =========="%count
self.display_variables(res)
count+=1
return list, instances
def display_variables(self, instance):
for var in instance.component_objects(Var):
s_var = getattr(instance, str(var))
print "=================="
print "Variable: %s"%s_var
print "=================="
for vv in s_var:
if vv is None:
print s_var," : ", s_var.value
continue
if type(vv) is str:
name = ''.join(map(str,vv))
print name ,": ",(s_var[vv].value)
elif len(vv) == 2:
name = "[" + ', '.join(map(str,vv)) + "]"
print name ,": ",(s_var[vv].value)
# Defining the flow lower and upper bound
def flow_capacity_constraint(model, node, node2):
return (model.min_flow[node, node2, model.current_time_step], model.max_flow[node, node2, model.current_time_step])
# Defining the storage lower and upper bound
def storage_capacity_constraint(model, storage_nodes):
return (model.storagelower[storage_nodes, model.current_time_step], model.storageupper[storage_nodes, model.current_time_step])
def get_current_cost(model):
current_cost= {}
for link in model.river_section:
current_cost[link]= model.cost[link, model.current_time_step]
return current_cost
def objective_function(model):
return summation(get_current_cost(model), model.Q)
##======================================== Declaring constraints
# Mass balance for non-storage nodes:
def mass_balance_agricultural(model, agricultural_nodes):
# inflow
#nonstorage_nodes
term2 = sum([model.Q[node_in, agricultural_nodes]*model.flow_multiplier[node_in, agricultural_nodes, model.current_time_step]
for node_in in model.nodes if (node_in, agricultural_nodes) in model.river_section])
# outflow
term3 = sum([model.Q[agricultural_nodes, node_out]
for node_out in model.nodes if (agricultural_nodes, node_out) in model.river_section])
term4 = model.consumption_coefficient[agricultural_nodes] \
* sum([model.Q[node_in, agricultural_nodes]*model.flow_multiplier[node_in, agricultural_nodes, model.current_time_step]
for node_in in model.nodes if (node_in, agricultural_nodes) in model.river_section])
# inflow - outflow = 0:
return term2 - (term3 + term4) == 0
def mass_balance_urban(model, urban_nodes):
#nonstorage_nodes
term1 = sum([model.Q[node_in, urban_nodes]*model.flow_multiplier[node_in, urban_nodes, model.current_time_step]
for node_in in model.nodes if (node_in, urban_nodes) in model.river_section])
term2 = model.consumption_coefficient[urban_nodes] \
* sum([model.Q[node_in, urban_nodes]*model.flow_multiplier[node_in, urban_nodes, model.current_time_step]
for node_in in model.nodes if (node_in, urban_nodes) in model.river_section])
term3 = sum([model.Q[urban_nodes, node_out]
for node_out in model.nodes if (urban_nodes, node_out) in model.river_section])
# inflow - outflow = 0:
return term1 - (term2 + term3) == 0
def mass_balance_junction(model, junction_nodes):
# inflow
term1 = sum([model.Q[node_in, junction_nodes]*model.flow_multiplier[node_in, junction_nodes, model.current_time_step]
for node_in in model.nodes if (node_in, junction_nodes) in model.river_section])
# outflow
term2 = sum([model.Q[junction_nodes, node_out]
for node_out in model.nodes if (junction_nodes, node_out) in model.river_section])
return (term1 - term2) == 0
# Mass balance for storage nodes:
def storage_mass_balance(model, storage_nodes):
# inflow
term1 = model.inflow[storage_nodes, model.current_time_step]
term2 = sum([model.Q[node_in, storage_nodes]*model.flow_multiplier[node_in, storage_nodes, model.current_time_step]
for node_in in model.nodes if (node_in, storage_nodes) in model.river_section])
# outflow
term3 = sum([model.Q[storage_nodes, node_out]
for node_out in model.nodes if (storage_nodes, node_out) in model.river_section])
# storage
term4 = model.initial_storage[storage_nodes]
term5 = model.S[storage_nodes]
# inflow - outflow = 0:
return (term1 + term2 + term4) - (term3 + term5) == 0
def get_storage(instance):
storage={}
for var in instance.component_objects(Var):
if(var=="S"):
s_var = getattr(instance, var)
for vv in s_var:
name= ''.join(map(str,vv))
storage[name]=(s_var[vv].value)
return storage
def set_initial_storage(instance, storage):
for var in instance.component_objects(Param):
if(var=="initial_storage"):
s_var = getattr(instance, var)
for vv in s_var:
s_var[vv]=storage[vv]
def get_demand_nodes_list(instance):
list={}
for comp in instance.component_objects():
if(str(comp)=="agricultural"):
parmobject = getattr(instance, str(comp))
for vv in parmobject.value:
for comp_2 in instance.component_objects():
if(str(comp_2)=="consumption_coefficient"):
parmobject_2 = getattr(instance, str(comp_2))
for vv2 in parmobject_2:
list[vv]=parmobject_2[vv2]
elif(str(comp)=="urban"):
parmobject = getattr(instance, str(comp))
for vv in parmobject.value:
for comp_2 in instance.component_objects():
if(str(comp_2)=="consumption_coefficient"):
parmobject_2 = getattr(instance, str(comp_2))
for vv2 in parmobject_2:
list[vv]=parmobject_2[vv2]
return list
def set_delivery(instance, demand_nodes, cs):
for var in instance.component_objects(Var):
if(str(var)=="delivery"):
s_var = getattr(instance, str(var))
for vv in s_var:
#s_var[vv]=-2
if(vv in demand_nodes.keys()):
sum=0
q=0
flow_m=0
for var_2 in instance.component_objects():
if(str(var_2)=="Q"):
s_var_2 = getattr(instance, str(var_2))
for vv2 in s_var_2:
if(vv == vv2[1]):
q=s_var_2[vv2].value
                                        if(flow_m != 0):
sum=sum+q*flow_m
q=0
flow_m=0
if(str(var_2)=="flow_multiplier"):
s_var_2 = getattr(instance, str(var_2))
for vv2 in s_var_2:
if(vv == vv2[1] and cs== vv2[2]):
flow_m=s_var_2[vv2]
                                        if(q != 0):
sum=sum+q*flow_m
q=0
flow_m=0
#print flow_m, q
s_var[vv]=sum
def run_model(datafile):
pymodel=PyMode()
return pymodel.run(datafile)
if __name__ == '__main__':
pymodel=PyMode()
pymodel.run("input.dat")
|
UMWRG/demos
|
CostMinimisationDemo/model/pyomo/cost_minimisation.py
|
Python
|
gpl-3.0
| 12,599
|
#!/usr/bin/python3
# -*- coding: utf-8, vim: expandtab:ts=4 -*-
# Miscellaneous tools for HunTag
from operator import itemgetter
from collections import Counter, defaultdict
from itertools import count
import sys
import gzip
def sentenceIterator(inputStream):
currSen = []
currComment = None
for line in inputStream:
line = line.strip()
# Comment handling
if line.startswith('"""'):
if len(currSen) == 0: # Comment before sentence
currComment = line
else: # Error: Comment in the middle of sentence
print('ERROR: comments are only allowed before a sentence!', file=sys.stderr, flush=True)
sys.exit(1)
# Blank line handling
elif len(line) == 0:
if currSen: # End of sentence
yield currSen, currComment
currSen = []
currComment = None
else: # Error: Multiple blank line
print('ERROR: wrong formatted sentences, only one blank line allowed!', file=sys.stderr, flush=True)
sys.exit(1)
else:
currSen.append(line.split())
# XXX Here should be an error because of missing blank line before EOF
if currSen:
print('WARNING: No blank line before EOF!', file=sys.stderr, flush=True)
yield currSen, currComment
def featurizeSentence(sen, features):
sentenceFeats = [[] for _ in sen]
for feature in features.values():
for c, feats in enumerate(feature.evalSentence(sen)):
sentenceFeats[c] += feats
return sentenceFeats
# Keeps Feature/Label-Number translation maps, for faster computations
class BookKeeper:
def __init__(self, loadfromfile=None):
self._counter = Counter()
# Original source: (1.31) http://sahandsaba.com/thirty-python-language-features-and-tricks-you-may-not-know.html
self._nameToNo = defaultdict(count().__next__)
self.noToName = {} # This is built only upon reading back from file
if loadfromfile is not None:
self._nameToNo.default_factory = count(start=self.load(loadfromfile)).__next__
def makeInvertedDict(self):
self.noToName = {} # This is built only upon reading back from file
for name, no in self._nameToNo.items():
self.noToName[no] = name
def numOfNames(self):
return len(self._nameToNo)
def makenoToName(self):
self.noToName = {v: k for k, v in self._nameToNo.items()}
def cutoff(self, cutoff):
toDelete = {self._nameToNo.pop(name) for name, counts in self._counter.items() if counts < cutoff}
del self._counter
newNameNo = {name: i for i, (name, _) in enumerate(sorted(self._nameToNo.items(), key=itemgetter(1)))}
del self._nameToNo
self._nameToNo = newNameNo
return toDelete
def getNoTag(self, name):
return self._nameToNo.get(name) # Defaults to None
def getNoTrain(self, name):
self._counter[name] += 1
return self._nameToNo[name] # Starts from 0 newcomers will get autoincremented value and stored
def save(self, filename):
with gzip.open(filename, mode='wt', encoding='UTF-8') as f:
f.writelines('{}\t{}\n'.format(name, no) for name, no in sorted(self._nameToNo.items(), key=itemgetter(1)))
def load(self, filename):
no = 0 # Last no
with gzip.open(filename, mode='rt', encoding='UTF-8') as f:
for line in f:
l = line.strip().split()
name, no = l[0], int(l[1])
self._nameToNo[name] = no
self.noToName[no] = name
return no
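# --- Hedged demo (editorial, not part of the original module) ----------------
# Shows the input format sentenceIterator() expects: an optional comment line
# starting with triple quotes, whitespace-split token lines, and a blank line
# after each sentence. Guarded so importing the module is unaffected.
if __name__ == '__main__':
    import io
    _demo = io.StringIO('"""comment for the first sentence\n'
                        'The\tDT\n'
                        'dog\tNN\n'
                        '\n'
                        'barks\tVBZ\n'
                        '\n')
    for _sen, _comment in sentenceIterator(_demo):
        print(_comment, _sen)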
|
dlt-rilmta/hunlp-GATE
|
Lang_Hungarian/resources/huntag3/tools.py
|
Python
|
gpl-3.0
| 3,699
|
# -*- coding: utf-8 -*-
import gtk
from pygtktalog import logger
UI = """
<ui>
<menubar name="MenuBar">
<menu action="File">
<menuitem action="New"/>
<menuitem action="Open"/>
<menuitem action="Save"/>
<menuitem action="Save As"/>
<separator/>
<menuitem action="Import"/>
<menuitem action="Export"/>
<separator/>
<menuitem action="Recent"/>
<separator/>
<menuitem action="Quit"/>
</menu>
<menu action="Edit">
<menuitem action="Delete"/>
<separator/>
<menuitem action="Find"/>
<separator/>
<menuitem action="Preferences"/>
</menu>
<menu action="Catalog">
<menuitem action="Add_CD"/>
<menuitem action="Add_Dir"/>
<separator/>
<menuitem action="Delete_all_images"/>
<menuitem action="Delete_all_thumbnails"/>
<menuitem action="Save_all_images"/>
<separator/>
<menuitem action="Catalog_statistics"/>
<separator/>
<menuitem action="Cancel"/>
</menu>
<menu action="View">
<menuitem action="Toolbar"/>
<menuitem action="Statusbar"/>
</menu>
<menu action="Help">
<menuitem action="About"/>
</menu>
</menubar>
<toolbar name="ToolBar">
<toolitem action="New"/>
<toolitem action="Open"/>
<toolitem action="Save"/>
<separator/>
<toolitem action="Add_CD"/>
<toolitem action="Add_Dir"/>
<toolitem action="Find"/>
<separator/>
<toolitem action="Cancel"/>
<toolitem action="Quit"/>
<toolitem action="Debug"/>
</toolbar>
</ui>
"""
LOG = logger.get_logger(__name__)
LOG.setLevel(2)
class ConnectedWidgets(object):
"""grouped widgets"""
def __init__(self, toolbar, menu):
super(ConnectedWidgets, self).__init__()
self.toolbar = toolbar
self.menu = menu
def hide(self):
self.toolbar.hide()
self.menu.hide()
def show(self):
self.toolbar.show()
self.menu.show()
def set_sensitive(self, state):
self.toolbar.set_sensitive(state)
self.menu.set_sensitive(state)
class MainWindow(object):
def __init__(self, debug=False):
"""Initialize window"""
LOG.debug("initialize")
self.window = gtk.Window()
self.window.set_default_size(650, -1)
self.window.set_title("pygtktalog")
self.window.connect("delete-event", self.on_quit)
self.recent = None
self.toolbar = None
self.statusbar = None
self.cancel = None
self.debug = None
vbox = gtk.VBox(False, 0)
self._setup_menu_toolbar(vbox)
# TODO:
# 1. toolbar with selected tags
# 2. main view (splitter)
# 3. treeview with tag cloud (left split)
# 4. splitter (right split)
# 5. file list (upper split)
# 6. details w images and thumb (lower split)
# 7. status bar (if needed…)
hbox = gtk.HBox(False, 0)
vbox.add(hbox)
self.window.add(vbox)
self.window.show_all()
self.debug.hide()
def fake_recent(self):
recent_menu = gtk.Menu()
for i in "one two techno foo bar baz".split():
item = gtk.MenuItem(i)
item.connect_object("activate", self.on_recent,
"/some/fake/path/" + i)
recent_menu.append(item)
item.show()
self.recent.set_submenu(recent_menu)
def _setup_menu_toolbar(self, vbox):
"""Create menu/toolbar using uimanager."""
actions = [('File', None, '_File'),
('New', gtk.STOCK_NEW, '_New', None,
'Create new catalog', self.on_new),
('Open', gtk.STOCK_OPEN, '_Open', None,
'Open catalog file', self.on_open),
('Save', gtk.STOCK_SAVE, '_Save', None,
'Save catalog file', self.on_save),
('Save As', gtk.STOCK_SAVE_AS,
'_Save As', None, None, self.on_save),
('Import', None, '_Import', None, None, self.on_import),
('Export', None, '_Export', None, None, self.on_export),
('Recent', None, '_Recent files'),
('Quit', gtk.STOCK_QUIT, '_Quit', None,
'Quit the Program', self.on_quit),
('Edit', None, '_Edit'),
('Delete', gtk.STOCK_DELETE, '_Delete', None, None,
self.on_delete),
('Find', gtk.STOCK_FIND, '_Find', None, 'Find file',
self.on_find),
('Preferences', gtk.STOCK_PREFERENCES, '_Preferences'),
('Catalog', None, '_Catalog'),
('Add_CD', gtk.STOCK_CDROM, '_Add CD', None,
'Add CD/DVD/BR to catalog'),
('Add_Dir', gtk.STOCK_DIRECTORY, '_Add Dir', None,
'Add directory to catalog'),
('Delete_all_images', None, '_Delete all images'),
('Delete_all_thumbnails', None, '_Delete all thumbnails'),
('Save_all_images', None, '_Save all images…'),
('Catalog_statistics', None, '_Catalog statistics'),
('Cancel', gtk.STOCK_CANCEL, '_Cancel'),
('View', None, '_View'),
('Help', None, '_Help'),
('About', gtk.STOCK_ABOUT, '_About'),
('Debug', gtk.STOCK_DIALOG_INFO, 'Debug')]
toggles = [('Toolbar', None, '_Toolbar'),
('Statusbar', None, '_Statusbar')]
mgr = gtk.UIManager()
accelgrp = mgr.get_accel_group()
self.window.add_accel_group(accelgrp)
agrp = gtk.ActionGroup("Actions")
agrp.add_actions(actions)
agrp.add_toggle_actions(toggles)
mgr.insert_action_group(agrp, 0)
mgr.add_ui_from_string(UI)
help_widget = mgr.get_widget("/MenuBar/Help")
help_widget.set_right_justified(True)
self.recent = mgr.get_widget("/MenuBar/File/Recent")
self.fake_recent()
menubar = mgr.get_widget("/MenuBar")
vbox.pack_start(menubar)
self.toolbar = mgr.get_widget("/ToolBar")
vbox.pack_start(self.toolbar)
menu_cancel = mgr.get_widget('/MenuBar/Catalog/Cancel')
toolbar_cancel = mgr.get_widget('/ToolBar/Cancel')
self.cancel = ConnectedWidgets(toolbar_cancel, menu_cancel)
self.cancel.set_sensitive(False)
self.debug = mgr.get_widget('/ToolBar/Debug')
self.toolbar = mgr.get_widget('/MenuBar/View/Toolbar')
self.statusbar = mgr.get_widget('/MenuBar/View/Statusbar')
def on_new(self, *args, **kwargs):
LOG.debug("On new")
return
def on_open(self, *args, **kwargs):
LOG.debug("On open")
return
def on_save(self, *args, **kwargs):
LOG.debug("On save")
return
def on_save_as(self, *args, **kwargs):
LOG.debug("On save as")
return
def on_import(self, *args, **kwargs):
LOG.debug("On import")
return
def on_export(self, *args, **kwargs):
LOG.debug("On export")
return
def on_recent(self, *args, **kwargs):
LOG.debug("On recent")
print args, kwargs
def on_quit(self, *args, **kwargs):
LOG.debug("on quit")
gtk.main_quit()
def on_delete(self, *args, **kwargs):
LOG.debug("On delete")
return
def on_find(self, *args, **kwargs):
LOG.debug("On find")
return
def on_about(self, event, menuitem):
LOG.debug("about", event, menuitem)
return
def run():
MainWindow()
gtk.mainloop()
|
gryf/pygtktalog
|
pygtktalog/gtk2/gui.py
|
Python
|
gpl-3.0
| 7,824
|
from __future__ import division
import math as m
import numpy as np
from numpy import nan, any, array, asarray, ones, arange, delete, where
from numpy.random import permutation
from .utils import medsig
from .core import *
class DtData(object):
"""Utility class that encapsulates the fluxes and inputs for a detrender.
"""
def __init__(self, flux, inputs, mask=None):
self._flux = array(flux)
self._inputs = array(inputs)
self._mask = array(mask) if mask is not None else ones(self._flux.size, np.bool)
self._fm, self._fs = medsig(self.masked_flux)
self.nptm = self.masked_flux.size
self.nptu = self.unmasked_flux.size
assert self._flux.ndim == 1, 'The flux array for DtData should be 1D [npt]'
assert self._inputs.ndim == 2, 'The input array for DtData should be 2D [npt,3]'
assert self._inputs.shape[1] == 3, 'The input array for DtData should be 2D with the shape [npt,3]'
self.mf, self.uf = self.masked_flux, self.unmasked_flux
self.mi, self.ui = self.masked_inputs, self.unmasked_inputs
self.mt, self.ut = self.masked_time, self.unmasked_time
self.mx, self.ux = self.masked_x, self.unmasked_x
self.my, self.uy = self.masked_y, self.unmasked_y
def create_training_set(self, nrandom=100, nblocks=5, bspan=50, max_tries=100):
"""Creates a new DtData instance containing a subset of the original data.
Creates a new DtData instance made of nblocks non-overlapping chunks of bspan
consecutive datapoints (excluding the masked points) and nrandom datapoints
drawn randomly from the leftover points. This is done in order to try to include
both short- and long-time-scale trends to the training set.
Parameters
----------
nrandom : int, optional
Number of randomly drawn points
nblocks : int, optional
Number of blocks to draw
bspan : int, optional
Size of a block
max_tries : int, optional
Maximum number of tries to create a consecutive block that doesn't
overlap with any of the previously created blocks
Returns
-------
DtData
The training dataset as a new DtData instance.
"""
ids = where(self.mask)[0] ## Get the masked indices
bstarts = np.full(nblocks, -bspan, np.int) ## Starting indices for blocks
## Chunks
## ------
## Attempt to draw nblock non-overlapping chunks of bspan consecutive points
## from the data excluding the masked points. If this fails, the leftover
## points will be added to the random sample set.
i, j = 0, 0
while True:
if j == nblocks or i > max_tries:
break
idx = np.random.randint(0, ids.size-bspan)
if not any((idx >= bstarts) & (idx < bstarts+bspan)):
bstarts[j] = idx
j += 1
i += 1
block_ids = (np.sort(bstarts)[:,np.newaxis] + arange(bspan)[np.newaxis,:]).ravel()
npt_blocks = block_ids.size
npt_random = min(ids.size, nrandom+bspan*nblocks-npt_blocks)
idb = ids[block_ids]
idr = np.sort((permutation(delete(ids, block_ids))[:npt_random]))
tr_ids = np.sort(np.r_[idr, idb])
trd = DtData(self.unmasked_flux[tr_ids], self.unmasked_inputs[tr_ids])
trd._fm, trd._fs = self._fm, self._fs
return trd
@property
def flux_median(self):
return self._fm
@property
def flux_std(self):
return self._fs
@property
def mask(self):
return self._mask
@mask.setter
def mask(self, mask):
mask = array(mask)
assert mask.ndim == 1, 'The mask array for DtData should be 1D'
assert mask.size == self.flux.size, 'The mask array for DtData should have the same size as the flux array'
assert mask.dtype == np.bool, 'The mask array should be boolean'
self._mask = mask
@property
def masked_time(self):
return self._inputs[self.mask,0]
@property
def unmasked_time(self):
return self._inputs[:,0]
@property
def masked_flux(self):
return self._flux[self.mask]
@property
def unmasked_flux(self):
return self._flux
@property
def masked_normalised_flux(self):
return self._flux[self.mask] / self._fm - 1.
@property
def unmasked_normalised_flux(self):
return self._flux / self._fm - 1.
@property
def masked_inputs(self):
return self._inputs[self.mask, :]
@property
def unmasked_inputs(self):
return self._inputs
@property
def masked_x(self):
return self._inputs[self.mask, 1]
@property
def unmasked_x(self):
return self._inputs[:, 1]
@property
def masked_y(self):
return self._inputs[self.mask, 2]
@property
def unmasked_y(self):
return self._inputs[:, 2]
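# --- Hedged usage sketch (editorial, not part of the original module) --------
# DtData only needs a 1D flux array and an [npt, 3] input array of
# (time, x, y); medsig() comes from the package's .utils. Values are made up.
#
#   import numpy as np
#   npt = 500
#   flux = 1.0 + 1e-4 * np.random.randn(npt)
#   inputs = np.column_stack([np.linspace(0., 10., npt),      # time
#                             np.random.randn(npt),           # x centroid
#                             np.random.randn(npt)])          # y centroid
#   dd = DtData(flux, inputs)
#   training = dd.create_training_set(nrandom=100, nblocks=5, bspan=50)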
|
OxES/k2sc
|
src/dtdata.py
|
Python
|
gpl-3.0
| 5,158
|
# Outspline - A highly modular and extensible outliner.
# Copyright (C) 2011 Dario Giovannetti <dev@dariogiovannetti.net>
#
# This file is part of Outspline.
#
# Outspline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Outspline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outspline. If not, see <http://www.gnu.org/licenses/>.
class OutsplineError(Exception):
pass
class AddonDisabledError(OutsplineError):
pass
class ExtensionProvidedTablesError(OutsplineError):
def __init__(self, tables, extensions):
self.tables = tables
self.extensions = extensions
OutsplineError.__init__(self)
class AddonVersionError(OutsplineError):
def __init__(self, version):
self.version = version
OutsplineError.__init__(self)
class AddonNotFoundError(OutsplineError):
pass
class AddonDependencyError(OutsplineError):
pass
class MultipleInterfacesError(OutsplineError):
pass
class InterfaceNotFoundError(OutsplineError):
pass
|
kynikos/outspline
|
src/outspline/coreaux/exceptions.py
|
Python
|
gpl-3.0
| 1,452
|
# Caesar Cipher - Python Code - Elizabeth Tweedale
# t is the text string for decoding/encoding
# k is the key integer
# decode is a boolean
def caesar(t, k, decode = False):
if decode: k = 26 - k # check if you are decoding or encoding
# if decode = True, shift the key forward
# to 26 - the key amount
# (returning it to its original position)
return "".join([chr((ord(i) - 65 + k) % 26 + 65) # the math behind shifting our letters
for i in t.upper() # for every letter in the text
if ord(i) >= 65 and ord(i) <= 90 ]) # check if the character is a letter between A-Z
# Test the code:
# Change the text and key to test different messages
text = "The quick brown fox jumped over the lazy dogs"
key = 11
encr = caesar(text, key)
decr = caesar(encr, key, decode = True)
print (text)
print (encr)
print (decr)
# Output:
# Plain text = The quick brown fox jumped over the lazy dogs
# Encrypted text = ESPBFTNVMCZHYQZIUFXAPOZGPCESPWLKJOZRD
# Decrypted text = THEQUICKBROWNFOXJUMPEDOVERTHELAZYDOGS
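# Hedged cross-check (editorial): decoding with key k is the same as encoding
# with 26 - k, so this should print True.
print (caesar(encr, 26 - key) == decr)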
|
elizabethtweedale/HowToCode2
|
SuperSkill-05-Spy/caesar_cipher.py
|
Python
|
gpl-3.0
| 1,183
|
from GangaCore.GPIDev.Base.Proxy import stripProxy
def test_export(gpi, tmpdir):
files = [gpi.LocalFile() for _ in range(100)]
d = gpi.GangaDataset(files=files)
fn = str(tmpdir.join('ganga-export'))
gpi.export(d, fn)
def test_roundtrip(gpi, tmpdir):
files = [gpi.LocalFile() for _ in range(100)]
d = gpi.GangaDataset(files=files)
fn = str(tmpdir.join('ganga-export'))
gpi.export(d, fn)
d2 = gpi.load(fn)[0]
d = stripProxy(d)
d2 = stripProxy(d2)
assert type(d) == type(d2)
assert len(d) == len(d2)
assert d == d2
|
ganga-devs/ganga
|
ganga/GangaCore/test/GPI/TestPersistency.py
|
Python
|
gpl-3.0
| 574
|
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload',
'okru',
'netutv',
'rapidvideo'
]
list_quality = ['default']
host = "https://serieslan.com"
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(
Item(channel=item.channel, action="lista", title="Series", url=host, thumbnail=thumb_series, page=0))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
patron = '<a href="([^"]+)" '
patron += 'class="link">.+?<img src="([^"]+)".*?'
patron += 'title="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)
# Paginacion
num_items_x_pagina = 30
min = item.page * num_items_x_pagina
min=min-item.page
max = min + num_items_x_pagina - 1
for link, img, name in matches[min:max]:
title = name
url = host + link
scrapedthumbnail = host + img
context1=[renumbertools.context(item), autoplay.context]
itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,
context=context1))
itemlist.append(
Item(channel=item.channel, title="Página Siguiente >>", url=item.url, action="lista", page=item.page + 1))
tmdb.set_infoLabels(itemlist)
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# obtener el numero total de episodios
total_episode = 0
patron_caps = '<li><span>Capitulo ([^"]+)\:<\/span><[^"]+"(.+?)">([^"]+)<[^"]+<\/li>'
matches = scrapertools.find_multiple_matches(data, patron_caps)
# data_info = scrapertools.find_single_match(data, '<div class="info">.+?<\/div><\/div>')
patron_info = '<img src="([^"]+)">.+?<\/span>([^"]+)<\/p><p><span>I.+?Reseña: <\/span>(.+?)<\/p><\/div>'
scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info)
scrapedthumbnail = host + scrapedthumbnail
for cap, link, name in matches:
title = ""
pat = "/"
# varios episodios en un enlace
if len(name.split(pat)) > 1:
i = 0
for pos in name.split(pat):
i = i + 1
total_episode += 1
season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, total_episode)
if len(name.split(pat)) == i:
title += "%sx%s " % (season, str(episode).zfill(2))
else:
title += "%sx%s_" % (season, str(episode).zfill(2))
else:
total_episode += 1
season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, total_episode)
title += "%sx%s " % (season, str(episode).zfill(2))
url = host + "/" + link
if "disponible" in link:
title += "No Disponible aún"
else:
title += name
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, plot=scrapedplot,
thumbnail=scrapedthumbnail))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def findvideos(item):
logger.info()
import base64
itemlist = []
url_server = "https://openload.co/embed/%s/"
url_api_get_key = "https://serieslan.com/ide.php?i=%s&k=%s"
def txc(key, _str):
s = range(256)
j = 0
res = ''
for i in range(256):
j = (j + s[i] + ord(key[i % len(key)])) % 256
x = s[i]
s[i] = s[j]
s[j] = x
i = 0
j = 0
for y in range(len(_str)):
i = (i + 1) % 256
j = (j + s[i]) % 256
x = s[i]
s[i] = s[j]
s[j] = x
res += chr(ord(_str[y]) ^ s[(s[i] + s[j]) % 256])
return res
data = httptools.downloadpage(item.url).data
pattern = '<div id="video" idv="([^"]*)" ide="([^"]*)" ids="[^"]*" class="video">'
idv, ide = scrapertools.find_single_match(data, pattern)
thumbnail = scrapertools.find_single_match(data,
'<div id="tab-1" class="tab-content current">.+?<img src="([^"]*)">')
show = scrapertools.find_single_match(data, '<span>Episodio: <\/span>([^"]*)<\/p><p><span>Idioma')
thumbnail = host + thumbnail
data = httptools.downloadpage(url_api_get_key % (idv, ide), headers={'Referer': item.url}).data
data = eval(data)
if type(data) == list:
logger.debug("inside")
video_url = url_server % (txc(ide, base64.decodestring(data[2])))
server = "openload"
if " SUB" in item.title:
lang = "VOS"
elif " Sub" in item:
lang = "VOS"
else:
lang = "Latino"
title = "Enlace encontrado en " + server + " [" + lang + "]"
itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
thumbnail=thumbnail, server=server, folder=False))
autoplay.start(itemlist, item)
return itemlist
else:
return []
def play(item):
logger.info()
itemlist = []
# Buscamos video por servidor ...
devuelve = servertools.findvideosbyserver(item.url, item.server)
if not devuelve:
# ...sino lo encontramos buscamos en todos los servidores disponibles
devuelve = servertools.findvideos(item.url, skip=True)
if devuelve:
# logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
return itemlist
|
pitunti/alfaPitunti
|
plugin.video.alfa/channels/serieslan.py
|
Python
|
gpl-3.0
| 6,772
|
import os
import mu_repo
from .utils import push_dir
def set_up(workdir):
join = os.path.join
paths = {
'dir1': join(workdir, 'projectA', 'sectionX'),
'dir2': join(workdir, 'projectB', 'sectionY'),
'repo1': join(workdir, 'projectA', 'sectionX', 'repo1'),
'repo2': join(workdir, 'projectB', 'sectionY', 'repo2'),
'link1': join(workdir, 'projectA', 'sectionX', 'link1'),
'link2': join(workdir, 'projectB', 'sectionY', 'link2'),
}
# Mark repositories
os.makedirs(join(paths['repo1'], '.git'))
os.makedirs(join(paths['repo2'], '.git'))
return paths
def test_direct_symlink(workdir):
"""Linking directly to a repository inside of initial search path"""
paths = set_up(workdir)
os.symlink(paths['repo1'], paths['link1'])
with push_dir('projectA'):
status = mu_repo.main(config_file='.bar_file', args=['register', '--recursive'])
assert status.succeeded
assert status.config.repos == ['sectionX/repo1']
def test_indirect_symlink(workdir):
"""Linking to an ancestor of a repository"""
paths = set_up(workdir)
os.symlink(paths['dir1'], paths['link1'])
with push_dir('projectA'):
status = mu_repo.main(config_file='.bar_file', args=['register', '--recursive'])
assert status.succeeded
assert status.config.repos == ['sectionX/repo1']
def test_search_path_expansion(workdir):
"""Linking to a repository outside of initial search path"""
paths = set_up(workdir)
os.symlink(paths['repo2'], paths['link1'])
with push_dir('projectA'):
status = mu_repo.main(config_file='.bar_file', args=['register', '--recursive'])
assert status.succeeded
assert set(status.config.repos) == set(['sectionX/repo1', '../projectB/sectionY/repo2'])
def test_infinite_cycle(workdir):
"""Linking to own ancestor directory"""
paths = set_up(workdir)
os.symlink(paths['dir1'], paths['link1'])
with push_dir('projectA'):
status = mu_repo.main(config_file='.bar_file', args=['register', '--recursive'])
assert status.succeeded
assert status.config.repos == ['sectionX/repo1']
def test_infinite_cycle_ouside(workdir):
"""Linking to own ancestor directory in expanded search path"""
paths = set_up(workdir)
os.symlink(paths['dir2'], paths['link1'])
os.symlink(paths['dir2'], paths['link2'])
with push_dir('projectA'):
status = mu_repo.main(config_file='.bar_file', args=['register', '--recursive'])
assert status.succeeded
assert set(status.config.repos) == set(['sectionX/repo1', '../projectB/sectionY/repo2'])
|
fabioz/mu-repo
|
mu_repo/tests/test_register.py
|
Python
|
gpl-3.0
| 2,664
|
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: zoidberg
"""
from __future__ import with_statement
import hashlib
import zlib
from os import remove
from os.path import getsize, isfile, splitext
import re
from module.utils import save_join, fs_encode
from module.plugins.Hook import Hook
def computeChecksum(local_file, algorithm):
if algorithm in getattr(hashlib, "algorithms", ("md5", "sha1", "sha224", "sha256", "sha384", "sha512")):
h = getattr(hashlib, algorithm)()
chunk_size = 128 * h.block_size
with open(local_file, 'rb') as f:
for chunk in iter(lambda: f.read(chunk_size), ''):
h.update(chunk)
return h.hexdigest()
elif algorithm in ("adler32", "crc32"):
hf = getattr(zlib, algorithm)
last = 0
with open(local_file, 'rb') as f:
for chunk in iter(lambda: f.read(8192), ''):
last = hf(chunk, last)
return "%x" % last
else:
return None
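# Hedged usage sketch (editorial, not part of the plugin): the helper above can
# be called directly on any local path, e.g.
#
#   md5_hex = computeChecksum('/tmp/example.bin', 'md5')     # hex digest string
#   crc_hex = computeChecksum('/tmp/example.bin', 'crc32')   # "%x"-formatted
#   unknown = computeChecksum('/tmp/example.bin', 'foo')     # returns None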
class Checksum(Hook):
__name__ = "Checksum"
__version__ = "0.10"
__description__ = "Verify downloaded file size and checksum (enable in general preferences)"
__config__ = [("activated", "bool", "Activated", True),
("action", "fail;retry;nothing", "What to do if check fails?", "retry"),
("max_tries", "int", "Number of retries", 2)]
__author_name__ = ("zoidberg")
__author_mail__ = ("zoidberg@mujmail.cz")
methods = {'sfv': 'crc32', 'crc': 'crc32', 'hash': 'md5'}
regexps = {'sfv': r'^(?P<name>[^;].+)\s+(?P<hash>[0-9A-Fa-f]{8})$',
'md5': r'^(?P<name>[0-9A-Fa-f]{32}) (?P<file>.+)$',
'crc': r'filename=(?P<name>.+)\nsize=(?P<size>\d+)\ncrc32=(?P<hash>[0-9A-Fa-f]{8})$',
'default': r'^(?P<hash>[0-9A-Fa-f]+)\s+\*?(?P<name>.+)$'}
def setup(self):
if not self.config['general']['checksum']:
self.logInfo("Checksum validation is disabled in general configuration")
self.algorithms = sorted(
getattr(hashlib, "algorithms", ("md5", "sha1", "sha224", "sha256", "sha384", "sha512")), reverse=True)
self.algorithms.extend(["crc32", "adler32"])
self.formats = self.algorithms + ['sfv', 'crc', 'hash']
def downloadFinished(self, pyfile):
"""
Compute checksum for the downloaded file and compare it with the hash provided by the hoster.
pyfile.plugin.check_data should be a dictionary which can contain:
a) if known, the exact filesize in bytes (e.g. "size": 123456789)
b) hexadecimal hash string with algorithm name as key (e.g. "md5": "d76505d0869f9f928a17d42d66326307")
"""
if hasattr(pyfile.plugin, "check_data") and (isinstance(pyfile.plugin.check_data, dict)):
data = pyfile.plugin.check_data.copy()
elif hasattr(pyfile.plugin, "api_data") and (isinstance(pyfile.plugin.api_data, dict)):
data = pyfile.plugin.api_data.copy()
else:
return
self.logDebug(data)
if not pyfile.plugin.lastDownload:
self.checkFailed(pyfile, None, "No file downloaded")
local_file = fs_encode(pyfile.plugin.lastDownload)
#download_folder = self.config['general']['download_folder']
#local_file = fs_encode(save_join(download_folder, pyfile.package().folder, pyfile.name))
if not isfile(local_file):
self.checkFailed(pyfile, None, "File does not exist")
# validate file size
if "size" in data:
api_size = int(data['size'])
file_size = getsize(local_file)
if api_size != file_size:
self.logWarning("File %s has incorrect size: %d B (%d expected)" % (pyfile.name, file_size, api_size))
self.checkFailed(pyfile, local_file, "Incorrect file size")
del data['size']
# validate checksum
if data and self.config['general']['checksum']:
if "checksum" in data:
data['md5'] = data['checksum']
for key in self.algorithms:
if key in data:
checksum = computeChecksum(local_file, key.replace("-", "").lower())
if checksum:
if checksum == data[key].lower():
self.logInfo('File integrity of "%s" verified by %s checksum (%s).' % (pyfile.name,
key.upper(),
checksum))
return
else:
self.logWarning("%s checksum for file %s does not match (%s != %s)" % (key.upper(),
pyfile.name,
checksum,
data[key]))
self.checkFailed(pyfile, local_file, "Checksums do not match")
else:
self.logWarning("Unsupported hashing algorithm: %s" % key.upper())
else:
self.logWarning("Unable to validate checksum for file %s" % pyfile.name)
def checkFailed(self, pyfile, local_file, msg):
action = self.getConfig("action")
if action == "fail":
pyfile.plugin.fail(reason=msg)
elif action == "retry":
if local_file:
remove(local_file)
pyfile.plugin.retry(reason=msg, max_tries=self.getConfig("max_tries"))
def packageFinished(self, pypack):
download_folder = save_join(self.config['general']['download_folder'], pypack.folder, "")
for link in pypack.getChildren().itervalues():
file_type = splitext(link["name"])[1][1:].lower()
#self.logDebug(link, file_type)
if file_type not in self.formats:
continue
hash_file = fs_encode(save_join(download_folder, link["name"]))
if not isfile(hash_file):
self.logWarning("File not found: %s" % link["name"])
continue
with open(hash_file) as f:
text = f.read()
for m in re.finditer(self.regexps.get(file_type, self.regexps['default']), text):
data = m.groupdict()
self.logDebug(link["name"], data)
local_file = fs_encode(save_join(download_folder, data["name"]))
algorithm = self.methods.get(file_type, file_type)
checksum = computeChecksum(local_file, algorithm)
if checksum == data["hash"]:
self.logInfo('File integrity of "%s" verified by %s checksum (%s).' % (data["name"],
algorithm,
checksum))
else:
self.logWarning("%s checksum for file %s does not match (%s != %s)" % (algorithm,
data["name"],
checksum,
data["hash"]))
|
chaosmaker/pyload
|
module/plugins/hooks/Checksum.py
|
Python
|
gpl-3.0
| 8,302
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from pkgutil import iter_modules
from . import fixtures
from .fixtures import models as fixtures_models
from .fixtures.core import management as fixtures_core_management
from .fixtures.core import utils as fixtures_core_utils
from .fixtures import formats as fixtures_formats
from .fixtures import pootle_fs as fixtures_fs
def _load_fixtures(*modules):
for mod in modules:
path = mod.__path__
prefix = '%s.' % mod.__name__
for loader_, name, is_pkg in iter_modules(path, prefix):
if not is_pkg:
yield name
def pytest_addoption(parser):
parser.addoption(
"--debug-tests",
action="store",
default="",
help="Debug tests to a given file")
def pytest_configure(config):
# register an additional marker
config.addinivalue_line(
"markers", "pootle_vfolders: requires special virtual folder projects")
pytest_plugins = tuple(
_load_fixtures(
fixtures,
fixtures_core_management,
fixtures_core_utils,
fixtures_formats,
fixtures_models,
fixtures_fs))
for plugin in pytest_plugins:
config.pluginmanager.import_plugin(plugin)
|
claudep/pootle
|
pytest_pootle/plugin.py
|
Python
|
gpl-3.0
| 1,502
|
import threading
from sqlalchemy import Column, UnicodeText, Integer, String, Boolean
from tg_bot.modules.sql import BASE, SESSION
class GloballyBannedUsers(BASE):
__tablename__ = "gbans"
user_id = Column(Integer, primary_key=True)
name = Column(UnicodeText, nullable=False)
reason = Column(UnicodeText)
def __init__(self, user_id, name, reason=None):
self.user_id = user_id
self.name = name
self.reason = reason
def __repr__(self):
return "<GBanned User {} ({})>".format(self.name, self.user_id)
def to_dict(self):
return {"user_id": self.user_id,
"name": self.name,
"reason": self.reason}
class GbanSettings(BASE):
__tablename__ = "gban_settings"
chat_id = Column(String(14), primary_key=True)
setting = Column(Boolean, default=True, nullable=False)
def __init__(self, chat_id, enabled):
self.chat_id = str(chat_id)
self.setting = enabled
def __repr__(self):
return "<Gban setting {} ({})>".format(self.chat_id, self.setting)
GloballyBannedUsers.__table__.create(checkfirst=True)
GbanSettings.__table__.create(checkfirst=True)
GBANNED_USERS_LOCK = threading.RLock()
GBAN_SETTING_LOCK = threading.RLock()
GBANNED_LIST = set()
GBANSTAT_LIST = set()
def gban_user(user_id, name, reason=None):
with GBANNED_USERS_LOCK:
user = SESSION.query(GloballyBannedUsers).get(user_id)
if not user:
user = GloballyBannedUsers(user_id, name, reason)
else:
user.name = name
user.reason = reason
SESSION.merge(user)
SESSION.commit()
__load_gbanned_userid_list()
def update_gban_reason(user_id, name, reason=None):
with GBANNED_USERS_LOCK:
user = SESSION.query(GloballyBannedUsers).get(user_id)
if not user:
return None
old_reason = user.reason
user.name = name
user.reason = reason
SESSION.merge(user)
SESSION.commit()
return old_reason
def ungban_user(user_id):
with GBANNED_USERS_LOCK:
user = SESSION.query(GloballyBannedUsers).get(user_id)
if user:
SESSION.delete(user)
SESSION.commit()
__load_gbanned_userid_list()
def is_user_gbanned(user_id):
return user_id in GBANNED_LIST
def get_gbanned_user(user_id):
try:
return SESSION.query(GloballyBannedUsers).get(user_id)
finally:
SESSION.close()
def get_gban_list():
try:
return [x.to_dict() for x in SESSION.query(GloballyBannedUsers).all()]
finally:
SESSION.close()
def enable_gbans(chat_id):
with GBAN_SETTING_LOCK:
chat = SESSION.query(GbanSettings).get(str(chat_id))
if not chat:
chat = GbanSettings(chat_id, True)
chat.setting = True
SESSION.add(chat)
SESSION.commit()
if str(chat_id) in GBANSTAT_LIST:
GBANSTAT_LIST.remove(str(chat_id))
def disable_gbans(chat_id):
with GBAN_SETTING_LOCK:
chat = SESSION.query(GbanSettings).get(str(chat_id))
if not chat:
chat = GbanSettings(chat_id, False)
chat.setting = False
SESSION.add(chat)
SESSION.commit()
GBANSTAT_LIST.add(str(chat_id))
def does_chat_gban(chat_id):
return str(chat_id) not in GBANSTAT_LIST
def num_gbanned_users():
return len(GBANNED_LIST)
def __load_gbanned_userid_list():
global GBANNED_LIST
try:
GBANNED_LIST = {x.user_id for x in SESSION.query(GloballyBannedUsers).all()}
finally:
SESSION.close()
def __load_gban_stat_list():
global GBANSTAT_LIST
try:
GBANSTAT_LIST = {x.chat_id for x in SESSION.query(GbanSettings).all() if not x.setting}
finally:
SESSION.close()
def migrate_chat(old_chat_id, new_chat_id):
with GBAN_SETTING_LOCK:
chat = SESSION.query(GbanSettings).get(str(old_chat_id))
if chat:
chat.chat_id = new_chat_id
SESSION.add(chat)
SESSION.commit()
# Create in memory userid to avoid disk access
__load_gbanned_userid_list()
__load_gban_stat_list()
|
PaulSonOfLars/tgbot
|
tg_bot/modules/sql/global_bans_sql.py
|
Python
|
gpl-3.0
| 4,180
|
# -*- encoding: utf-8 -*-
"""
Factory object creation for all CLI methods
"""
import datetime
import json
import logging
import os
import random
import time
from fauxfactory import (
gen_alphanumeric,
gen_choice,
gen_integer,
gen_ipaddr,
gen_mac,
gen_netmask,
gen_string,
)
from os import chmod
from robottelo import manifests, ssh
from robottelo.api.utils import enable_rhrepo_and_fetchid
from robottelo.cli.activationkey import ActivationKey
from robottelo.cli.architecture import Architecture
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.computeresource import ComputeResource
from robottelo.cli.contentview import (
ContentView,
ContentViewFilter,
ContentViewFilterRule,
)
from robottelo.cli.discoveryrule import DiscoveryRule
from robottelo.cli.docker import DockerContainer, DockerRegistry
from robottelo.cli.domain import Domain
from robottelo.cli.environment import Environment
from robottelo.cli.filter import Filter
from robottelo.cli.gpgkey import GPGKey
from robottelo.cli.host import Host
from robottelo.cli.hostcollection import HostCollection
from robottelo.cli.hostgroup import HostGroup
from robottelo.cli.job_invocation import JobInvocation
from robottelo.cli.job_template import JobTemplate
from robottelo.cli.ldapauthsource import LDAPAuthSource
from robottelo.cli.lifecycleenvironment import LifecycleEnvironment
from robottelo.cli.location import Location
from robottelo.cli.medium import Medium
from robottelo.cli.model import Model
from robottelo.cli.operatingsys import OperatingSys
from robottelo.cli.org import Org
from robottelo.cli.partitiontable import PartitionTable
from robottelo.cli.product import Product
from robottelo.cli.proxy import CapsuleTunnelError, Proxy
from robottelo.cli.realm import Realm
from robottelo.cli.repository import Repository
from robottelo.cli.repository_set import RepositorySet
from robottelo.cli.role import Role
from robottelo.cli.scapcontent import Scapcontent
from robottelo.cli.subnet import Subnet
from robottelo.cli.subscription import Subscription
from robottelo.cli.syncplan import SyncPlan
from robottelo.cli.scap_policy import Scappolicy
from robottelo.cli.scap_tailoring_files import TailoringFiles
from robottelo.cli.template import Template
from robottelo.cli.user import User
from robottelo.cli.usergroup import UserGroup, UserGroupExternal
from robottelo.cli.smart_variable import SmartVariable
from robottelo.cli.virt_who_config import VirtWhoConfig
from robottelo.config import settings
from robottelo.constants import (
DEFAULT_ARCHITECTURE,
DEFAULT_LOC,
DEFAULT_ORG,
DEFAULT_PTABLE,
DEFAULT_PXE_TEMPLATE,
DEFAULT_SUBSCRIPTION_NAME,
DEFAULT_TEMPLATE,
DISTRO_RHEL7,
DISTROS_MAJOR_VERSION,
FAKE_1_YUM_REPO,
FOREMAN_PROVIDERS,
OPERATING_SYSTEMS,
PRDS,
REPOS,
REPOSET,
RHEL_6_MAJOR_VERSION,
RHEL_7_MAJOR_VERSION,
SYNC_INTERVAL,
TEMPLATE_TYPES,
)
from robottelo.datafactory import valid_cron_expressions
from robottelo.decorators import bz_bug_is_open, cacheable
from robottelo.helpers import (
update_dictionary, default_url_on_new_port, get_available_capsule_port
)
from robottelo.ssh import download_file, upload_file
from tempfile import mkstemp
from time import sleep
logger = logging.getLogger(__name__)
ORG_KEYS = ['organization', 'organization-id', 'organization-label']
CONTENT_VIEW_KEYS = ['content-view', 'content-view-id']
LIFECYCLE_KEYS = ['lifecycle-environment', 'lifecycle-environment-id']
class CLIFactoryError(Exception):
"""Indicates an error occurred while creating an entity using hammer"""
def create_object(cli_object, options, values):
"""
Creates <object> with dictionary of arguments.
:param cli_object: A valid CLI object.
:param dict options: The default options accepted by the cli_object
create
:param dict values: Custom values to override default ones.
:raise robottelo.cli.factory.CLIFactoryError: Raise an exception if object
cannot be created.
:rtype: dict
:return: A dictionary representing the newly created resource.
"""
if values:
diff = set(values.keys()).difference(set(options.keys()))
if diff:
logger.debug(
"Option(s) {0} not supported by CLI factory. Please check for "
"a typo or update default options".format(diff)
)
update_dictionary(options, values)
try:
result = cli_object.create(options)
except CLIReturnCodeError as err:
# If the object is not created, raise exception, stop the show.
raise CLIFactoryError(
u'Failed to create {0} with data:\n{1}\n{2}'.format(
cli_object.__name__,
json.dumps(options, indent=2, sort_keys=True),
err.msg,
)
)
    # Sometimes the CLI returns a list containing a single dictionary
    # instead of a plain dictionary.
    if isinstance(result, list) and len(result) > 0:
        result = result[0]
return result
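# Illustrative sketch (not from the original file; Widget is a stand-in name):
# every make_* factory below follows the same pattern -- build a dict of
# default options, then delegate to create_object:
#
#     def make_widget(options=None):
#         args = {u'name': gen_alphanumeric()}
#         return create_object(Widget, args, options)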
def _entity_with_credentials(credentials, cli_entity_cls):
"""Create entity class using credentials. If credentials is None will
return cli_entity_cls itself
:param credentials: tuple (login, password)
:param cli_entity_cls: Cli Entity Class
:return: Cli Entity Class
"""
if credentials is not None:
cli_entity_cls = cli_entity_cls.with_user(*credentials)
return cli_entity_cls
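# Illustrative example (the credentials are hypothetical): bind an entity
# class to a specific user before creating objects on that user's behalf:
#
#     cv_cls = _entity_with_credentials(('someuser', 'somepassword'), ContentView)
#     # cv_cls now issues hammer calls authenticated as 'someuser'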
@cacheable
def make_activation_key(options=None):
"""
Usage::
hammer activation-key create [OPTIONS]
Options::
--content-view CONTENT_VIEW_NAME Content view name to search by
--content-view-id CONTENT_VIEW_ID content view numeric identifier
--description DESCRIPTION description
--lifecycle-environment LIFECYCLE_ENVIRONMENT_NAME Name to search by
--lifecycle-environment-id LIFECYCLE_ENVIRONMENT_ID
--max-hosts MAX_CONTENT_HOSTS maximum number of registered
content hosts
--name NAME name
--organization ORGANIZATION_NAME Organization name to search by
--organization-id ORGANIZATION_ID
--organization-label ORGANIZATION_LABEL Organization label to search by
--unlimited-hosts UNLIMITED_CONTENT_HOSTS can the activation
key have unlimited
content hosts
"""
# Organization Name, Label or ID is a required field.
if (
not options or
not options.get('organization') and
not options.get('organization-label') and
not options.get('organization-id')):
raise CLIFactoryError('Please provide a valid Organization.')
args = {
u'content-view': None,
u'content-view-id': None,
u'description': None,
u'lifecycle-environment': None,
u'lifecycle-environment-id': None,
u'max-hosts': None,
u'name': gen_alphanumeric(),
u'organization': None,
u'organization-id': None,
u'organization-label': None,
u'unlimited-hosts': None,
}
return create_object(ActivationKey, args, options)
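# Illustrative call (the organization id is hypothetical):
#
#     ak = make_activation_key({u'organization-id': 1})
#     # 'name' is auto-generated because it was not supplied in options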
@cacheable
def make_architecture(options=None):
"""
Usage::
hammer architecture create [OPTIONS]
Options::
--name NAME
--operatingsystem-ids OPERATINGSYSTEM_IDS Operatingsystem ID’s
Comma separated list of values.
"""
args = {
u'name': gen_alphanumeric(),
u'operatingsystem-ids': None,
}
return create_object(Architecture, args, options)
def make_container(options=None):
"""Creates a docker container
Usage::
hammer docker container create [OPTIONS]
Options::
--attach-stderr ATTACH_STDERR One of true/false, yes/no,
1/0.
--attach-stdin ATTACH_STDIN One of true/false, yes/no,
1/0.
--attach-stdout ATTACH_STDOUT One of true/false, yes/no,
1/0.
--capsule CAPSULE_NAME Name to search by
--capsule-id CAPSULE_ID Id of the capsule
--command COMMAND
--compute-resource COMPUTE_RESOURCE_NAME Compute resource name
--compute-resource-id COMPUTE_RESOURCE_ID
--cpu-sets CPU_SETS
--cpu-shares CPU_SHARES
--entrypoint ENTRYPOINT
--location-ids LOCATION_IDS REPLACE locations with given
ids. Comma separated list of values.
--locations LOCATION_NAMES Comma separated list of
values.
--memory MEMORY
--name NAME
--organization-ids ORGANIZATION_IDS REPLACE organizations with
given ids. Comma separated
list of values.
--organizations ORGANIZATION_NAMES Comma separated list of
values.
--registry-id REGISTRY_ID Registry this container will
have to use to get the image
--repository-name REPOSITORY_NAME Name of the repository to use
to create the container. e.g:
centos
--tag TAG Tag to use to create the
container. e.g: latest
--tty TTY One of true/false, yes/no,
1/0.
"""
    # Compute resource name or ID is a required field.
if (not options or (
u'compute-resource' not in options and
u'compute-resource-id' not in options
)):
raise CLIFactoryError(
'Please provide at least compute-resource or compute-resource-id '
'options.'
)
args = {
u'attach-stderr': None,
u'attach-stdin': None,
u'attach-stdout': None,
u'capsule': None,
u'capsule-id': None,
u'command': 'top',
u'compute-resource': None,
u'compute-resource-id': None,
u'cpu-sets': None,
u'cpu-shares': None,
u'entrypoint': None,
u'location-ids': None,
u'locations': None,
u'memory': None,
u'name': gen_string('alphanumeric'),
u'organization-ids': None,
u'organizations': None,
u'registry-id': None,
u'repository-name': 'busybox',
u'tag': 'latest',
u'tty': None,
}
return create_object(DockerContainer, args, options)
@cacheable
def make_content_view(options=None):
"""
Usage::
hammer content-view create [OPTIONS]
Options::
--component-ids COMPONENT_IDS List of component content view
version ids for composite views
Comma separated list of values.
--composite Create a composite content view
--description DESCRIPTION Description for the content view
--label LABEL Content view label
--name NAME Name of the content view
--organization ORGANIZATION_NAME Organization name to search by
--organization-id ORGANIZATION_ID Organization identifier
--organization-label ORGANIZATION_LABEL Organization label to
search by
--product PRODUCT_NAME Product name to search by
--product-id PRODUCT_ID product numeric identifier
--repositories REPOSITORY_NAMES Comma separated list of values.
--repository-ids REPOSITORY_IDS List of repository ids
Comma separated list of values.
-h, --help print help
"""
return make_content_view_with_credentials(options)
def make_content_view_with_credentials(options=None, credentials=None):
"""
Usage::
hammer content-view create [OPTIONS]
Options::
--component-ids COMPONENT_IDS List of component content view
version ids for composite views
Comma separated list of values.
--composite Create a composite content view
--description DESCRIPTION Description for the content view
--label LABEL Content view label
--name NAME Name of the content view
--organization ORGANIZATION_NAME Organization name to search by
--organization-id ORGANIZATION_ID Organization identifier
--organization-label ORGANIZATION_LABEL Organization label to search by
--product PRODUCT_NAME Product name to search by
--product-id PRODUCT_ID product numeric identifier
--repositories REPOSITORY_NAMES Comma separated list of values.
--repository-ids REPOSITORY_IDS List of repository ids
Comma separated list of values.
-h, --help print help
If credentials is None default credentials present on
robottelo.properties will be used.
"""
# Organization ID is a required field.
if not options or not options.get('organization-id'):
raise CLIFactoryError('Please provide a valid ORG ID.')
args = {
u'component-ids': None,
u'composite': False,
u'description': None,
u'label': None,
u'name': gen_string('alpha', 10),
u'organization': None,
u'organization-id': None,
u'organization-label': None,
u'product': None,
u'product-id': None,
u'repositories': None,
u'repository-ids': None
}
cv_cls = _entity_with_credentials(credentials, ContentView)
return create_object(cv_cls, args, options)
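# Illustrative calls (ids and credentials are hypothetical):
#
#     cv = make_content_view({u'organization-id': 1})
#     cv_other = make_content_view_with_credentials(
#         {u'organization-id': 1}, credentials=('someuser', 'somepassword'))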
@cacheable
def make_content_view_filter(options=None):
"""
Usage::
content-view filter create [OPTIONS]
Options::
--content-view CONTENT_VIEW_NAME Content view name to search by
--content-view-id CONTENT_VIEW_ID content view numeric identifier
--description DESCRIPTION description of the filter
--inclusion INCLUSION specifies if content should be
included or excluded, default:
inclusion=false
One of true/false, yes/no, 1/0.
--name NAME name of the filter
--organization ORGANIZATION_NAME Organization name to search by
--organization-id ORGANIZATION_ID Organization ID to search by
--organization-label ORGANIZATION_LABEL Organization label to search by
--original-packages ORIGINAL_PACKAGES add all packages without errata
to the included/ excluded list.
(package filter only)
One of true/false, yes/no, 1/0.
--repositories REPOSITORY_NAMES Comma separated list of values.
--repository-ids REPOSITORY_IDS list of repository ids
Comma separated list of values.
--type TYPE type of filter (e.g. rpm,
package_group, erratum)
-h, --help print help
"""
args = {
u'content-view': None,
u'content-view-id': None,
u'description': None,
u'inclusion': None,
u'name': gen_string('alpha', 10),
u'organization': None,
u'organization-id': None,
u'organization-label': None,
u'original-packages': None,
u'repositories': None,
u'repository-ids': None,
u'type': None,
}
return create_object(ContentViewFilter, args, options)
@cacheable
def make_content_view_filter_rule(options=None):
"""
Usage::
content-view filter rule create [OPTIONS]
Options::
--content-view CONTENT_VIEW_NAME Content view name to search by
--content-view-filter CONTENT_VIEW_FILTER_NAME Name to search by
--content-view-filter-id CONTENT_VIEW_FILTER_ID filter identifier
--content-view-id CONTENT_VIEW_ID content view numeric identifier
--date-type DATE_TYPE erratum: search using the
'Issued On' or 'Updated On'
column of the errata.
Values are 'issued'/'updated'
--end-date END_DATE erratum: end date (YYYY-MM-DD)
--errata-id ERRATA_ID erratum: id
--errata-ids ERRATA_IDS erratum: IDs or a select all
object
Comma separated list of values.
--max-version MAX_VERSION package: maximum version
--min-version MIN_VERSION package: minimum version
--name NAME package and package group names
Comma separated list of values.
--names NAMES Package and package group names
--start-date START_DATE erratum: start date
(YYYY-MM-DD)
--types TYPES erratum: types (enhancement,
bugfix, security)
Comma separated list of values.
--version VERSION package: version
-h, --help print help
"""
args = {
u'content-view': None,
u'content-view-filter': None,
u'content-view-filter-id': None,
u'content-view-id': None,
u'date-type': None,
u'end-date': None,
u'errata-id': None,
u'errata-ids': None,
u'max-version': None,
u'min-version': None,
u'name': None,
u'names': None,
u'start-date': None,
u'types': None,
u'version': None,
}
return create_object(ContentViewFilterRule, args, options)
@cacheable
def make_discoveryrule(options=None):
"""
Usage::
hammer discovery_rule create [OPTIONS]
Options::
--enabled ENABLED flag is used for temporary shutdown
of rules
One of true/false, yes/no, 1/0.
--hostgroup HOSTGROUP_NAME Hostgroup name
--hostgroup-id HOSTGROUP_ID
--hostgroup-title HOSTGROUP_TITLE Hostgroup title
--hostname HOSTNAME defines a pattern to assign
human-readable hostnames to the
matching hosts
--hosts-limit HOSTS_LIMIT
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values.
--locations LOCATION_NAMES Comma separated list of values.
--max-count MAX_COUNT enables to limit maximum amount of
provisioned hosts per rule
--name NAME represents rule name shown to the
users
--organization-ids ORGANIZATION_IDS REPLACE organizations with given
ids.
Comma separated list of values.
--organizations ORGANIZATION_NAMES Comma separated list of values.
--priority PRIORITY puts the rules in order, low
numbers go first. Must be greater
then zero
--search SEARCH query to match discovered hosts for
the particular rule
-h, --help print help
"""
# Organizations, Locations, search query, hostgroup are required fields.
if not options:
raise CLIFactoryError('Please provide required parameters')
    # Organizations field is required
if not any(options.get(key) for key in [
'organizations', 'organization-ids'
]):
raise CLIFactoryError('Please provide a valid organization field.')
# Locations field is required
if not any(options.get(key) for key in ['locations', 'location-ids']):
raise CLIFactoryError('Please provide a valid location field.')
# search query is required
if not options.get('search'):
        raise CLIFactoryError('Please provide a valid search query')
# hostgroup is required
if not any(options.get(key) for key in ['hostgroup', 'hostgroup-id']):
        raise CLIFactoryError('Please provide a valid hostgroup')
args = {
u'enabled': None,
u'hostgroup': None,
u'hostgroup-id': None,
u'hostgroup-title': None,
u'hostname': None,
u'hosts-limit': None,
u'location-ids': None,
u'locations': None,
u'max-count': None,
u'name': gen_alphanumeric(),
u'organizations': None,
u'organization-ids': None,
u'priority': None,
u'search': None,
}
return create_object(DiscoveryRule, args, options)
@cacheable
def make_gpg_key(options=None):
"""
Usage::
hammer gpg create [OPTIONS]
Options::
--key GPG_KEY_FILE GPG Key file
--name NAME identifier of the GPG Key
--organization ORGANIZATION_NAME Organization name to search by
--organization-id ORGANIZATION_ID organization identifier
--organization-label ORGANIZATION_LABEL Organization label to search by
-h, --help print help
"""
# Organization ID is a required field.
if not options or not options.get('organization-id'):
raise CLIFactoryError('Please provide a valid ORG ID.')
# Create a fake gpg key file if none was provided
if not options.get('key'):
(_, key_filename) = mkstemp(text=True)
os.chmod(key_filename, 0o700)
with open(key_filename, 'w') as gpg_key_file:
gpg_key_file.write(gen_alphanumeric(gen_integer(20, 50)))
else:
# If the key is provided get its local path and remove it from options
# to not override the remote path
key_filename = options.pop('key')
args = {
u'key': '/tmp/{0}'.format(gen_alphanumeric()),
u'name': gen_alphanumeric(),
u'organization': None,
u'organization-id': None,
u'organization-label': None,
}
# Upload file to server
ssh.upload_file(local_file=key_filename, remote_file=args['key'])
return create_object(GPGKey, args, options)
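# Illustrative call (the organization id is hypothetical). With no 'key'
# option a throw-away key file is generated locally, uploaded to /tmp on the
# server and passed to hammer via --key:
#
#     gpg_key = make_gpg_key({u'organization-id': 1})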
@cacheable
def make_location(options=None):
"""Location CLI factory
Usage::
hammer location create [OPTIONS]
Options::
--compute-resource-ids COMPUTE_RESOURCE_IDS Compute resource IDs
Comma separated list of
values.
--compute-resources COMPUTE_RESOURCE_NAMES Compute resource names
Comma separated list of
values.
--config-template-ids CONFIG_TEMPLATE_IDS Provisioning template IDs
Comma separated list of
values.
--config-templates CONFIG_TEMPLATE_NAMES Provisioning template names
Comma separated list of
values.
--description DESCRIPTION Location description
--domain-ids DOMAIN_IDS Domain IDs
Comma separated list of
values.
--domains DOMAIN_NAMES Domain names
Comma separated list of
values.
--environment-ids ENVIRONMENT_IDS Environment IDs
Comma separated list of
values.
--environments ENVIRONMENT_NAMES Environment names
Comma separated list of
values.
--puppet-environment-ids ENVIRONMENT_IDS Environment IDs
Comma separated list of
values.
--puppet-environments ENVIRONMENT_NAMES Environment names
Comma separated list of
values.
--hostgroup-ids HOSTGROUP_IDS Host group IDs
Comma separated list of
values.
--hostgroups HOSTGROUP_NAMES Host group names
Comma separated list of
values.
--medium-ids MEDIUM_IDS Media IDs
Comma separated list of
values.
--name NAME
--realm-ids REALM_IDS Realm IDs
Comma separated list of
values.
--realms REALM_NAMES Realm names
Comma separated list of
values.
--smart-proxy-ids SMART_PROXY_IDS Smart proxy IDs
Comma separated list of
values.
--smart-proxies SMART_PROXY_NAMES Smart proxy names
Comma separated list of
values.
--subnet-ids SUBNET_IDS Subnet IDs
Comma separated list of
values.
--subnets SUBNET_NAMES Subnet names
Comma separated list of
--user-ids USER_IDS User IDs
Comma separated list of
values.
--users USER_LOGINS User names
Comma separated list of
values.
"""
args = {
u'compute-resource-ids': None,
u'compute-resources': None,
u'config-template-ids': None,
u'config-templates': None,
u'description': None,
u'domain-ids': None,
u'domains': None,
u'environment-ids': None,
u'environments': None,
u'puppet-environment-ids': None,
u'puppet-environments': None,
u'hostgroup-ids': None,
u'hostgroups': None,
u'medium-ids': None,
u'name': gen_alphanumeric(),
u'parent-id': None,
u'realm-ids': None,
u'realms': None,
u'smart-proxy-ids': None,
u'smart-proxies': None,
u'subnet-ids': None,
u'subnets': None,
u'user-ids': None,
u'users': None,
}
return create_object(Location, args, options)
@cacheable
def make_model(options=None):
"""
Usage::
hammer model create [OPTIONS]
Options::
--hardware-model HARDWARE_MODEL
--info INFO
--name NAME
--vendor-class VENDOR_CLASS
"""
args = {
u'hardware-model': None,
u'info': None,
u'name': gen_alphanumeric(),
u'vendor-class': None,
}
return create_object(Model, args, options)
@cacheable
def make_partition_table(options=None):
"""
Usage::
hammer partition-table create [OPTIONS]
Options::
--file LAYOUT Path to a file that contains the
partition layout
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values.
--locations LOCATION_NAMES Comma separated list of values.
--name NAME
--operatingsystem-ids OPERATINGSYSTEM_IDS Array of operating system IDs
to associate with the partition table Comma separated list of
values. Values containing comma should be double quoted
--operatingsystems OPERATINGSYSTEM_TITLES Comma separated list of
values. Values containing comma should be double quoted
--organization-ids ORGANIZATION_IDS REPLACE organizations with given
ids.
Comma separated list of values.
--organizations ORGANIZATION_NAMES Comma separated list of values.
--os-family OS_FAMILY
"""
if options is None:
options = {}
(_, layout) = mkstemp(text=True)
os.chmod(layout, 0o700)
with open(layout, 'w') as ptable:
ptable.write(options.get('content', 'default ptable content'))
args = {
u'file': '/tmp/{0}'.format(gen_alphanumeric()),
u'location-ids': None,
u'locations': None,
u'name': gen_alphanumeric(),
u'operatingsystem-ids': None,
u'operatingsystems': None,
u'organization-ids': None,
u'organizations': None,
u'os-family': random.choice(OPERATING_SYSTEMS),
}
# Upload file to server
ssh.upload_file(local_file=layout, remote_file=args['file'])
return create_object(PartitionTable, args, options)
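# Illustrative call (the layout string is hypothetical). The optional
# 'content' value is written to a temporary layout file, uploaded to the
# server and referenced through the --file option:
#
#     ptable = make_partition_table({u'content': 'part /boot ext3 --size=100'})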
@cacheable
def make_product(options=None):
return make_product_with_credentials(options)
def make_product_with_credentials(options=None, credentials=None):
"""
Usage::
hammer product create [OPTIONS]
Options::
--description DESCRIPTION Product description
--gpg-key GPG_KEY_NAME Name to search by
--gpg-key-id GPG_KEY_ID Identifier of the GPG key
--label LABEL
--name NAME
--organization ORGANIZATION_NAME Organization name to search by
--organization-id ORGANIZATION_ID ID of the organization
--organization-label ORGANIZATION_LABEL Organization label to search by
--sync-plan SYNC_PLAN_NAME Sync plan name to search by
--sync-plan-id SYNC_PLAN_ID Plan numeric identifier
-h, --help print help
"""
# Organization ID is a required field.
if not options or not options.get('organization-id'):
raise CLIFactoryError('Please provide a valid ORG ID.')
args = {
u'description': gen_string('alpha', 20),
u'gpg-key': None,
u'gpg-key-id': None,
u'label': gen_string('alpha', 20),
u'name': gen_string('alpha', 20),
u'organization': None,
u'organization-id': None,
u'organization-label': None,
u'sync-plan': None,
u'sync-plan-id': None,
}
product_cls = _entity_with_credentials(credentials, Product)
return create_object(product_cls, args, options)
def make_product_wait(options=None, wait_for=5):
"""Wrapper function for make_product to make it wait before erroring out.
    This is a temporary workaround for BZ#1332650: sometimes cli product
    create errors out for no apparent reason when multiple product creation
    requests run at the same time, even though the product entities are
    created. This workaround waits for `wait_for` seconds (5 by default) and
    queries the product again to make sure it was actually created. If it is
    still not found, the original error is re-raised.
Note: This wrapper method is created instead of patching make_product
because this issue does not happen for all entities and this workaround
should be removed once the root cause is identified/fixed.
"""
# Organization ID is a required field.
if not options or not options.get('organization-id'):
raise CLIFactoryError('Please provide a valid ORG ID.')
options['name'] = options.get('name', gen_string('alpha'))
try:
product = make_product(options)
except CLIFactoryError as err:
if not bz_bug_is_open(1332650):
raise err
sleep(wait_for)
try:
product = Product.info({
'name': options.get('name'),
'organization-id': options.get('organization-id'),
})
except CLIReturnCodeError:
raise err
if not product:
raise err
return product
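# Illustrative call (the organization id is hypothetical):
#
#     product = make_product_wait({u'organization-id': 1})
#     # retries the lookup once after `wait_for` seconds if BZ#1332650 hits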
@cacheable
def make_proxy(options=None):
"""
Usage::
hammer proxy create [OPTIONS]
Options::
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values.
--name NAME
--organization-ids ORGANIZATION_IDS REPLACE organizations
with given ids.
Comma separated list of values.
-h, --help print help
"""
args = {
u'name': gen_alphanumeric(),
}
if options is None or 'url' not in options:
newport = get_available_capsule_port()
try:
with default_url_on_new_port(9090, newport) as url:
args['url'] = url
return create_object(Proxy, args, options)
except CapsuleTunnelError as err:
raise CLIFactoryError(
'Failed to create ssh tunnel: {0}'.format(err))
args['url'] = options['url']
return create_object(Proxy, args, options)
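# Illustrative call (the URL is hypothetical). Passing 'url' skips the
# temporary SSH tunnel that is otherwise opened towards port 9090:
#
#     proxy = make_proxy({u'url': 'https://capsule.example.com:9090'})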
def make_registry(options=None):
"""Creates a docker registry
Usage::
hammer docker registry create [OPTIONS]
Options::
--description DESCRIPTION
--name NAME
--password PASSWORD
--url URL
--username USERNAME
"""
args = {
u'description': None,
u'name': gen_string('alphanumeric'),
u'password': None,
u'url': settings.docker.external_registry_1,
u'username': None,
}
return create_object(DockerRegistry, args, options)
@cacheable
def make_repository(options=None):
return make_repository_with_credentials(options)
def make_repository_with_credentials(options=None, credentials=None):
"""
Usage::
hammer repository create [OPTIONS]
Options::
--checksum-type CHECKSUM_TYPE checksum of the repository,
currently 'sha1' & 'sha256'
are supported.'
--content-type CONTENT_TYPE type of repo (either 'yum',
'puppet', 'docker' or 'ostree',
defaults to 'yum')
--docker-tags-whitelist DOCKER_TAGS_WHITELIST Comma separated list of
tags to sync for Container Image
repository
--docker-upstream-name DOCKER_UPSTREAM_NAME name of the upstream docker
repository
--download-policy DOWNLOAD_POLICY download policy for yum repos
(either 'immediate','on_demand'
or 'background')
--gpg-key GPG_KEY_NAME Name to search by
--gpg-key-id GPG_KEY_ID gpg key numeric identifier
--ignorable-content IGNORABLE_CONTENT List of content units to ignore
while syncing a yum repository.
Subset of rpm, drpm, srpm,
distribution, erratum
--label LABEL
--mirror-on-sync MIRROR_ON_SYNC true if this repository when
synced has to be mirrored from
the source and stale rpms
removed.
--name NAME
--organization ORGANIZATION_NAME Organization name to search by
--organization-id ORGANIZATION_ID organization ID
--organization-label ORGANIZATION_LABEL Organization label to search by
--ostree-upstream-sync-depth OSTREE_UPSTREAM_SYNC_DEPTH if a custom
sync policy is chosen for
ostree repositories then a
'depth' value must be provided.
--ostree-upstream-sync-policy OSTREE_UPSTREAM_SYNC_POLICY policies for
syncing upstream ostree
repositories. Possible
value(s): 'latest', 'all',
'custom'
--product PRODUCT_NAME Product name to search by
--product-id PRODUCT_ID product numeric identifier
--publish-via-http ENABLE Publish Via HTTP
One of true/false, yes/no, 1/0.
--upstream-password UPSTREAM_PASSWORD Password of the upstream
repository user used for
authentication
--upstream-username UPSTREAM_USERNAME Username of the upstream
repository user used for
authentication
--url URL repository source url
-h, --help print help
"""
# Product ID is a required field.
if not options or not options.get('product-id'):
raise CLIFactoryError('Please provide a valid Product ID.')
args = {
u'checksum-type': None,
u'content-type': u'yum',
u'docker-tags-whitelist': None,
u'docker-upstream-name': None,
u'download-policy': None,
u'gpg-key': None,
u'gpg-key-id': None,
u'ignorable-content': None,
u'label': None,
u'mirror-on-sync': None,
u'name': gen_string('alpha', 15),
u'organization': None,
u'organization-id': None,
u'organization-label': None,
u'product': None,
u'product-id': None,
u'publish-via-http': u'true',
u'url': FAKE_1_YUM_REPO,
}
repo_cls = _entity_with_credentials(credentials, Repository)
return create_object(repo_cls, args, options)
@cacheable
def make_role(options=None):
"""Usage::
hammer role create [OPTIONS]
Options::
--name NAME
"""
# Assigning default values for attributes
args = {u'name': gen_alphanumeric(6)}
return create_object(Role, args, options)
@cacheable
def make_filter(options=None):
"""
Usage::
hammer filter create [OPTIONS]
Options::
--location-ids LOCATION_IDS Comma separated list of values.
--locations LOCATION_NAMES Comma separated list of values.
--organization-ids ORGANIZATION_IDS Comma separated list of values.
--organizations ORGANIZATION_NAMES Comma separated list of values.
--override OVERRIDE One of true/false, yes/no, 1/0.
--permission-ids PERMISSION_IDS Comma separated list of values.
--permissions PERMISSION_NAMES Comma separated list of values.
--role ROLE_NAME User role name
--role-id ROLE_ID
--search SEARCH
-h, --help print help
"""
args = {
u'location-ids': None,
u'locations': None,
u'organization-ids': None,
u'organizations': None,
u'override': None,
u'permission-ids': None,
u'permissions': None,
u'role': None,
u'role-id': None,
u'search': None,
}
# Role and permissions are required fields.
if not options:
raise CLIFactoryError('Please provide required parameters')
# Do we have at least one role field?
if not any(options.get(key) for key in ['role', 'role-id']):
raise CLIFactoryError('Please provide a valid role field.')
# Do we have at least one permissions field?
if not any(options.get(key) for key in ['permissions', 'permission-ids']):
raise CLIFactoryError('Please provide a valid permissions field.')
return create_object(Filter, args, options)
@cacheable
def make_scap_policy(options=None):
"""
Usage::
policy create [OPTIONS]
Options::
--cron-line CRON_LINE Policy schedule
cron line
--day-of-month DAY_OF_MONTH Policy schedule
day of month
(only if period
== “monthly”)
--deploy-by DEPLOY_BY How the policy should be deployed
Possible value(s): 'puppet',
'ansible', 'manual'
--description DESCRIPTION Policy
description
--hostgroup-ids HOSTGROUP_IDS Apply policy to host groups
Comma separated list of values.
Values containing comma should be
quoted or escaped with backslash
--hostgroups HOSTGROUP_NAMES Comma separated
list of values.
Values
containing
comma should be
quoted or
escaped with
backslash
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values.
Values containing comma should be
quoted or escaped with backslash
--locations LOCATION_NAMES Comma separated list of values.
Values containing comma should be
quoted or escaped with backslash
--name NAME Policy name
--organization-ids ORGANIZATION_IDS REPLACE organizations with given ids.
Comma separated list of values.
Values containing comma should be
quoted or escaped with backslash
--organizations ORGANIZATION_NAMES Comma separated list of values.
Values containing comma should be
quoted or escaped with backslash
--period PERIOD Policy schedule
period (weekly,
monthly, custom)
--scap-content SCAP_CONTENT_TITLE SCAP content
title
--scap-content-id SCAP_CONTENT_ID
--scap-content-profile-id SCAP_CONTENT_PROFILE_ID Policy SCAP
content
profile ID
--tailoring-file TAILORING_FILE_NAME Tailoring file
name
--tailoring-file-id TAILORING_FILE_ID
--tailoring-file-profile-id TAILORING_FILE_PROFILE_ID Tailoring file
profile ID
--weekday WEEKDAY Policy schedule
weekday (only if
period
== “weekly”)
-h, --help print help
"""
    # Assigning default values for attributes
    # SCAP content ID, SCAP profile ID, period and deploy-by are required.
    if not options or not options.get('scap-content-id') or not options.get(
            'scap-content-profile-id') or not options.get('period') or not options.get(
            'deploy-by'):
        raise CLIFactoryError('Please provide a valid SCAP ID or'
                              ' SCAP Profile ID or Period or Deploy by option')
args = {
u'description': None,
u'scap-content-id': None,
u'scap-content-profile-id': None,
u'deploy-by': None,
u'period': None,
u'weekday': None,
u'day-of-month': None,
u'cron-line': None,
u'hostgroup-ids': None,
u'hostgroups': None,
u'locations': None,
u'organizations': None,
u'tailoring-file': None,
u'tailoring-file-id': None,
u'tailoring-file-profile-id': None,
u'location-ids': None,
u'name': gen_alphanumeric().lower(),
u'organization-ids': None,
}
return create_object(Scappolicy, args, options)
@cacheable
def make_subnet(options=None):
"""
Usage::
hammer subnet create [OPTIONS]
Options::
--boot-mode BOOT_MODE Default boot mode for interfaces assigned
to this subnet, valid values are
"Static", "DHCP"
--dhcp-id DHCP_ID DHCP Proxy to use within this subnet
--dns-id DNS_ID DNS Proxy to use within this subnet
--dns-primary DNS_PRIMARY Primary DNS for this subnet
--dns-secondary DNS_SECONDARY Secondary DNS for this subnet
--domain-ids DOMAIN_IDS Numerical ID or domain name
--domains DOMAIN_NAMES Comma separated list of values.
--from FROM Starting IP Address for IP auto
suggestion
--gateway GATEWAY Primary DNS for this subnet
--ipam IPAM IP Address auto suggestion mode for this
subnet, valid values are
'DHCP', 'Internal DB', 'None'
--location-ids LOCATION_IDS
--locations LOCATION_NAMES Comma separated list of values.
--mask MASK Netmask for this subnet
--name NAME Subnet name
--network NETWORK Subnet network
--organization-ids ORGANIZATION_IDS organization ID
--organizations ORGANIZATION_NAMES Comma separated list of values.
--tftp-id TFTP_ID TFTP Proxy to use within this subnet
--to TO Ending IP Address for IP auto suggestion
--vlanid VLANID VLAN ID for this subnet
-h, --help print help
"""
args = {
u'boot-mode': None,
u'dhcp-id': None,
u'dns-id': None,
u'dns-primary': None,
u'dns-secondary': None,
u'domain-ids': None,
u'domains': None,
u'from': None,
u'gateway': None,
u'ipam': None,
u'location-ids': None,
u'locations': None,
u'mask': gen_netmask(),
u'name': gen_alphanumeric(8),
u'network': gen_ipaddr(ip3=True),
u'organization-ids': None,
u'organizations': None,
u'tftp-id': None,
u'to': None,
u'vlanid': None,
}
return create_object(Subnet, args, options)
@cacheable
def make_sync_plan(options=None):
"""
Usage::
hammer sync-plan create [OPTIONS]
Options::
--description DESCRIPTION sync plan description
--enabled ENABLED enables or disables
synchronization. One of
true/false, yes/no, 1/0.
--interval INTERVAL how often synchronization
should run. One of 'none',
'hourly', 'daily', 'weekly'
'custom cron'.
Default: ""none""
--name NAME sync plan name
--organization ORGANIZATION_NAME Organization name to search by
--organization-id ORGANIZATION_ID organization ID
--organization-label ORGANIZATION_LABEL Organization label to search by
--sync-date SYNC_DATE start date and time of the
synchronization defaults to now
Date and time in YYYY-MM-DD
HH:MM:SS or ISO 8601 format
Default: "2014-10-07 08:50:35"
--cron-expression CRON EXPRESSION Set this when interval is
custom cron
-h, --help print help
"""
# Organization ID is a required field.
if not options or not options.get('organization-id'):
raise CLIFactoryError('Please provide a valid ORG ID.')
args = {
u'description': gen_string('alpha', 20),
u'enabled': 'true',
u'interval': random.choice(list(SYNC_INTERVAL.values())),
u'name': gen_string('alpha', 20),
u'organization': None,
u'organization-id': None,
u'organization-label': None,
u'sync-date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
u'cron-expression': None,
}
if (options.get('interval', args['interval']) == SYNC_INTERVAL['custom']
and not options.get('cron-expression')):
args['cron-expression'] = gen_choice(valid_cron_expressions())
return create_object(SyncPlan, args, options)
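# Illustrative call (the organization id is hypothetical). Selecting the
# custom interval without a cron expression makes the factory generate one:
#
#     plan = make_sync_plan({
#         u'organization-id': 1,
#         u'interval': SYNC_INTERVAL['custom'],
#     })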
@cacheable
def make_host(options=None):
"""
Usage::
hammer host create [OPTIONS]
Options::
--architecture ARCHITECTURE_NAME Architecture name
--architecture-id ARCHITECTURE_ID
--ask-root-password ASK_ROOT_PW One of true/false, yes/no,
1/0.
--autoheal AUTOHEAL Sets whether the Host will
autoheal subscriptions upon
checkin
One of true/false, yes/no,
1/0.
--build BUILD One of true/false, yes/no,
1/0.
Default: "true"
--comment COMMENT Additional information
about this host
--compute-attributes COMPUTE_ATTRS Compute resource attributes
Comma-separated list of
key=value.
--compute-profile COMPUTE_PROFILE_NAME Name to search by
--compute-profile-id COMPUTE_PROFILE_ID
--compute-resource COMPUTE_RESOURCE_NAME Compute resource name
--compute-resource-id COMPUTE_RESOURCE_ID
--config-group-ids CONFIG_GROUP_IDS IDs of associated config
groups. Comma separated
list of values
--config-groups CONFIG_GROUP_NAMES Comma separated list of
values.
--content-source-id CONTENT_SOURCE_ID
--content-view CONTENT_VIEW_NAME Name to search by
--content-view-id CONTENT_VIEW_ID content view numeric
identifier
--domain DOMAIN_NAME Domain name
--domain-id DOMAIN_ID Numerical ID or domain name
--enabled ENABLED One of true/false, yes/no,
1/0.
Default: "true"
--environment ENVIRONMENT_NAME Environment name
--environment-id ENVIRONMENT_ID
--hostgroup HOSTGROUP_NAME Hostgroup name
--hostgroup-id HOSTGROUP_ID
--hostgroup-title HOSTGROUP_TITLE Hostgroup title
--hypervisor-guest-uuids HYPERVISOR_GUEST_UUIDS List of hypervisor
guest uuids
Comma separated
list of values.
--image IMAGE_NAME Name to search by
--image-id IMAGE_ID
--interface INTERFACE Interface parameters.
Comma-separated list of
key=value.
Can be specified multiple
times.
--ip IP not required if using a
subnet with DHCP Capsule
--kickstart-repository-id KICKSTART_REPOSITORY_ID Repository Id
associated with the
kickstart repo used
for provisioning
--lifecycle-environment LIFECYCLE_ENVIRONMENT_NAME Name to search by
--lifecycle-environment-id LIFECYCLE_ENVIRONMENT_ID ID of the
environment
--location LOCATION_NAME Location name
--location-id LOCATION_ID
--mac MAC required for managed host
that is bare metal, not
required if it's a virtual
machine
--managed MANAGED One of true/false, yes/no,
1/0.
Default: "true"
--medium MEDIUM_NAME Medium name
--medium-id MEDIUM_ID
--model MODEL_NAME Model name
--model-id MODEL_ID
--name NAME
--operatingsystem OPERATINGSYSTEM_TITLE Operating system title
--operatingsystem-id OPERATINGSYSTEM_ID
--organization ORGANIZATION_NAME Organization name
--organization-id ORGANIZATION_ID organization ID
--overwrite OVERWRITE One of true/false, yes/no,
1/0.
Default: "true"
--owner OWNER_LOGIN Login of the owner
--owner-id OWNER_ID ID of the owner
--owner-type OWNER_TYPE Host's owner type
Possible value(s): 'User',
'Usergroup'
--parameters PARAMS Host parameters.
Comma-separated list of
key=value.
--partition-table PARTITION_TABLE_NAME Partition table name
--partition-table-id PARTITION_TABLE_ID
--progress-report-id PROGRESS_REPORT_ID UUID to track orchestration
tasks status, GET
/api/orchestration/:UUID
/tasks
--provision-method METHOD Possible value(s): 'build',
'image'
--puppet-ca-proxy PUPPET_CA_PROXY_NAME
--puppet-ca-proxy-id PUPPET_CA_PROXY_ID
--puppet-class-ids PUPPET_CLASS_IDS Comma separated list of
values.
--puppet-classes PUPPET_CLASS_NAMES Comma separated list of
values.
--puppet-proxy PUPPET_PROXY_NAME
--puppet-proxy-id PUPPET_PROXY_ID
--pxe-loader PXE_LOADER DHCP filename option
(Grub2/PXELinux by default)
Possible value(s): 'None',
'PXELinux BIOS',
'PXELinux UEFI',
'Grub UEFI',
'Grub UEFI SecureBoot',
'Grub2 UEFI',
'Grub2 UEFI SecureBoot'
--realm REALM_NAME Name to search by
--realm-id REALM_ID Numerical ID or realm name
--release-version RELEASE_VERSION Release version for this
Host to use (7Server, 7.1,
etc)
--root-password ROOT_PW required if host is managed
and value is not inherited
from host group or default
password in settings
--service-level SERVICE_LEVEL Service level to be used
for autoheal.
--subnet SUBNET_NAME Subnet name
--subnet-id SUBNET_ID
--volume VOLUME Volume parameters
Comma-separated list of
key=value.
Can be specified multiple
times.
Available keys for --interface::
mac
ip
type Possible values: interface,
bmc, bond, bridge
name
subnet_id
domain_id
identifier
managed true/false
primary true/false, each managed
hosts needs to have one
primary interface.
provision true/false
virtual true/false
"""
args = {
u'architecture': None,
u'architecture-id': None,
u'ask-root-password': None,
u'autoheal': None,
u'build': None,
u'comment': None,
u'compute-attributes': None,
u'compute-profile': None,
u'compute-profile-id': None,
u'compute-resource': None,
u'compute-resource-id': None,
u'content-source-id': None,
u'content-view': None,
u'content-view-id': None,
u'domain': None,
u'domain-id': None,
u'enabled': None,
u'environment': None,
u'environment-id': None,
u'hostgroup': None,
u'hostgroup-id': None,
u'hostgroup-title': None,
u'hypervisor-guest-uuids': None,
u'image': None,
u'image-id': None,
u'interface': None,
u'ip': gen_ipaddr(),
u'kickstart-repository-id': None,
u'lifecycle-environment': None,
u'lifecycle-environment-id': None,
u'location': None,
u'location-id': None,
u'mac': gen_mac(multicast=False),
u'managed': None,
u'medium': None,
u'medium-id': None,
u'model': None,
u'model-id': None,
u'name': gen_string('alpha', 10),
u'operatingsystem': None,
u'operatingsystem-id': None,
u'openscap-proxy-id': None,
u'organization': None,
u'organization-id': None,
u'overwrite': None,
u'owner': None,
u'owner-id': None,
u'owner-type': None,
u'parameters': None,
u'partition-table': None,
u'partition-table-id': None,
u'progress-report-id': None,
u'provision-method': None,
u'puppet-ca-proxy': None,
u'puppet-ca-proxy-id': None,
u'puppet-class-ids': None,
u'puppet-classes': None,
u'puppet-proxy': None,
u'puppet-proxy-id': None,
u'pxe-loader': None,
u'realm': None,
u'realm-id': None,
u'root-password': gen_string('alpha', 8),
u'service-level': None,
u'subnet': None,
u'subnet-id': None,
u'volume': None,
}
return create_object(Host, args, options)
@cacheable
def make_fake_host(options=None):
"""Wrapper function for make_host to pass all required options for creation
of a fake host
"""
if options is None:
options = {}
# Try to use default Satellite entities, otherwise create them if they were
# not passed or defined previously
if not options.get('organization') and not options.get('organization-id'):
try:
options['organization-id'] = Org.info({'name': DEFAULT_ORG})['id']
except CLIReturnCodeError:
options['organization-id'] = make_org()['id']
if not options.get('location') and not options.get('location-id'):
try:
options['location-id'] = Location.info({'name': DEFAULT_LOC})['id']
except CLIReturnCodeError:
options['location-id'] = make_location()['id']
if not options.get('domain') and not options.get('domain-id'):
options['domain-id'] = make_domain({
'location-ids': options.get('location-id'),
'locations': options.get('location'),
'organization-ids': options.get('organization-id'),
'organizations': options.get('organization'),
})['id']
if not options.get('architecture') and not options.get('architecture-id'):
try:
options['architecture-id'] = Architecture.info({
'name': DEFAULT_ARCHITECTURE})['id']
except CLIReturnCodeError:
options['architecture-id'] = make_architecture()['id']
if (not options.get('operatingsystem') and
not options.get('operatingsystem-id')):
try:
options['operatingsystem-id'] = OperatingSys.list({
'search': 'name="RedHat" AND major="{0}" OR major="{1}"'
.format(
RHEL_6_MAJOR_VERSION,
RHEL_7_MAJOR_VERSION
)
})[0]['id']
except IndexError:
options['operatingsystem-id'] = make_os({
'architecture-ids': options.get('architecture-id'),
'architectures': options.get('architecture'),
'partition-table-ids': options.get('partition-table-id'),
'partition-tables': options.get('partition-table'),
})['id']
if (not options.get('partition-table') and
not options.get('partition-table-id')):
try:
options['partition-table-id'] = PartitionTable.list({
'operatingsystem': options.get('operatingsystem'),
'operatingsystem-id': options.get('operatingsystem-id'),
})[0]['id']
except IndexError:
options['partition-table-id'] = make_partition_table({
'location-ids': options.get('location-id'),
'locations': options.get('location'),
'operatingsystem-ids': options.get('operatingsystem-id'),
'organization-ids': options.get('organization-id'),
'organizations': options.get('organization'),
})['id']
# Finally, create a new medium (if none was passed)
if not options.get('medium') and not options.get('medium-id'):
options['medium-id'] = make_medium({
'location-ids': options.get('location-id'),
'locations': options.get('location'),
'operatingsystems': options.get('operatingsystem'),
'operatingsystem-ids': options.get('operatingsystem-id'),
'organization-ids': options.get('organization-id'),
'organizations': options.get('organization'),
})['id']
return make_host(options)
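# Illustrative call: with no options, make_fake_host resolves or creates every
# prerequisite (organization, location, domain, architecture, operating
# system, partition table, medium) before delegating to make_host:
#
#     host = make_fake_host()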
@cacheable
def make_host_collection(options=None):
"""
Usage::
host-collection create [OPTIONS]
Options::
--description DESCRIPTION
--host-collection-ids HOST_COLLECTION_IDS Array of content host ids to
replace the content hosts in
host collection
Comma separated list of
values
--hosts HOST_NAMES Comma separated list of
values
--max-hosts MAX_CONTENT_HOSTS Maximum number of content
hosts in the host collection
--name NAME Host Collection name
--organization ORGANIZATION_NAME
--organization-id ORGANIZATION_ID Organization identifier
--organization-label ORGANIZATION_LABEL
--unlimited-hosts UNLIMITED_CONTENT_HOSTS Whether or not the host
collection may have
unlimited content hosts
One of true/false, yes/no,
1/0.
-h, --help print help
"""
# Assigning default values for attributes
args = {
u'description': None,
u'host-collection-ids': None,
u'hosts': None,
u'max-hosts': None,
u'name': gen_string('alpha', 15),
u'organization': None,
u'organization-id': None,
u'organization-label': None,
u'unlimited-hosts': None,
}
return create_object(HostCollection, args, options)
@cacheable
def make_job_invocation(options=None):
"""
Usage::
hammer job-invocation create
Options::
--async Do not wait for the task
--bookmark BOOKMARK_NAME Name to search by
--bookmark-id BOOKMARK_ID
--concurrency-level CONCURRENCY_LEVEL Run at most N tasks at a time
--cron-line CRONLINE Create a recurring execution
--description-format DESCRIPTION_FORMAT Override the description
format from the template for
this invocation only
--dynamic Dynamic search queries are
evaluated at run time
--effective-user EFFECTIVE_USER What user should be used to
run the script (using
sudo-like mechanisms).
--end-time DATETIME Perform no more executions
after this time, used with
--cron-line (YYYY-MM-DD
HH:MM:SS or ISO 8601 format)
--input-files INPUT FILES Read input values from files
Comma-separated list of
key=file, where file is a
path to a text file
--inputs INPUTS Inputs from command line
Comma-separated list of
key=value.
--job-template JOB_TEMPLATE_NAME Name to search by
--job-template-id JOB_TEMPLATE_ID
--max-iteration MAX_ITERATION Repeat a maximum of N times
--search-query SEARCH_QUERY
--start-at DATETIME Schedule the execution for
a later time in
YYYY-MM-DD HH:MM:SS
or ISO 8601
--start-before DATETIME Execution should be cancelled
if it cannot be started
before specified datetime
--time-span TIME_SPAN Distribute tasks over
N seconds
"""
args = {
u'async': None,
u'bookmark': None,
u'bookmark-id': None,
u'concurrency-level': None,
u'cron-line': None,
u'description-format': None,
u'dynamic': None,
u'effective-user': None,
u'end-time': None,
u'input-files': None,
u'inputs': None,
u'job-template': None,
u'job-template-id': None,
u'max-iteration': None,
u'search-query': None,
u'start-at': None,
u'start-before': None,
u'time-span': None,
}
return create_object(JobInvocation, args, options)
@cacheable
def make_job_template(options=None):
"""
Usage::
hammer job-template create
Options::
--audit-comment AUDIT_COMMENT
--current-user CURRENT_USER Whether the current user login
should be used as the effective
user.
--description-format DESCRIPTION_FORMAT This template is used to
generate the description.
--file TEMPLATE Path to a file that contains
the template.
--job-category JOB_CATEGORY Job category.
--location-ids LOCATION_IDS Comma separated list of values.
--locations LOCATION_NAMES Comma separated list of values.
--locked LOCKED Whether or not the template is
locked for editing.
--name NAME Template name
--organization-ids ORGANIZATION_IDS Comma separated list of values.
--organizations ORGANIZATION_NAMES Comma separated list of values.
--overridable OVERRIDABLE Whether it should be allowed to
override the effective user
from the invocation form.
--provider-type PROVIDER_TYPE Possible value(s): 'SSH'
--snippet SNIPPET One of true/false, yes/no, 1/0.
--value VALUE What user should be used to run
the script (using sudo-like
mechanisms).
"""
args = {
u'audit-comment': None,
u'current-user': None,
u'description-format': None,
u'file': None,
u'job-category': u'Miscellaneous',
u'location-ids': None,
u'locations': None,
u'name': None,
u'organization-ids': None,
u'organizations': None,
u'overridable': None,
u'provider-type': u'SSH',
u'snippet': None,
u'value': None,
}
return create_object(JobTemplate, args, options)
@cacheable
def make_user(options=None):
"""
Usage::
hammer user create [OPTIONS]
Options::
--admin ADMIN Is an admin account?
--auth-source-id AUTH_SOURCE_ID
--default-location-id DEFAULT_LOCATION_ID
--default-organization-id DEFAULT_ORGANIZATION_ID
--description DESCRIPTION
--firstname FIRSTNAME
--lastname LASTNAME
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values.
--login LOGIN
--mail MAIL
--organization-ids ORGANIZATION_IDS REPLACE organizations with
given ids.
Comma separated list of values.
--password PASSWORD
-h, --help print help
"""
login = gen_alphanumeric(6)
# Assigning default values for attributes
args = {
u'admin': None,
u'auth-source-id': 1,
u'default-location-id': None,
u'default-organization-id': None,
u'description': None,
u'firstname': gen_alphanumeric(),
u'lastname': gen_alphanumeric(),
u'location-ids': None,
u'login': login,
u'mail': '{0}@example.com'.format(login),
u'organization-ids': None,
u'password': gen_alphanumeric(),
}
    logger.debug(
        'No password was provided for user "{0}"; generated "{1}"'
        .format(args['login'], args['password'])
    )
return create_object(User, args, options)
@cacheable
def make_usergroup(options=None):
"""
Usage:
hammer user-group create [OPTIONS]
Options:
--name NAME
--role-ids ROLE_IDS Comma separated list
--roles ROLE_NAMES Comma separated list
--user-group-ids, --usergroup-ids USER_GROUP_IDS Comma separated list
--user-groups, --usergroups USER_GROUP_NAMES Comma separated list
--user-ids USER_IDS Comma separated list
--users USER_LOGINS Comma separated list
"""
# Assigning default values for attributes
args = {
u'name': gen_alphanumeric(),
u'role-ids': None,
u'roles': None,
u'user-group-ids': None,
u'user-groups': None,
u'user-ids': None,
u'users': None,
}
return create_object(UserGroup, args, options)
@cacheable
def make_usergroup_external(options=None):
"""
Usage::
hammer user-group external create [OPTIONS]
Options::
--auth-source-id AUTH_SOURCE_ID ID of linked auth source
--name NAME External user group name
--user-group, --usergroup USER_GROUP_NAME Name to search by
--user-group-id, --usergroup-id USER_GROUP_ID
"""
# UserGroup Name or ID is a required field.
if (
not options or
not options.get('user-group') and
not options.get('user-group-id')
):
raise CLIFactoryError('Please provide a valid UserGroup.')
# Assigning default values for attributes
args = {
u'auth-source-id': 1,
u'name': gen_alphanumeric(8),
u'user-group': None,
u'user-group-id': None,
}
return create_object(UserGroupExternal, args, options)
@cacheable
def make_ldap_auth_source(options=None):
"""
Usage::
hammer auth-source ldap create [OPTIONS]
Options::
--account ACCOUNT
--account-password ACCOUNT_PASSWORD required if onthefly_register
is true
--attr-firstname ATTR_FIRSTNAME required if onthefly_register
is true
--attr-lastname ATTR_LASTNAME required if onthefly_register
is true
--attr-login ATTR_LOGIN required if onthefly_register
is true
--attr-mail ATTR_MAIL required if onthefly_register
is true
--attr-photo ATTR_PHOTO
--base-dn BASE_DN
--groups-base GROUPS_BASE groups base DN
--host HOST
--ldap-filter LDAP_FILTER LDAP filter
--location-ids LOCATION_IDS REPLACE locations with given
ids
Comma separated list of
values. Values containing
comma should be double quoted
--locations LOCATION_NAMES Comma separated list of
values. Values containing
comma should be double quoted
--name NAME
--onthefly-register ONTHEFLY_REGISTER One of
true/false, yes/no, 1/0.
--organization-ids ORGANIZATION_IDS REPLACE organizations with
given ids.
Comma separated list of
values. Values containing
comma should be double quoted
--organizations ORGANIZATION_NAMES Comma separated list of
values. Values containing
comma should be double quoted
--port PORT defaults to 389
--server-type SERVER_TYPE type of the LDAP server
Possible value(s):
'free_ipa',
'active_directory', 'posix'
--tls TLS One of true/false, yes/no,
1/0.
--usergroup-sync USERGROUP_SYNC sync external user groups on
login
One of true/false, yes/no,
1/0.
-h, --help print help
"""
# Assigning default values for attributes
args = {
u'account': None,
u'account-password': None,
u'attr-firstname': None,
u'attr-lastname': None,
u'attr-login': None,
u'attr-mail': None,
u'attr-photo': None,
u'base-dn': None,
u'groups-base': None,
u'host': None,
u'ldap-filter': None,
u'location-ids': None,
u'locations': None,
u'name': gen_alphanumeric(),
u'onthefly-register': None,
u'organization-ids': None,
u'organizations': None,
u'port': None,
u'server-type': None,
u'tls': None,
u'usergroup-sync': None,
}
return create_object(LDAPAuthSource, args, options)
@cacheable
def make_compute_resource(options=None):
"""
Usage::
hammer compute-resource create [OPTIONS]
Options::
--caching-enabled CACHING_ENABLED Enable caching, for VMware only
One of true/false, yes/no, 1/0.
--datacenter DATACENTER For RHEV, VMware Datacenter
--description DESCRIPTION
--display-type DISPLAY_TYPE For Libvirt only
Possible value(s): 'VNC', 'SPICE'
--domain DOMAIN For RHEL OpenStack Platform (v3) only
--location LOCATION_NAME Location name
--location-id LOCATION_ID
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values. Values
containing comma should be quoted or escaped
with backslash.
JSON is acceptable and preferred way for
complex parameters
--location-title LOCATION_TITLE Location title
--location-titles LOCATION_TITLES Comma separated list of values. Values
containing comma should be
quoted or escaped with backslash.
JSON is acceptable and preferred way for
complex parameters
--locations LOCATION_NAMES Comma separated list of values. Values
containing comma should be
quoted or escaped with backslash.
JSON is acceptable and preferred way for
complex parameters
--name NAME
--organization ORGANIZATION_NAME Organization name
--organization-id ORGANIZATION_ID Organization ID
--organization-ids ORGANIZATION_IDS REPLACE organizations with given ids.
Comma separated list of values. Values
containing comma should be
quoted or escaped with backslash.
JSON is acceptable and preferred way for
complex parameters
--organization-title ORGANIZATION_TITLE Organization title
--organization-titles ORGANIZATION_TITLES Comma separated list of values. Values
containing comma should be
quoted or escaped with backslash.
JSON is acceptable and preferred way for
complex parameters
--organizations ORGANIZATION_NAMES Comma separated list of values. Values
containing comma should be
quoted or escaped with backslash.
JSON is acceptable and preferred way for
complex parameters
--ovirt-quota OVIRT_QUOTA For RHEV only, ID of quota to use
--password PASSWORD Password for RHEV, EC2, VMware, RHEL OpenStack
Platform. Secret key for EC2
--project-domain-id PROJECT_DOMAIN_ID For RHEL OpenStack Platform (v3) only
--project-domain-name PROJECT_DOMAIN_NAME For RHEL OpenStack Platform (v3) only
--provider PROVIDER Providers include Libvirt, Ovirt, EC2, Vmware,
Openstack, Rackspace, GCE
--public-key PUBLIC_KEY For RHEV only
--public-key-path PUBLIC_KEY_PATH Path to a file that contains oVirt public key
(For oVirt only)
--region REGION For EC2 only, use 'us-gov-west-1' for GovCloud
region
--server SERVER For VMware
--set-console-password SET_CONSOLE_PASSWORD For Libvirt and VMware only
One of true/false, yes/no, 1/0.
--tenant TENANT For RHEL OpenStack Platform only
--url URL URL for Libvirt, RHEV, RHEL OpenStack Platform
and Rackspace
--use-v4 USE_V4 For RHEV only
One of true/false, yes/no, 1/0.
--user USER Username for RHEV, EC2, VMware, RHEL OpenStack
Platform. Access Key for EC2.
--uuid UUID Deprecated, please use datacenter
-h, --help Print help
"""
args = {
u'caching-enabled': None,
u'datacenter': None,
u'description': None,
u'display-type': None,
u'domain': None,
u'location': None,
u'location-id': None,
u'location-ids': None,
u'location-title': None,
u'location-titles': None,
u'locations': None,
u'name': gen_alphanumeric(8),
u'organization': None,
u'organization-id': None,
u'organization-ids': None,
u'organization-title': None,
u'organization-titles': None,
u'organizations': None,
u'ovirt-quota': None,
u'password': None,
u'project-domain-id': None,
u'project-domain-name': None,
u'provider': None,
u'public-key': None,
u'public-key-path': None,
u'region': None,
u'server': None,
u'set-console-password': None,
u'tenant': None,
u'url': None,
u'use-v4': None,
u'user': None,
u'uuid': None,
}
if options is None:
options = {}
if options.get('provider') is None:
options['provider'] = FOREMAN_PROVIDERS['libvirt']
if options.get('url') is None:
options['url'] = 'qemu+tcp://localhost:16509/system'
return create_object(ComputeResource, args, options)
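# Illustrative usage (a sketch, not from the original source; the option values
# are assumptions). With no provider/url given, the factory defaults to a local
# libvirt provider:
#     compute_resource = make_compute_resource({'organization-ids': org['id']})
#     # defaults applied: provider=FOREMAN_PROVIDERS['libvirt'],
#     # url='qemu+tcp://localhost:16509/system'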
@cacheable
def make_org(options=None):
return make_org_with_credentials(options)
def make_org_with_credentials(options=None, credentials=None):
"""
Usage::
hammer organization create [OPTIONS]
Options::
--compute-resource-ids COMPUTE_RESOURCE_IDS Compute resource IDs
Comma separated list
of values.
--compute-resources COMPUTE_RESOURCE_NAMES Compute resource Names
Comma separated list
of values.
--config-template-ids CONFIG_TEMPLATE_IDS Provisioning template IDs
Comma separated list
of values.
--config-templates CONFIG_TEMPLATE_NAMES Provisioning template Names
Comma separated list
of values.
--description DESCRIPTION description
--domain-ids DOMAIN_IDS Domain IDs
Comma separated list
of values.
--environment-ids ENVIRONMENT_IDS Environment IDs
Comma separated list
of values.
--environments ENVIRONMENT_NAMES Environment Names
Comma separated list
of values.
--hostgroup-ids HOSTGROUP_IDS Host group IDs
Comma separated list
of values.
--hostgroups HOSTGROUP_NAMES Host group Names
Comma separated list
of values.
--label LABEL unique label
--media MEDIUM_NAMES Media Names
Comma separated list
of values.
--media-ids MEDIA_IDS Media IDs
Comma separated list
of values.
--name NAME name
--realms REALM_NAMES Realm Names
Comma separated list
of values.
--realm-ids REALM_IDS Realm IDs
Comma separated list
of values.
--smart-proxies SMART_PROXY_NAMES Smart proxy Names
Comma separated list
of values.
--smart-proxy-ids SMART_PROXY_IDS Smart proxy IDs
Comma separated list
of values.
--subnet-ids SUBNET_IDS Subnet IDs
Comma separated list
of values.
--subnets SUBNET_NAMES Subnet Names
Comma separated list
of values.
--user-ids USER_IDS User IDs
Comma separated list
of values.
--users USER_NAMES User Names
Comma separated list
of values.
-h, --help print help
"""
# Assigning default values for attributes
args = {
u'compute-resource-ids': None,
u'compute-resources': None,
u'config-template-ids': None,
u'config-templates': None,
u'description': None,
u'domain-ids': None,
u'environment-ids': None,
u'environments': None,
u'hostgroup-ids': None,
u'hostgroups': None,
u'label': None,
u'media-ids': None,
u'media': None,
u'name': gen_alphanumeric(6),
u'realm-ids': None,
u'realms': None,
u'smart-proxy-ids': None,
u'smart-proxies': None,
u'subnet-ids': None,
u'subnets': None,
u'user-ids': None,
u'users': None,
}
org_cls = _entity_with_credentials(credentials, Org)
return create_object(org_cls, args, options)
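# Illustrative usage (a sketch; the label value is an assumption): create an org
# with an explicit label, letting the factory generate the remaining values.
#     org = make_org({'label': 'example_org_label'})
#     # org['id'] and org['name'] can then be passed to the other factories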
@cacheable
def make_realm(options=None):
"""
Usage::
hammer realm create [OPTIONS]
Options::
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values.
Values containing comma should
be double quoted
--locations LOCATION_NAMES Comma separated list of values.
Values containing comma should
be double quoted
--name NAME The realm name, e.g. EXAMPLE.COM
--organization-ids ORGANIZATION_IDS REPLACE organizations with
given ids.
Comma separated list of values.
Values containing comma should
be double quoted
--organizations ORGANIZATION_NAMES Comma separated list of values.
Values containing comma should
be double quoted
--realm-proxy-id REALM_PROXY_ID Capsule ID to use within this realm
--realm-type REALM_TYPE Realm type, e.g.
Red Hat Identity Management
or Active Directory
-h, --help print help
"""
# Assigning default values for attributes
args = {
u'location-ids': None,
u'locations': None,
u'name': gen_alphanumeric(6),
u'organization-ids': None,
u'organizations': None,
u'realm-proxy-id': None,
u'realm-type': None,
}
return create_object(Realm, args, options)
@cacheable
def make_os(options=None):
"""
Usage::
hammer os create [OPTIONS]
Options::
--architecture-ids ARCHITECTURE_IDS IDs of associated
architectures. Comma
separated list of values.
--architectures ARCHITECTURE_NAMES Comma separated list of
values.
--config-template-ids CONFIG_TEMPLATE_IDS IDs of associated
provisioning templates. Comma
separated list of values.
--config-templates CONFIG_TEMPLATE_NAMES Comma separated list of
values.
--description DESCRIPTION
--family FAMILY
--major MAJOR
--media MEDIUM_NAMES Comma separated list of
values.
--medium-ids MEDIUM_IDS IDs of associated media.
Comma separated list of
values.
--minor MINOR
--name NAME
--partition-table-ids PARTITION_TABLE_IDS IDs of associated partition
tables. Comma separated list
of values.
--partition-tables PARTITION_TABLE_NAMES Comma separated list of
values.
--password-hash PASSWORD_HASH Root password hash function
to use, one of MD5, SHA256,
SHA512
--release-name RELEASE_NAME
-h, --help print help
"""
# Assigning default values for attributes
args = {
u'architecture-ids': None,
u'architectures': None,
u'config-template-ids': None,
u'config-templates': None,
u'description': None,
u'family': None,
u'major': random.randint(0, 10),
u'media': None,
u'medium-ids': None,
u'minor': random.randint(0, 10),
u'name': gen_alphanumeric(6),
u'partition-table-ids': None,
u'partition-tables': None,
u'password-hash': None,
u'release-name': None,
}
return create_object(OperatingSys, args, options)
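# Illustrative usage (a sketch; the family and version numbers are assumptions):
#     os = make_os({
#         'family': 'Redhat',
#         'major': '7',
#         'minor': '6',
#     })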
@cacheable
def make_scapcontent(options=None):
"""
Usage::
scap-content create [OPTIONS]
Options::
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values.
Values containing comma should
be double quoted
--locations LOCATION_NAMES Comma separated list of values.
Values containing comma should
be double quoted
--organization-ids ORGANIZATION_IDS REPLACE organizations with given
ids.
Comma separated list of values.
Values containing comma should
be double quoted
--organizations ORGANIZATION_NAMES Comma separated list of values.
Values containing comma should
be double quoted
--original-filename ORIGINAL_FILENAME Original file name of the XML
file
--scap-file SCAP_FILE Scap content file
--title TITLE SCAP content name
-h, --help print help
"""
# Assigning default values for attributes
args = {
u'scap-file': None,
u'original-filename': None,
u'location-ids': None,
u'locations': None,
u'title': gen_alphanumeric().lower(),
u'organization-ids': None,
u'organizations': None,
}
return create_object(Scapcontent, args, options)
@cacheable
def make_domain(options=None):
"""
Usage::
hammer domain create [OPTIONS]
Options::
--description DESC Full name describing the domain
--dns-id DNS_ID DNS Proxy to use within this domain
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values.
--locations LOCATION_NAMES Comma separated list of values.
--name NAME The full DNS Domain name
--organization-ids ORGANIZATION_IDS REPLACE organizations with
given ids.
Comma separated list of values.
--organizations ORGANIZATION_NAMES Comma separated list of values.
-h, --help print help
"""
# Assigning default values for attributes
args = {
u'description': None,
u'dns-id': None,
u'location-ids': None,
u'locations': None,
u'name': gen_alphanumeric().lower(),
u'organization-ids': None,
u'organizations': None,
}
return create_object(Domain, args, options)
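# Illustrative usage (a sketch; the domain name and entity ids are assumptions):
#     domain = make_domain({
#         'name': 'example.test',
#         'organization-ids': org['id'],
#         'location-ids': loc['id'],
#     })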
@cacheable
def make_hostgroup(options=None):
"""
Usage::
hammer hostgroup create [OPTIONS]
Options::
--architecture ARCHITECTURE_NAME Architecture name
--architecture-id ARCHITECTURE_ID
--ask-root-pass ASK_ROOT_PW One of true/false, yes/no, 1/0.
--compute-profile COMPUTE_PROFILE_NAME Name to search by
--compute-profile-id COMPUTE_PROFILE_ID
--config-group-ids CONFIG_GROUP_IDS IDs of associated config groups
--config-groups CONFIG_GROUP_NAMES
--content-source-id CONTENT_SOURCE_ID
--content-view CONTENT_VIEW_NAME Name to search by
--content-view-id CONTENT_VIEW_ID content view numeric identifier
--domain DOMAIN_NAME Domain name
--domain-id DOMAIN_ID Numerical ID or domain name
--environment ENVIRONMENT_NAME Environment name
--environment-id ENVIRONMENT_ID
--group-parameters-attributes GROUP_PARAMETERS_ATTRIBUTES Array of
parameters
--kickstart-repository-id KICKSTART_REPOSITORY_ID Kickstart
repository ID
--lifecycle-environment LIFECYCLE_ENVIRONMENT_NAME Name to search by
--lifecycle-environment-id LIFECYCLE_ENVIRONMENT_ID ID of the
environment
--locations LOCATION_NAMES Comma separated list of values
--location-titles LOCATION_TITLES
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values.
--medium MEDIUM_NAME Medium name
--medium-id MEDIUM_ID
--name NAME
--openscap-proxy-id OPENSCAP_PROXY_ID ID of OpenSCAP Capsule
--operatingsystem OPERATINGSYSTEM_TITLE Operating system title
--operatingsystem-id OPERATINGSYSTEM_ID
--organizations ORGANIZATION_NAMES Comma separated list of values
--organization-titles ORGANIZATION_TITLES
--organization-ids ORGANIZATION_IDS REPLACE organizations with
given ids.
Comma separated list of values.
--parent PARENT_NAME Name of parent hostgroup
--parent-id PARENT_ID
--partition-table PTABLE_NAME Partition table name
--partition-table-id PTABLE_ID
--puppet-ca-proxy PUPPET_CA_PROXY_NAME Name of puppet CA proxy
--puppet-ca-proxy-id PUPPET_CA_PROXY_ID
--puppet-class-ids PUPPETCLASS_IDS List of puppetclass ids
Comma separated list of values.
--puppet-classes PUPPET_CLASS_NAMES Comma separated list of values.
--puppet-proxy PUPPET_CA_PROXY_NAME Name of puppet proxy
--puppet-proxy-id PUPPET_PROXY_ID
--pxe-loader PXE_LOADER DHCP filename option (
Grub2/PXELinux by default)
--query-organization ORGANIZATION_NAME Organization name to search by
--query-organization-id ORGANIZATION_ID Organization ID to search by
--query-organization-label ORGANIZATION_LABEL Organization label to
search by
--realm REALM_NAME Name to search by
--realm-id REALM_ID Numerical ID or realm name
--root-pass ROOT_PASSWORD
--subnet SUBNET_NAME Subnet name
--subnet-id SUBNET_ID
-h, --help print help
"""
# Assigning default values for attributes
args = {
u'architecture': None,
u'architecture-id': None,
u'compute-profile': None,
u'compute-profile-id': None,
u'config-group-ids': None,
u'config-groups': None,
u'content-source-id': None,
u'content-source': None,
u'content-view': None,
u'content-view-id': None,
u'domain': None,
u'domain-id': None,
u'environment': None,
u'puppet-environment': None,
u'environment-id': None,
u'puppet-environment-id': None,
u'locations': None,
u'location-ids': None,
u'kickstart-repository-id': None,
u'lifecycle-environment': None,
u'lifecycle-environment-id': None,
u'lifecycle-environment-organization-id': None,
u'medium': None,
u'medium-id': None,
u'name': gen_alphanumeric(6),
u'operatingsystem': None,
u'operatingsystem-id': None,
u'organizations': None,
u'organization-titles': None,
u'organization-ids': None,
u'parent': None,
u'parent-id': None,
u'partition-table': None,
u'partition-table-id': None,
u'puppet-ca-proxy': None,
u'puppet-ca-proxy-id': None,
u'puppet-class-ids': None,
u'puppet-classes': None,
u'puppet-proxy': None,
u'puppet-proxy-id': None,
u'pxe-loader': None,
u'query-organization': None,
u'query-organization-id': None,
u'query-organization-label': None,
u'realm': None,
u'realm-id': None,
u'subnet': None,
u'subnet-id': None,
}
return create_object(HostGroup, args, options)
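# Illustrative usage (a sketch; every referenced entity id is an assumption and
# would normally come from the other factories in this module):
#     hostgroup = make_hostgroup({
#         'organization-ids': org['id'],
#         'domain-id': domain['id'],
#         'subnet-id': subnet['id'],
#         'architecture-id': arch['id'],
#     })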
@cacheable
def make_medium(options=None):
"""
Usage::
hammer medium create [OPTIONS]
Options::
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values.
--locations LOCATION_NAMES Comma separated list of values.
--name NAME Name of media
--operatingsystem-ids OPERATINGSYSTEM_IDS REPLACE operating systems with
given ids.
Comma separated list of
values.
--operatingsystems OPERATINGSYSTEM_TITLES Comma separated list of
values.
--organization-ids ORGANIZATION_IDS Comma separated list of
values.
--organizations ORGANIZATION_NAMES Comma separated list of
values.
--os-family OS_FAMILY The family that the operating system belongs
to. Available families:
Archlinux
Debian
Gentoo
Redhat
Solaris
Suse
Windows
--path PATH The path to the medium, can be a URL or a valid
NFS server (exclusive of the architecture)
for example http://mirror.centos.org/centos/
$version/os/$arch where $arch will be
substituted for the host’s actual OS
architecture and $version, $major and $minor
will be substituted for the version of the
operating system.
Solaris and Debian media may also use $release.
-h, --help print help
"""
# Assigning default values for attributes
args = {
u'location-ids': None,
u'locations': None,
u'name': gen_alphanumeric(6),
u'operatingsystem-ids': None,
u'operatingsystems': None,
u'organization-ids': None,
u'organizations': None,
u'os-family': None,
u'path': 'http://{0}'.format(gen_string('alpha', 6)),
}
return create_object(Medium, args, options)
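# Illustrative usage (a sketch; the path and os family values are assumptions):
#     medium = make_medium({
#         'path': 'http://mirror.centos.org/centos/$version/os/$arch',
#         'os-family': 'Redhat',
#         'organization-ids': org['id'],
#     })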
@cacheable
def make_environment(options=None):
"""
Usage::
hammer environment create [OPTIONS]
Options::
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values.
--locations LOCATION_NAMES Comma separated list of values.
--name NAME
--organization-ids ORGANIZATION_IDS REPLACE organizations with given
ids.
Comma separated list of values.
--organizations ORGANIZATION_NAMES Comma separated list of values.
"""
# Assigning default values for attributes
args = {
u'location-ids': None,
u'locations': None,
u'name': gen_alphanumeric(6),
u'organization-ids': None,
u'organizations': None,
}
return create_object(Environment, args, options)
@cacheable
def make_lifecycle_environment(options=None):
"""
Usage::
hammer lifecycle-environment create [OPTIONS]
Options::
--description DESCRIPTION description of the environment
--label LABEL label of the environment
--name NAME name of the environment
--organization ORGANIZATION_NAME Organization name to search by
--organization-id ORGANIZATION_ID organization ID
--organization-label ORGANIZATION_LABEL Organization label to search by
--prior PRIOR Name of an environment that is prior to
the new environment in the chain. It has to
be either ‘Library’ or an environment at
the end of a chain.
--registry-name-pattern REGISTRY_NAME_PATTERN Pattern for container
image names
--registry-unauthenticated-pull REGISTRY_UNAUTHENTICATED_PULL Allow
unauthenticated pull of container images
-h, --help print help
"""
# Organization Name, Label or ID is a required field.
if (
not options or
'organization' not in options and
'organization-label' not in options and
'organization-id' not in options):
raise CLIFactoryError('Please provide a valid Organization.')
if not options.get('prior'):
options['prior'] = 'Library'
# Assigning default values for attributes
args = {
u'description': None,
u'label': None,
u'name': gen_alphanumeric(6),
u'organization': None,
u'organization-id': None,
u'organization-label': None,
u'prior': None,
u'registry-name-pattern': None,
u'registry-unauthenticated-pull': None,
}
return create_object(LifecycleEnvironment, args, options)
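# Illustrative usage (a sketch): the factory raises CLIFactoryError when no
# organization is given and defaults 'prior' to 'Library' when it is omitted.
#     lce = make_lifecycle_environment({'organization-id': org['id']})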
@cacheable
def make_tailoringfile(options=None):
"""
Usage::
tailoring-file create [OPTIONS]
Options::
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values.
Values containing comma should
be double quoted.
--locations LOCATION_NAMES Comma separated list of values.
Values containing comma should
be double quoted
--name NAME Tailoring file name
--organization-ids ORGANIZATION_IDS REPLACE organizations with given
ids.
Comma separated list of values.
Values containing comma should
be double quoted
--organizations ORGANIZATION_NAMES Comma separated list of values.
Values containing comma should
be double quoted
--original-filename ORIGINAL_FILENAME Original file name of the XML
file
--scap-file SCAP_FILE Tailoring file content
-h, --help print help
"""
# Assigning default values for attributes
args = {
u'scap-file': None,
u'original-filename': None,
u'location-ids': None,
u'locations': None,
u'name': gen_alphanumeric().lower(),
u'organization-ids': None,
u'organizations': None,
}
return create_object(TailoringFiles, args, options)
@cacheable
def make_template(options=None):
"""
Usage::
hammer template create [OPTIONS]
Options::
--audit-comment AUDIT_COMMENT
--file TEMPLATE Path to a file that contains the template
--location-ids LOCATION_IDS REPLACE locations with given ids
Comma separated list of values.
--locked LOCKED Whether or not the template is locked
for editing
One of true/false, yes/no, 1/0.
--name NAME template name
--operatingsystem-ids OPERATINGSYSTEM_IDS
Array of operating systems ID to associate the
template with Comma separated list of values.
--organization-ids ORGANIZATION_IDS REPLACE organizations with
given ids.
Comma separated list of values.
--type TYPE Template type. Eg. snippet, script, provision
-h, --help print help
"""
# Assigning default values for attributes
args = {
u'audit-comment': None,
u'file': '/tmp/{0}'.format(gen_alphanumeric()),
u'location-ids': None,
u'locked': None,
u'name': gen_alphanumeric(6),
u'operatingsystem-ids': None,
u'organization-ids': None,
u'type': random.choice(TEMPLATE_TYPES),
}
# Write content to file or random text
if options is not None and 'content' in options.keys():
content = options.pop('content')
else:
content = gen_alphanumeric()
# Special handling for template factory
(_, layout) = mkstemp(text=True)
chmod(layout, 0o700)
with open(layout, 'w') as ptable:
ptable.write(content)
# Upload file to server
ssh.upload_file(local_file=layout, remote_file=args['file'])
# End - Special handling for template factory
return create_object(Template, args, options)
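# Illustrative usage (a sketch; the content string and the 'snippet' type are
# assumptions). A 'content' option is written to a temp file and uploaded to
# the server on your behalf:
#     template = make_template({
#         'content': 'echo "hello from a provisioning snippet"',
#         'type': 'snippet',
#     })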
@cacheable
def make_smart_variable(options=None):
"""
Usage::
hammer smart-variable create [OPTIONS]
Options::
--avoid-duplicates AVOID_DUPLICATES Remove duplicate values (
only array type)
One of true/false, yes/no,
1/0.
--default-value DEFAULT_VALUE Default value of variable
--description DESCRIPTION Description of variable
--hidden-value HIDDEN_VALUE When enabled the parameter
is hidden in the UI
One of true/false, yes/no,
1/0.
--merge-default MERGE_DEFAULT Include default value when
merging all matching values
One of true/false, yes/no,
1/0.
--merge-overrides MERGE_OVERRIDES Merge all matching values(
only array/hash type)
One of true/false, yes/no,
1/0.
--override-value-order OVERRIDE_VALUE_ORDER The order in which values
are resolved
--puppet-class PUPPET_CLASS_NAME Puppet class name
--puppet-class-id PUPPET_CLASS_ID ID of Puppet class
--validator-rule VALIDATOR_RULE Used to enforce certain
values for the parameter
values
--validator-type VALIDATOR_TYPE Type of the validator.
Possible value(s):
'regexp', 'list', ''
--variable VARIABLE Name of variable
--variable-type VARIABLE_TYPE Type of the variable.
Possible value(s):
'string', 'boolean',
'integer', 'real', 'array',
'hash', 'yaml', 'json'
-h, --help print help
"""
# Puppet class name or ID is a required field.
if (
not options or
'puppet-class' not in options and
'puppet-class-id' not in options):
raise CLIFactoryError('Please provide a valid Puppet class')
# Assigning default values for attributes
args = {
u'avoid-duplicates': None,
u'default-value': None,
u'description': None,
u'hidden-value': None,
u'merge-default': None,
u'merge-overrides': None,
u'override-value-order': None,
u'puppet-class': None,
u'puppet-class-id': None,
u'validator-rule': None,
u'validator-type': None,
u'variable': gen_alphanumeric(),
u'variable-type': None,
}
return create_object(SmartVariable, args, options)
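# Illustrative usage (a sketch; the puppet class name is an assumption). A
# puppet class name or id is mandatory, otherwise CLIFactoryError is raised:
#     smart_variable = make_smart_variable({'puppet-class': 'ntp'})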
@cacheable
def make_virt_who_config(options=None):
"""
Usage::
hammer virt-who-config create [OPTIONS]
Options::
--blacklist BLACKLIST Hypervisor blacklist, applicable only when
filtering mode is set to 2.
Wildcards and regular expressions are
supported, multiple records must be
separated by comma.
--debug DEBUG Enable debugging output
One of true/false, yes/no, 1/0.
--filtering-mode MODE Hypervisor filtering mode
Possible value(s): 'none', 'whitelist',
'blacklist'
--hypervisor-id HYPERVISOR_ID Specifies how the hypervisor will be
identified.
Possible value(s): 'hostname', 'uuid',
'hwuuid'
--hypervisor-password HYPERVISOR_PASSWORD Hypervisor password, required
for all hypervisor types
except for libvirt
--hypervisor-server HYPERVISOR_SERVER Fully qualified host name or
IP address of the hypervisor
--hypervisor-type HYPERVISOR_TYPE Hypervisor type
Possible value(s): 'esx',
'rhevm', 'hyperv', 'xen',
'libvirt'
--hypervisor-username HYPERVISOR_USERNAME Account name by which
virt-who is to connect to the
hypervisor.
--interval INTERVAL Configuration interval in minutes
Possible value(s): '60', '120', '240', '480',
'720'
--name NAME Configuration name
--no-proxy NO_PROXY Ignore Proxy. A comma-separated list of hostnames
or domains or ip addresses to ignore proxy
settings for. Optionally this may be set to * to
bypass proxy settings for all hostnames domains
or ip addresses.
--organization ORGANIZATION_NAME Organization name
--organization-id ORGANIZATION_ID organization ID
--organization-title ORGANIZATION_TITLE Organization title
--proxy PROXY HTTP Proxy that should be used for communication
between the server on which virt-who is running
and the hypervisors and virtualization managers.
--satellite-url SATELLITE_URL Satellite server FQDN
--whitelist WHITELIST Hypervisor whitelist, applicable only when
filtering mode is set to 1.
Wildcards and regular expressions are supported,
multiple records must be separated by comma.
-h, --help print help
"""
args = {
u'blacklist': None,
u'debug': None,
u'filtering-mode': 'none',
u'hypervisor-id': 'hostname',
u'hypervisor-password': None,
u'hypervisor-server': None,
u'hypervisor-type': None,
u'hypervisor-username': None,
u'interval': '60',
u'name': gen_alphanumeric(6),
u'no-proxy': None,
u'organization': None,
u'organization-id': None,
u'organization-title': None,
u'proxy': None,
u'satellite-url': settings.server.hostname,
u'whitelist': None
}
return create_object(VirtWhoConfig, args, options)
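# Illustrative usage (a sketch; the hypervisor details are assumptions):
#     vw_config = make_virt_who_config({
#         'organization-id': org['id'],
#         'hypervisor-type': 'libvirt',
#         'hypervisor-server': 'qemu+ssh://root@hypervisor.example.com/system',
#         'hypervisor-username': 'root',
#     })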
def activationkey_add_subscription_to_repo(options=None):
"""
Adds subscription to activation key.
Args::
organization-id - ID of organization
activationkey-id - ID of activation key
subscription - subscription name
"""
if (
not options or
not options.get('organization-id') or
not options.get('activationkey-id') or
not options.get('subscription')):
raise CLIFactoryError(
'Please provide valid organization, activation key and '
'subscription.'
)
# List the subscriptions in given org
subscriptions = Subscription.list(
{u'organization-id': options['organization-id']},
per_page=False
)
# Ensure the subscription is available in the org before adding it to the key
if options['subscription'] not in (sub['name'] for sub in subscriptions):
raise CLIFactoryError(
u'Subscription {0} not found in the given org'
.format(options['subscription'])
)
for subscription in subscriptions:
if subscription['name'] == options['subscription']:
if (
subscription['quantity'] != 'Unlimited' and
int(subscription['quantity']) == 0):
raise CLIFactoryError(
'All the subscriptions are already consumed')
try:
ActivationKey.add_subscription({
u'id': options['activationkey-id'],
u'subscription-id': subscription['id'],
u'quantity': 1,
})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to add subscription to activation key\n{0}'
.format(err.msg)
)
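# Illustrative usage (a sketch; the ids and subscription name come from
# previously created entities and are assumptions here):
#     activationkey_add_subscription_to_repo({
#         'organization-id': org['id'],
#         'activationkey-id': activation_key['id'],
#         'subscription': custom_product['name'],
#     })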
def setup_org_for_a_custom_repo(options=None):
"""Sets up Org for the given custom repo by:
1. Checks if organization and lifecycle environment were given, otherwise
creates new ones.
2. Creates a new product with the custom repo. Synchronizes the repo.
3. Checks if content view was given, otherwise creates a new one and
- adds the custom repo
- publishes
- promotes to the lifecycle environment
4. Checks if activation key was given, otherwise creates a new one and
associates it with the content view.
5. Adds the custom repo subscription to the activation key
Options::
url - URL to custom repository
organization-id (optional) - ID of organization to use (or create a new
one if empty)
lifecycle-environment-id (optional) - ID of lifecycle environment to
use (or create a new one if empty)
content-view-id (optional) - ID of content view to use (or create a new
one if empty)
activationkey-id (optional) - ID of activation key (or create a new one
if empty)
:return: A dictionary with the entity ids of Activation key, Content view,
Lifecycle Environment, Organization, Product and Repository
"""
if (
not options or
not options.get('url')):
raise CLIFactoryError('Please provide valid custom repo URL.')
# Create new organization and lifecycle environment if needed
if options.get('organization-id') is None:
org_id = make_org()['id']
else:
org_id = options['organization-id']
if options.get('lifecycle-environment-id') is None:
env_id = make_lifecycle_environment({u'organization-id': org_id})['id']
else:
env_id = options['lifecycle-environment-id']
# Create custom product and repository
custom_product = make_product({u'organization-id': org_id})
custom_repo = make_repository({
u'content-type': 'yum',
u'product-id': custom_product['id'],
u'url': options.get('url'),
})
# Synchronize custom repository
try:
Repository.synchronize({'id': custom_repo['id']})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to synchronize repository\n{0}'.format(err.msg))
# Create CV if needed and associate repo with it
if options.get('content-view-id') is None:
cv_id = make_content_view({u'organization-id': org_id})['id']
else:
cv_id = options['content-view-id']
try:
ContentView.add_repository({
u'id': cv_id,
u'organization-id': org_id,
u'repository-id': custom_repo['id'],
})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to add repository to content view\n{0}'.format(err.msg))
# Publish a new version of CV
try:
ContentView.publish({u'id': cv_id})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to publish new version of content view\n{0}'
.format(err.msg)
)
# Get the version id
cvv = ContentView.info({u'id': cv_id})['versions'][-1]
# Promote version to next env
try:
ContentView.version_promote({
u'id': cvv['id'],
u'organization-id': org_id,
u'to-lifecycle-environment-id': env_id,
})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to promote version to next environment\n{0}'
.format(err.msg)
)
# Create activation key if needed and associate content view with it
if options.get('activationkey-id') is None:
activationkey_id = make_activation_key({
u'content-view-id': cv_id,
u'lifecycle-environment-id': env_id,
u'organization-id': org_id,
})['id']
else:
activationkey_id = options['activationkey-id']
# Given activation key may have no (or different) CV associated.
# Associate activation key with CV just to be sure
try:
ActivationKey.update({
u'content-view-id': cv_id,
u'id': activationkey_id,
u'organization-id': org_id,
})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to associate activation-key with CV\n{0}'
.format(err.msg)
)
# Add subscription to activation-key
activationkey_add_subscription_to_repo({
u'activationkey-id': activationkey_id,
u'organization-id': org_id,
u'subscription': custom_product['name'],
})
return {
u'activationkey-id': activationkey_id,
u'content-view-id': cv_id,
u'lifecycle-environment-id': env_id,
u'organization-id': org_id,
u'product-id': custom_product['id'],
u'repository-id': custom_repo['id'],
}
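# Illustrative usage (a sketch; the repo URL is an assumption). Only 'url' is
# mandatory; missing org/lce/content view/activation key entities are created
# on the fly:
#     repo_setup = setup_org_for_a_custom_repo({
#         'url': 'http://example.com/pub/custom_repo/',
#     })
#     # repo_setup['activationkey-id'], repo_setup['content-view-id'], ...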
def _setup_org_for_a_rh_repo(options=None):
"""Sets up Org for the given Red Hat repository by:
1. Checks if organization and lifecycle environment were given, otherwise
creates new ones.
2. Clones and uploads manifest.
3. Enables RH repo and synchronizes it.
4. Checks if content view was given, otherwise creates a new one and
- adds the RH repo
- publishes
- promotes to the lifecycle environment
5. Checks if activation key was given, otherwise creates a new one and
associates it with the content view.
6. Adds the RH repo subscription to the activation key
Note that in most cases you should use ``setup_org_for_a_rh_repo`` instead
as it's more flexible.
Options::
product - RH product name
repository-set - RH repository set name
repository - RH repository name
releasever (optional) - Repository set release version, don't specify
it if enabling the Satellite 6 Tools repo.
organization-id (optional) - ID of organization to use (or create a new
one if empty)
lifecycle-environment-id (optional) - ID of lifecycle environment to
use (or create a new one if empty)
content-view-id (optional) - ID of content view to use (or create a new
one if empty)
activationkey-id (optional) - ID of activation key (or create a new one
if empty)
subscription (optional) - subscription name (or use the default one
if empty)
:return: A dictionary with the entity ids of Activation key, Content view,
Lifecycle Environment, Organization and Repository
"""
if (
not options or
not options.get('product') or
not options.get('repository-set') or
not options.get('repository')):
raise CLIFactoryError(
'Please provide valid product, repository-set and repo.')
# Create new organization and lifecycle environment if needed
if options.get('organization-id') is None:
org_id = make_org()['id']
else:
org_id = options['organization-id']
if options.get('lifecycle-environment-id') is None:
env_id = make_lifecycle_environment({u'organization-id': org_id})['id']
else:
env_id = options['lifecycle-environment-id']
# Clone manifest and upload it
with manifests.clone() as manifest:
upload_file(manifest.content, manifest.filename)
try:
Subscription.upload({
u'file': manifest.filename,
u'organization-id': org_id,
})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to upload manifest\n{0}'.format(err.msg))
# Enable repo from Repository Set
try:
RepositorySet.enable({
u'basearch': 'x86_64',
u'name': options['repository-set'],
u'organization-id': org_id,
u'product': options['product'],
u'releasever': options.get('releasever'),
})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to enable repository set\n{0}'.format(err.msg))
# Fetch repository info
try:
rhel_repo = Repository.info({
u'name': options['repository'],
u'organization-id': org_id,
u'product': options['product'],
})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to fetch repository info\n{0}'.format(err.msg))
# Synchronize the RH repository
try:
Repository.synchronize({
u'name': options['repository'],
u'organization-id': org_id,
u'product': options['product'],
})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to synchronize repository\n{0}'.format(err.msg))
# Create CV if needed and associate repo with it
if options.get('content-view-id') is None:
cv_id = make_content_view({u'organization-id': org_id})['id']
else:
cv_id = options['content-view-id']
try:
ContentView.add_repository({
u'id': cv_id,
u'organization-id': org_id,
u'repository-id': rhel_repo['id'],
})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to add repository to content view\n{0}'.format(err.msg))
# Publish a new version of CV
try:
ContentView.publish({u'id': cv_id})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to publish new version of content view\n{0}'
.format(err.msg)
)
# Get the version id
try:
cvv = ContentView.info({u'id': cv_id})['versions'][-1]
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to fetch content view info\n{0}'.format(err.msg))
# Promote the new version to the next environment
try:
ContentView.version_promote({
u'id': cvv['id'],
u'organization-id': org_id,
u'to-lifecycle-environment-id': env_id,
})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to promote version to next environment\n{0}'
.format(err.msg)
)
# Create activation key if needed and associate content view with it
if options.get('activationkey-id') is None:
activationkey_id = make_activation_key({
u'content-view-id': cv_id,
u'lifecycle-environment-id': env_id,
u'organization-id': org_id,
})['id']
else:
activationkey_id = options['activationkey-id']
# Given activation key may have no (or different) CV associated.
# Associate activation key with CV just to be sure
try:
ActivationKey.update({
u'id': activationkey_id,
u'organization-id': org_id,
u'content-view-id': cv_id,
})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to associate activation-key with CV\n{0}'
.format(err.msg)
)
# Add subscription to activation-key
activationkey_add_subscription_to_repo({
u'organization-id': org_id,
u'activationkey-id': activationkey_id,
u'subscription': options.get(
u'subscription', DEFAULT_SUBSCRIPTION_NAME),
})
return {
u'activationkey-id': activationkey_id,
u'content-view-id': cv_id,
u'lifecycle-environment-id': env_id,
u'organization-id': org_id,
u'repository-id': rhel_repo['id'],
}
def setup_org_for_a_rh_repo(options=None, force_manifest_upload=False,
force_use_cdn=False):
"""Wrapper above ``_setup_org_for_a_rh_repo`` to use custom downstream repo
instead of CDN's 'Satellite Capsule' and 'Satellite Tools' if
``settings.cdn == 0`` and URL for custom repositories is set in properties.
:param options: a dict with options to pass to function
``_setup_org_for_a_rh_repo``. See its docstring for more details
:param force_use_cdn: bool flag whether to use CDN even if there's
downstream repo available and ``settings.cdn == 0``.
:param force_manifest_upload: bool flag whether to upload a manifest to
organization even if downstream custom repo is used instead of CDN.
Useful when test relies on organization with manifest (e.g. uses some
other RH repo afterwards). Defaults to False.
:return: a dict with entity ids (see ``_setup_org_for_a_rh_repo`` and
``setup_org_for_a_custom_repo``).
"""
custom_repo_url = None
if options.get('repository') == REPOS['rhst6']['name']:
custom_repo_url = settings.sattools_repo['rhel6']
elif options.get('repository') == REPOS['rhst7']['name']:
custom_repo_url = settings.sattools_repo['rhel7']
elif 'Satellite Capsule' in options.get('repository'):
custom_repo_url = settings.capsule_repo
if force_use_cdn or settings.cdn or not custom_repo_url:
return _setup_org_for_a_rh_repo(options)
else:
options['url'] = custom_repo_url
result = setup_org_for_a_custom_repo(options)
if force_manifest_upload:
with manifests.clone() as manifest:
upload_file(manifest.content, manifest.filename)
try:
Subscription.upload({
u'file': manifest.filename,
u'organization-id': result.get('organization-id'),
})
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to upload manifest\n{0}'.format(err.msg))
# attach the default subscription to activation key
activationkey_add_subscription_to_repo({
'activationkey-id': result[u'activationkey-id'],
'organization-id': result[u'organization-id'],
'subscription': DEFAULT_SUBSCRIPTION_NAME,
})
return result
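# Illustrative usage (a sketch; the constants are assumed to carry the usual
# Satellite Tools repo data). Product, repository-set and repository are
# mandatory; the remaining entities are created when missing:
#     rh_repo_setup = setup_org_for_a_rh_repo({
#         'product': PRDS['rhel'],
#         'repository-set': REPOSET['rhst7'],
#         'repository': REPOS['rhst7']['name'],
#         'organization-id': org['id'],
#     })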
def configure_env_for_provision(org=None, loc=None):
"""Create and configure org, loc, product, repo, env. Update proxy,
domain, subnet, compute resource, provision templates and medium with
previously created entities and create a hostgroup using all mentioned
entities.
:param org: Default Organization that should be used in both host
discovering and host provisioning procedures
:param loc: Default Location that should be used in both host
discovering and host provisioning procedures
:return: List of created entities that can be re-used further in
provisioning or validation procedure (e.g. hostgroup or subnet)
"""
# Create new organization and location in case they were not passed
if org is None:
org = make_org()
if loc is None:
loc = make_location()
# Get a Library Lifecycle environment and the default CV for the org
lce = LifecycleEnvironment.info(
{u'name': u'Library', 'organization-id': org['id']}
)
cv = ContentView.info(
{u'name': u'Default Organization View', u'organization-id': org['id']}
)
# Create puppet environment and associate organization and location
env = make_environment({
'location-ids': loc['id'],
'organization-ids': org['id'],
})
# get default capsule and associate location
puppet_proxy = Proxy.info({'id': Proxy.list({
u'search': settings.server.hostname
})[0]['id']})
Proxy.update({
'id': puppet_proxy['id'],
'locations': list(
set(puppet_proxy.get('locations') or []) | {loc['name']}),
})
# Network
# Search for existing domain or create new otherwise. Associate org,
# location and dns to it
_, _, domain_name = settings.server.hostname.partition('.')
domain = Domain.list({'search': 'name={0}'.format(domain_name)})
if len(domain) == 1:
domain = Domain.info({'id': domain[0]['id']})
Domain.update({
'name': domain_name,
'locations': list(
set(domain.get('locations') or []) | {loc['name']}),
'organizations': list(
set(domain.get('organizations') or []) | {org['name']}),
'dns-id': puppet_proxy['id'],
})
else:
# Create new domain
domain = make_domain({
'name': domain_name,
'location-ids': loc['id'],
'organization-ids': org['id'],
'dns-id': puppet_proxy['id'],
})
# Search if subnet is defined with given network. If so, just update its
# relevant fields otherwise create new subnet
network = settings.vlan_networking.subnet
subnet = Subnet.list({'search': 'network={0}'.format(network)})
if len(subnet) >= 1:
subnet = Subnet.info({'id': subnet[0]['id']})
Subnet.update({
'name': subnet['name'],
'domains': list(
set(subnet.get('domains') or []) | {domain['name']}),
'locations': list(
set(subnet.get('locations') or []) | {loc['name']}),
'organizations': list(
set(subnet.get('organizations') or []) | {org['name']}),
'dhcp-id': puppet_proxy['id'],
'dns-id': puppet_proxy['id'],
'tftp-id': puppet_proxy['id'],
})
else:
# Create new subnet
subnet = make_subnet({
'name': gen_string('alpha'),
'network': network,
'mask': settings.vlan_networking.netmask,
'domain-ids': domain['id'],
'location-ids': loc['id'],
'organization-ids': org['id'],
'dhcp-id': puppet_proxy['id'],
'dns-id': puppet_proxy['id'],
'tftp-id': puppet_proxy['id'],
})
# Get the Partition table entity
ptable = PartitionTable.info({'name': DEFAULT_PTABLE})
# Get the OS entity
os = OperatingSys.list({
'search': 'name="RedHat" AND major="{0}" OR major="{1}"'.format(
RHEL_6_MAJOR_VERSION, RHEL_7_MAJOR_VERSION)
})[0]
# Get proper Provisioning templates and update with OS, Org, Location
provisioning_template = Template.info({'name': DEFAULT_TEMPLATE})
pxe_template = Template.info({'name': DEFAULT_PXE_TEMPLATE})
for template in provisioning_template, pxe_template:
if os['title'] not in template['operating-systems']:
Template.update({
'id': template['id'],
'locations': list(
set(template.get('locations') or []) | {loc['name']}),
'operatingsystems': list(set(
template.get('operating-systems') or []) | {os['title']}),
'organizations': list(
set(template.get('organizations') or []) | {org['name']}),
})
# Get the architecture entity
arch = Architecture.list(
{'search': 'name={0}'.format(DEFAULT_ARCHITECTURE)})[0]
os = OperatingSys.info({'id': os['id']})
# Get the media and update its location
medium = Medium.list({'search': 'path={0}'.format(settings.rhel7_os)})
if medium:
media = Medium.info({'id': medium[0]['id']})
Medium.update({
'id': media['id'],
'operatingsystems': list(
set(media.get('operating-systems') or []) | {os['title']}),
'locations': list(
set(media.get('locations') or []) | {loc['name']}),
'organizations': list(
set(media.get('organizations') or []) | {org['name']}),
})
else:
media = make_medium({
'location-ids': loc['id'],
'operatingsystem-ids': os['id'],
'organization-ids': org['id'],
'path': settings.rhel7_os
})
# Update the OS with found arch, ptable, templates and media
OperatingSys.update({
'id': os['id'],
'architectures': list(
set(os.get('architectures') or []) | {arch['name']}),
'media': list(
set(os.get('installation-media') or []) | {media['name']}),
'partition-tables': list(
set(os.get('partition-tables') or []) | {ptable['name']}),
})
for template in (provisioning_template, pxe_template):
if '{} ({})'.format(template['name'], template['type']) not in os[
'templates']:
OperatingSys.update({
'id': os['id'],
'config-templates': list(
set(os['templates']) | {template['name']}),
})
# Create new hostgroup using proper entities
hostgroup = make_hostgroup({
'location-ids': loc['id'],
'environment-id': env['id'],
'lifecycle-environment-id': lce['id'],
'puppet-proxy-id': puppet_proxy['id'],
'puppet-ca-proxy-id': puppet_proxy['id'],
'content-view-id': cv['id'],
'domain-id': domain['id'],
'subnet-id': subnet['id'],
'organization-ids': org['id'],
'architecture-id': arch['id'],
'partition-table-id': ptable['id'],
'medium-id': media['id'],
'operatingsystem-id': os['id'],
'content-source-id': puppet_proxy['id'],
})
return {
'hostgroup': hostgroup,
'subnet': subnet,
'domain': domain,
'ptable': ptable,
'os': os
}
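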
def publish_puppet_module(puppet_modules, repo_url, organization_id=None):
"""Creates puppet repo, sync it via provided url and publish using
Content View publishing mechanism. It makes puppet class available
via Puppet Environment created by Content View and returns Content
View entity.
:param puppet_modules: List of dictionaries with module 'author'
and module 'name' fields.
:param str repo_url: Url of the repo that can be synced using pulp:
pulp repo or puppet forge.
:param organization_id: Organization id that is shared between created
entities.
:return: Content View entity.
"""
if not organization_id:
organization_id = make_org()['id']
product = make_product({u'organization-id': organization_id})
repo = make_repository({
u'product-id': product['id'],
u'content-type': 'puppet',
u'url': repo_url,
})
# Synchronize repo via provided URL
Repository.synchronize({'id': repo['id']})
# Add selected module to Content View
cv = make_content_view({u'organization-id': organization_id})
for module in puppet_modules:
ContentView.puppet_module_add({
u'author': module['author'],
u'name': module['name'],
u'content-view-id': cv['id'],
})
# CV publishing will automatically create Environment and
# Puppet Class entities
ContentView.publish({u'id': cv['id']})
return ContentView.info({u'id': cv['id']})
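# Illustrative usage (a sketch; the module author/name and the forge URL are
# assumptions):
#     cv = publish_puppet_module(
#         [{'author': 'puppetlabs', 'name': 'ntp'}],
#         'https://forge.puppet.com/',
#         organization_id=org['id'],
#     )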
def setup_virtual_machine(
vm, org_label, rh_repos_id=None, repos_label=None, product_label=None,
lce=None, activation_key=None, patch_os_release_distro=None,
install_katello_agent=True):
"""
Set up a virtual machine with basic components and tasks.
:param robottelo.vm.VirtualMachine vm: The Virtual machine to setup.
:param str org_label: The Organization label.
:param list rh_repos_id: a list of RH repositories ids to enable.
:param list repos_label: a list of custom repositories labels to enable.
:param str product_label: product label if repos_label is applicable.
:param str lce: Lifecycle environment label if applicable.
:param str activation_key: Activation key name if applicable.
:param str patch_os_release_distro: distro name, to patch the VM with os
version.
:param bool install_katello_agent: whether to install katello agent.
"""
if rh_repos_id is None:
rh_repos_id = []
if repos_label is None:
repos_label = []
vm.install_katello_ca()
vm.register_contenthost(org_label, activation_key=activation_key, lce=lce)
if not vm.subscribed:
raise CLIFactoryError('Virtual machine failed subscription')
if patch_os_release_distro:
vm.patch_os_release_version(distro=patch_os_release_distro)
# Enable RH repositories
for repo_id in rh_repos_id:
vm.enable_repo(repo_id, force=True)
if product_label:
# Enable custom repositories
for repo_label in repos_label:
result = vm.run(
'yum-config-manager --enable {0}_{1}_{2}'.format(
org_label,
product_label,
repo_label,
)
)
if result.return_code != 0:
raise CLIFactoryError(
'Failed to enable custom repository "{0}"\n{1}'.format(
repos_label, result.stderr)
)
if install_katello_agent:
vm.install_katello_agent()
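# Illustrative usage (a sketch; the vm object, org label, repo ids and entity
# names are assumptions coming from the calling test):
#     setup_virtual_machine(
#         vm,
#         org['label'],
#         rh_repos_id=[REPOS['rhst7']['id']],
#         activation_key=activation_key['name'],
#         lce=lce['label'],
#     )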
def _get_capsule_vm_distro_repos(distro):
"""Return the right RH repos info for the capsule setup"""
rh_repos = []
if distro == DISTRO_RHEL7:
# Red Hat Enterprise Linux 7 Server
rh_product_arch = REPOS['rhel7']['arch']
rh_product_releasever = REPOS['rhel7']['releasever']
rh_repos.append({
'product': PRDS['rhel'],
'repository-set': REPOSET['rhel7'],
'repository': REPOS['rhel7']['name'],
'repository-id': REPOS['rhel7']['id'],
'releasever': rh_product_releasever,
'arch': rh_product_arch,
'cdn': True,
})
# Red Hat Software Collections (for 7 Server)
rh_repos.append({
'product': PRDS['rhscl'],
'repository-set': REPOSET['rhscl7'],
'repository': REPOS['rhscl7']['name'],
'repository-id': REPOS['rhscl7']['id'],
'releasever': rh_product_releasever,
'arch': rh_product_arch,
'cdn': True,
})
# Red Hat Satellite Capsule 6.2 (for RHEL 7 Server)
rh_repos.append({
'product': PRDS['rhsc'],
'repository-set': REPOSET['rhsc7'],
'repository': REPOS['rhsc7']['name'],
'repository-id': REPOS['rhsc7']['id'],
'url': settings.capsule_repo,
'cdn': bool(settings.cdn or not settings.capsule_repo),
})
else:
raise CLIFactoryError('distro "{}" not supported'.format(distro))
return rh_product_arch, rh_product_releasever, rh_repos
def add_role_permissions(role_id, resource_permissions):
"""Create role permissions found in resource permissions dict
:param role_id: The role id
:param resource_permissions: a dict containing resources with permission
names and other Filter options
Usage::
role = make_role({'organization-id': org['id']})
resource_permissions = {
'Katello::ActivationKey': {
'permissions': [
'view_activation_keys',
'create_activation_keys',
'edit_activation_keys',
'destroy_activation_keys'
],
'search': "name ~ {}".format(ak_name_like)
},
}
add_role_permissions(role['id'], resource_permissions)
"""
available_permissions = Filter.available_permissions()
# group the available permissions by resource type
available_rc_permissions = {}
for permission in available_permissions:
permission_resource = permission['resource']
if permission_resource not in available_rc_permissions:
available_rc_permissions[permission_resource] = []
available_rc_permissions[permission_resource].append(permission)
# create only the required role permissions per resource type
for resource_type, permission_data in resource_permissions.items():
permission_names = permission_data.get('permissions')
if permission_names is None:
raise CLIFactoryError(
'Permissions not provided for resource: {0}'
.format(resource_type)
)
# ensure that the required resource type is available
if resource_type not in available_rc_permissions:
raise CLIFactoryError(
'Resource "{0}" not in the list of available resources'
.format(resource_type)
)
available_permission_names = [
permission['name']
for permission in available_rc_permissions[resource_type]
if permission['name'] in permission_names
]
# ensure that all the required permissions are available
missing_permissions = set(
permission_names).difference(set(available_permission_names))
if missing_permissions:
raise CLIFactoryError(
'Permissions "{0}" are not available in Resource "{1}"'
.format(list(missing_permissions), resource_type)
)
# Create the current resource type role permissions
options = {'role-id': role_id}
options.update(permission_data)
make_filter(options=options)
def setup_cdn_and_custom_repositories(
org_id, repos, download_policy='on_demand', synchronize=True):
"""Setup cdn and custom repositories
:param int org_id: The organization id
:param list repos: a list of dict repositories options
:param str download_policy: update the repositories with this download
policy
:param bool synchronize: Whether to synchronize the repositories.
:return: a dict containing the content view and repos info
"""
custom_product = None
repos_info = []
for repo in repos:
custom_repo_url = repo.get('url')
cdn = repo.get('cdn', False)
if not cdn and not custom_repo_url:
raise CLIFactoryError(u'Custom repository with url not supplied')
if cdn:
if bz_bug_is_open(1655239):
rh_repo_id = enable_rhrepo_and_fetchid(
repo.get('arch', DEFAULT_ARCHITECTURE),
org_id,
repo['product'],
repo['repository'],
repo['repository-set'],
repo.get('releasever')
)
repo_info = Repository.info({'id': rh_repo_id})
else:
RepositorySet.enable({
u'organization-id': org_id,
u'product': repo['product'],
u'name': repo['repository-set'],
u'basearch': repo.get('arch', DEFAULT_ARCHITECTURE),
u'releasever': repo.get('releasever'),
})
repo_info = Repository.info({
u'organization-id': org_id,
u'name': repo['repository'],
u'product': repo['product'],
})
else:
if custom_product is None:
custom_product = make_product_wait({
'organization-id': org_id,
})
repo_info = make_repository({
'product-id': custom_product['id'],
'organization-id': org_id,
'url': custom_repo_url,
})
if download_policy:
# Set download policy
Repository.update({
'download-policy': download_policy,
'id': repo_info['id'],
})
repos_info.append(repo_info)
if synchronize:
# Synchronize the repositories
for repo_info in repos_info:
Repository.synchronize({'id': repo_info['id']}, timeout=4800)
return custom_product, repos_info
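# Illustrative usage (a sketch; the CDN repo constants and the custom URL are
# assumptions): mix one CDN repo and one custom repo, then synchronize both.
#     product, repos_info = setup_cdn_and_custom_repositories(
#         org_id=org['id'],
#         repos=[
#             {'product': PRDS['rhel'],
#              'repository-set': REPOSET['rhst7'],
#              'repository': REPOS['rhst7']['name'],
#              'cdn': True},
#             {'url': 'http://example.com/pub/custom_repo/'},
#         ],
#     )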
def setup_cdn_and_custom_repos_content(
org_id, lce_id=None, repos=None, upload_manifest=True,
download_policy='on_demand', rh_subscriptions=None, default_cv=False):
"""Setup cdn and custom repositories, content view and activations key
:param int org_id: The organization id
:param int lce_id: the lifecycle environment id
:param list repos: a list of dict repositories options
:param bool default_cv: whether to use the Default Organization CV
:param bool upload_manifest: whether to upload the organization manifest
:param str download_policy: update the repositories with this download
policy
:param list rh_subscriptions: a list of RH subscription to attach to
activation key
:return: a dict containing the activation key, content view and repos info
"""
if lce_id is None and not default_cv:
raise TypeError(u'lce_id must be specified')
if repos is None:
repos = []
if rh_subscriptions is None:
rh_subscriptions = []
if upload_manifest:
# Upload the organization manifest
try:
manifests.upload_manifest_locked(org_id, manifests.clone(),
interface=manifests.INTERFACE_CLI)
except CLIReturnCodeError as err:
raise CLIFactoryError(
u'Failed to upload manifest\n{0}'.format(err.msg))
custom_product, repos_info = setup_cdn_and_custom_repositories(
org_id=org_id,
repos=repos,
download_policy=download_policy
)
if default_cv:
activation_key = make_activation_key({
u'organization-id': org_id,
u'lifecycle-environment': 'Library',
})
content_view = ContentView.info({
u'organization-id': org_id,
u'name': u'Default Organization View'
})
else:
# Create a content view
content_view = make_content_view({u'organization-id': org_id})
# Add repositories to content view
for repo_info in repos_info:
ContentView.add_repository({
u'id': content_view['id'],
u'organization-id': org_id,
u'repository-id': repo_info['id'],
})
# Publish the content view
ContentView.publish({u'id': content_view['id']})
# Get the latest content view version id
content_view_version = ContentView.info({
u'id': content_view['id']
})['versions'][-1]
# Promote content view version to lifecycle environment
ContentView.version_promote({
u'id': content_view_version['id'],
u'organization-id': org_id,
u'to-lifecycle-environment-id': lce_id,
})
content_view = ContentView.info({u'id': content_view['id']})
activation_key = make_activation_key({
u'organization-id': org_id,
u'lifecycle-environment-id': lce_id,
u'content-view-id': content_view['id'],
})
# Get organization subscriptions
subscriptions = Subscription.list({
u'organization-id': org_id},
per_page=False
)
# Add subscriptions to activation-key
needed_subscription_names = list(rh_subscriptions)
if custom_product:
needed_subscription_names.append(custom_product['name'])
added_subscription_names = []
for subscription in subscriptions:
if (subscription['name'] in needed_subscription_names
and subscription['name'] not in added_subscription_names):
ActivationKey.add_subscription({
u'id': activation_key['id'],
u'subscription-id': subscription['id'],
u'quantity': 1,
})
added_subscription_names.append(subscription['name'])
if (len(added_subscription_names)
== len(needed_subscription_names)):
break
missing_subscription_names = set(
needed_subscription_names).difference(set(added_subscription_names))
if missing_subscription_names:
raise CLIFactoryError(
u'Missing subscriptions: {0}'.format(missing_subscription_names))
data = dict(
activation_key=activation_key,
content_view=content_view,
product=custom_product,
repos=repos_info,
)
if lce_id:
lce = LifecycleEnvironment.info({
'id': lce_id,
'organization-id': org_id,
})
data['lce'] = lce
return data
def vm_setup_ssh_config(vm, ssh_key_name, host, user=None):
"""Create host entry in vm ssh config and know_hosts files to allow vm
to access host via ssh without password prompt
:param robottelo.vm.VirtualMachine vm: Virtual machine instance
:param str ssh_key_name: The ssh key file name to use to access host,
the file must already exist in /root/.ssh directory
:param str host: the hostname to setup that will be accessed from vm
:param str user: the user that will access the host
"""
if user is None:
user = 'root'
ssh_path = '/root/.ssh'
ssh_key_file_path = '{0}/{1}'.format(ssh_path, ssh_key_name)
# setup the config file
ssh_config_file_path = '{0}/config'.format(ssh_path)
result = vm.run('touch {0}'.format(ssh_config_file_path))
if result.return_code != 0:
raise CLIFactoryError(
u'Failed to create ssh config file:\n{}'
.format(result.stderr)
)
result = vm.run(
'echo "\nHost {0}\n\tHostname {0}\n\tUser {1}\n'
'\tIdentityFile {2}\n" >> {3}'
.format(host, user, ssh_key_file_path, ssh_config_file_path)
)
if result.return_code != 0:
raise CLIFactoryError(
u'Failed to write to ssh config file:\n{}'.format(result.stderr))
# add host entry to ssh known_hosts
result = vm.run(
'ssh-keyscan {0} >> {1}/known_hosts'.format(host, ssh_path))
if result.return_code != 0:
raise CLIFactoryError(
u'Failed to put hostname in ssh known_hosts files:\n{}'
.format(result.stderr)
)
def vm_upload_ssh_key(vm, source_key_path, destination_key_name):
"""Copy ssh key to virtual machine ssh path and ensure proper permission is
set
:param robottelo.vm.VirtualMachine vm: Virtual machine instance
:param source_key_path: The ssh key file path to copy to vm
:param destination_key_name: The ssh key file name when copied to vm
"""
destination_key_path = '/root/.ssh/{0}'.format(destination_key_name)
upload_file(
local_file=source_key_path,
remote_file=destination_key_path,
hostname=vm.ip_addr
)
result = vm.run('chmod 600 {0}'.format(destination_key_path))
if result.return_code != 0:
raise CLIFactoryError(
u'Failed to chmod ssh key file:\n{}'.format(result.stderr))
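# Illustrative usage (a sketch; the key name and hypervisor hostname are
# assumptions): upload a key to the vm, then wire up its ssh config so the vm
# can reach the hypervisor without a password prompt.
#     vm_upload_ssh_key(vm, settings.server.ssh_key, 'hypervisor.key')
#     vm_setup_ssh_config(vm, 'hypervisor.key', 'hypervisor.example.com')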
def virt_who_hypervisor_config(
config_id, virt_who_vm, org_id=None, lce_id=None,
hypervisor_hostname=None, configure_ssh=False, hypervisor_user=None,
subscription_name=None, exec_one_shot=False, upload_manifest=True, extra_repos=None):
"""
Configure a virtual machine to run the virt-who hypervisor service
:param int config_id: virt-who config id
:param robottelo.vm.VirtualMachine virt_who_vm: the Virtual machine
instance to use for configuration
:param int org_id: the organization id
:param int lce_id: the lifecycle environment id to use
:param str hypervisor_hostname: the hypervisor hostname
:param str hypervisor_user: hypervisor user that connects with the ssh key
:param bool configure_ssh: whether to configure the ssh key to allow this
virtual machine to connect to hypervisor
:param str subscription_name: the subscription name to assign to virt-who
hypervisor guests
:param bool exec_one_shot: whether to run the virt-who one-shot command
after startup
:param bool upload_manifest: whether to upload the organization manifest
:param list extra_repos: (Optional) a list of additional repository dict options to set up.
"""
if org_id is None:
org = make_org()
else:
org = Org.info({'id': org_id})
if lce_id is None:
lce = make_lifecycle_environment({'organization-id': org['id']})
else:
lce = LifecycleEnvironment.info({
'id': lce_id,
'organization-id': org['id']
})
if extra_repos is None:
extra_repos = []
repos = [
# Red Hat Satellite Tools
{
'product': PRDS['rhel'],
'repository-set': REPOSET['rhst7'],
'repository': REPOS['rhst7']['name'],
'repository-id': REPOS['rhst7']['id'],
'url': settings.sattools_repo['rhel7'],
'cdn': bool(settings.cdn or not settings.sattools_repo['rhel7']),
},
]
repos.extend(extra_repos)
content_setup_data = setup_cdn_and_custom_repos_content(
org['id'],
lce['id'],
repos,
upload_manifest=upload_manifest,
rh_subscriptions=[DEFAULT_SUBSCRIPTION_NAME],
)
activation_key = content_setup_data['activation_key']
content_view = content_setup_data['content_view']
setup_virtual_machine(
virt_who_vm,
org['label'],
activation_key=activation_key['name'],
patch_os_release_distro=DISTRO_RHEL7,
rh_repos_id=[
repo['repository-id']
for repo in repos if repo['cdn']
],
install_katello_agent=False,
)
# Manually configure the RHEL custom repo url, as syncing it from CDN takes
# very long (more than 2 hours for RHEL 7 Server) and is not critical here.
rhel_repo_option_name = 'rhel{0}_repo'.format(DISTROS_MAJOR_VERSION[DISTRO_RHEL7])
rhel_repo_url = getattr(settings, rhel_repo_option_name, None)
if not rhel_repo_url:
        raise ValueError('Settings option "{0}" is either not set or does not exist'.format(
rhel_repo_option_name))
virt_who_vm.configure_rhel_repo(rhel_repo_url)
if hypervisor_hostname and configure_ssh:
# configure ssh access of hypervisor from virt_who_vm
hypervisor_ssh_key_name = 'hypervisor-{0}.key'.format(
gen_string('alpha').lower())
# upload the ssh key
vm_upload_ssh_key(
virt_who_vm, settings.server.ssh_key, hypervisor_ssh_key_name)
# setup the ssh config and known_hosts files
vm_setup_ssh_config(virt_who_vm, hypervisor_ssh_key_name,
hypervisor_hostname, user=hypervisor_user)
# upload the virt-who config deployment script
_, temp_virt_who_deploy_file_path = mkstemp(
suffix='-virt_who_deploy-{0}'.format(config_id),
dir=settings.tmp_dir,
)
VirtWhoConfig.fetch({
'id': config_id,
'output': temp_virt_who_deploy_file_path
})
download_file(
remote_file=temp_virt_who_deploy_file_path,
local_file=temp_virt_who_deploy_file_path,
hostname=settings.server.hostname
)
upload_file(
local_file=temp_virt_who_deploy_file_path,
remote_file=temp_virt_who_deploy_file_path,
hostname=virt_who_vm.ip_addr
)
# ensure the virt-who config deploy script is executable
result = virt_who_vm.run('chmod +x {0}'.format(
temp_virt_who_deploy_file_path))
if result.return_code != 0:
raise CLIFactoryError(
u'Failed to set deployment script as executable:\n{}'
.format(result.stderr)
)
# execute the deployment script
result = virt_who_vm.run('{0}'.format(temp_virt_who_deploy_file_path))
if result.return_code != 0:
raise CLIFactoryError(
u'Deployment script failure:\n{}'.format(result.stderr))
# after this step, we should have virt-who service installed and started
if exec_one_shot:
        # usually, to be sure that virt-who generated the report, we need to
        # force a one-shot report; for this we have to stop the virt-who
        # service first
result = virt_who_vm.run('service virt-who stop')
if result.return_code != 0:
raise CLIFactoryError(
u'Failed to stop the virt-who service:\n{}'
.format(result.stderr)
)
result = virt_who_vm.run('virt-who --one-shot', timeout=900)
if result.return_code != 0:
raise CLIFactoryError(
u'Failed when executing virt-who --one-shot:\n{}'
.format(result.stderr)
)
result = virt_who_vm.run('service virt-who start')
if result.return_code != 0:
raise CLIFactoryError(
u'Failed to start the virt-who service:\n{}'
.format(result.stderr)
)
    # after this step the hypervisor should be created as a content host.
    # do not confuse the virt-who host with the hypervisor host, as they can
    # be different hosts; in this setup we have only registered the virt-who
    # host, and the hypervisor host should be registered after virt-who sends
    # the first report, either at service startup or with the one-shot command.
    # the virt-who hypervisor will be registered to satellite with a host name
    # like "virt-who-{hypervisor_hostname}-{organization_id}"
virt_who_hypervisor_hostname = (
'virt-who-{0}-{1}'.format(hypervisor_hostname, org['id']))
# find the registered virt-who hypervisor host
org_hosts = Host.list({
'organization-id': org['id'],
'search': 'name={0}'.format(virt_who_hypervisor_hostname)
})
    # Note: if the one-shot command was executed, the report was generated
    # immediately and the server should already have registered the virt-who
    # hypervisor host
if not org_hosts and not exec_one_shot:
# we have to wait until the first report was sent.
# the report is generated after the virt-who service startup, but some
# small delay can occur.
max_time = time.time() + 60
while time.time() <= max_time:
time.sleep(5)
org_hosts = Host.list({
'organization-id': org['id'],
'search': 'name={0}'.format(virt_who_hypervisor_hostname)
})
if org_hosts:
break
if len(org_hosts) == 0:
raise CLIFactoryError(
u'Failed to find hypervisor host:\n{}'.format(result.stderr))
virt_who_hypervisor_host = org_hosts[0]
subscription_id = None
if hypervisor_hostname and subscription_name:
        subscriptions = Subscription.list(
            {u'organization-id': org['id']},
            per_page=False
        )
for subscription in subscriptions:
if subscription['name'] == subscription_name:
subscription_id = subscription['id']
Host.subscription_attach({
'host': virt_who_hypervisor_hostname,
'subscription-id': subscription_id
})
break
return {
'subscription_id': subscription_id,
'subscription_name': subscription_name,
'activation_key_id': activation_key['id'],
'organization_id': org['id'],
'content_view_id': content_view['id'],
'lifecycle_environment_id': lce['id'],
'virt_who_hypervisor_host': virt_who_hypervisor_host,
}
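# Hedged usage sketch (illustration only, not from the original source): with a
# provisioned robottelo.vm.VirtualMachine and an existing virt-who config id,
# the helper above could be driven roughly as follows. `config_id` and the
# hostname are hypothetical placeholders, and VirtualMachine is assumed to be
# imported by the caller.
#
#     with VirtualMachine(distro=DISTRO_RHEL7) as vm:
#         data = virt_who_hypervisor_config(
#             config_id, vm,
#             hypervisor_hostname='esx.example.com',
#             configure_ssh=True,
#             exec_one_shot=True,
#         )
#         print(data['virt_who_hypervisor_host']['name'])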
|
omaciel/robottelo
|
robottelo/cli/factory.py
|
Python
|
gpl-3.0
| 177,458
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import icalendar
from lxml import html
from lxml.etree import ParserError
from werkzeug.urls import url_parse
from indico.core import signals
from indico.core.config import config
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.util.date_time import now_utc
from indico.util.signals import values_from_signal
def generate_basic_component(entity, uid=None, url=None):
"""Generate an iCalendar component with basic common properties.
:param entity: Event/session/contribution where properties come from
:param uid: UID for the component
:param url: URL for the component (defaults to `entity.external_url`)
:return: iCalendar event with basic properties
"""
component = icalendar.Event()
component.add('dtstamp', now_utc(False))
component.add('dtstart', entity.start_dt)
component.add('dtend', entity.end_dt)
component.add('summary', entity.title)
if uid:
component.add('uid', uid)
if url:
component.add('url', url)
elif hasattr(entity, 'external_url'):
component.add('url', entity.external_url)
location = (f'{entity.room_name} ({entity.venue_name})'
if entity.venue_name and entity.room_name
else (entity.venue_name or entity.room_name))
if location:
component.add('location', location)
speaker_list = getattr(entity, 'person_links', [])
description = []
if speaker_list:
speakers = [f'{x.full_name} ({x.affiliation})' if x.affiliation else x.full_name
for x in speaker_list]
description.append('Speakers: {}'.format(', '.join(speakers)))
if entity.description:
desc_text = str(entity.description) or '<p/>' # get rid of RichMarkup
try:
description.append(str(html.fromstring(desc_text).text_content()))
except ParserError:
# this happens if desc_text only contains a html comment
pass
if description:
component.add('description', '\n'.join(description))
return component
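# Illustrative sketch (assumption, not part of the original module): any object
# exposing `start_dt`, `end_dt` and `title` can be turned into a VEVENT by the
# helper above and dropped into a calendar. `my_contribution` and the uid are
# hypothetical placeholders.
#
#     cal = icalendar.Calendar()
#     cal.add_component(
#         generate_basic_component(my_contribution, uid='indico-contrib-42@example.com'))
#     ical_bytes = cal.to_ical()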
def generate_event_component(event, user=None):
"""Generate an event icalendar component from an Indico event."""
uid = f'indico-event-{event.id}@{url_parse(config.BASE_URL).host}'
component = generate_basic_component(event, uid)
# add contact information
contact_info = event.contact_emails + event.contact_phones
if contact_info:
component.add('contact', ';'.join(contact_info))
# add logo url if event is public
if event.effective_protection_mode == ProtectionMode.public and event.has_logo:
component.add('image', event.external_logo_url, {'VALUE': 'URI'})
# send description to plugins in case one wants to add anything to it
data = {'description': component.get('description', '')}
for update in values_from_signal(
signals.event.metadata_postprocess.send('ical-export', event=event, data=data, user=user),
as_list=True
):
data.update(update)
    component['description'] = data['description']
return component
def event_to_ical(event, user=None, detailed=False):
"""Serialize an event into an ical.
:param event: The event to serialize
:param user: The user who needs to be able to access the events
:param detailed: If True, iCal will include the event's contributions
"""
return events_to_ical([event], user, detailed)
def events_to_ical(events, user=None, detailed=False):
"""Serialize multiple events into an ical.
:param events: A list of events to serialize
:param user: The user who needs to be able to access the events
:param detailed: If True, iCal will include the event's contributions
"""
calendar = icalendar.Calendar()
calendar.add('version', '2.0')
calendar.add('prodid', '-//CERN//INDICO//EN')
for event in events:
if not detailed:
component = generate_event_component(event, user)
calendar.add_component(component)
else:
from indico.modules.events.contributions.ical import generate_contribution_component
components = [
generate_contribution_component(contrib)
for contrib in event.contributions
if contrib.start_dt
]
for component in components:
calendar.add_component(component)
return calendar.to_ical()
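# Hedged usage note (not in the original file): serializing a single event with
# its contributions could look like the sketch below; `event` and `user` are
# hypothetical objects provided by the caller.
#
#     ics_bytes = event_to_ical(event, user=user, detailed=True)
#     with open('event.ics', 'wb') as fh:
#         fh.write(ics_bytes)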
|
DirkHoffmann/indico
|
indico/modules/events/ical.py
|
Python
|
gpl-3.0
| 4,640
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'ReportTemplate.type'
db.delete_column('lizard_htmlreport_reporttemplate', 'type')
# Adding field 'ReportTemplate.kind'
db.add_column('lizard_htmlreport_reporttemplate', 'kind', self.gf('django.db.models.fields.CharField')(default='algemeen', max_length=255), keep_default=False)
def backwards(self, orm):
# Adding field 'ReportTemplate.type'
db.add_column('lizard_htmlreport_reporttemplate', 'type', self.gf('django.db.models.fields.CharField')(default='algemeen', max_length=255), keep_default=False)
# Deleting field 'ReportTemplate.kind'
db.delete_column('lizard_htmlreport_reporttemplate', 'kind')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_area.area': {
'Meta': {'ordering': "('name',)", 'object_name': 'Area', '_ormbases': ['lizard_area.Communique']},
'area_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'communique_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Communique']", 'unique': 'True', 'primary_key': 'True'}),
'data_administrator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.DataAdministrator']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'})
},
'lizard_area.communique': {
'Meta': {'object_name': 'Communique', '_ormbases': ['lizard_geo.GeoObject']},
'code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'lizard_area.dataadministrator': {
'Meta': {'object_name': 'DataAdministrator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_geo.geoobject': {
'Meta': {'object_name': 'GeoObject'},
'geo_object_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObjectGroup']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'lizard_geo.geoobjectgroup': {
'Meta': {'object_name': 'GeoObjectGroup'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'source_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_htmlreport.generatedreport': {
'Meta': {'object_name': 'GeneratedReport'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']"}),
'data_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataSet']", 'null': 'True', 'blank': 'True'}),
'document_pdf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'document_rtf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'generated_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_htmlreport.ReportTemplate']"}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'lizard_htmlreport.reporttemplate': {
'Meta': {'object_name': 'ReportTemplate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'default': "'algemeen'", 'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'lizard_security.dataset': {
'Meta': {'ordering': "['name']", 'object_name': 'DataSet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
}
}
complete_apps = ['lizard_htmlreport']
|
lizardsystem/lizard-htmlreport
|
lizard_htmlreport/migrations/0005_auto__del_field_reporttemplate_type__add_field_reporttemplate_kind.py
|
Python
|
gpl-3.0
| 8,531
|
import shlex
import sys
import contextlib
import nose
import six
from six import StringIO
from ipatests import util
from ipalib import api, errors
import pytest
if six.PY3:
unicode = str
TEST_ZONE = u'zoneadd.%(domain)s' % api.env
@pytest.mark.tier0
class TestCLIParsing(object):
"""Tests that commandlines are correctly parsed to Command keyword args
"""
def check_command(self, commandline, expected_command_name, **kw_expected):
argv = shlex.split(commandline)
executioner = api.Backend.cli
cmd = executioner.get_command(argv)
kw_got = executioner.parse(cmd, argv[1:])
kw_got = executioner.process_keyword_arguments(cmd, kw_got)
util.assert_deepequal(expected_command_name, cmd.name, 'Command name')
util.assert_deepequal(kw_expected, kw_got)
def run_command(self, command_name, **kw):
"""Run a command on the server"""
if not api.Backend.rpcclient.isconnected():
api.Backend.rpcclient.connect()
try:
api.Command[command_name](**kw)
except errors.NetworkError:
raise nose.SkipTest('%r: Server not available: %r' %
(self.__module__, api.env.xmlrpc_uri))
@contextlib.contextmanager
def fake_stdin(self, string_in):
"""Context manager that temporarily replaces stdin to read a string"""
old_stdin = sys.stdin
sys.stdin = StringIO(string_in)
yield
sys.stdin = old_stdin
def test_ping(self):
self.check_command('ping', 'ping')
def test_user_show(self):
self.check_command('user-show admin', 'user_show', uid=u'admin')
def test_user_show_underscore(self):
self.check_command('user_show admin', 'user_show', uid=u'admin')
def test_group_add(self):
self.check_command(
'group-add tgroup1 --desc="Test group"',
'group_add',
cn=u'tgroup1',
description=u'Test group',
)
def test_sudocmdgroup_add_member(self):
# Test CSV splitting is not done
self.check_command(
# The following is as it would appear on the command line:
r'sudocmdgroup-add-member tcmdgroup1 --sudocmds=ab,c --sudocmds=d',
'sudocmdgroup_add_member',
cn=u'tcmdgroup1',
sudocmd=[u'ab,c', u'd'],
)
def test_group_add_nonposix(self):
self.check_command(
'group-add tgroup1 --desc="Test group" --nonposix',
'group_add',
cn=u'tgroup1',
description=u'Test group',
nonposix=True,
)
def test_group_add_gid(self):
self.check_command(
'group-add tgroup1 --desc="Test group" --gid=1234',
'group_add',
cn=u'tgroup1',
description=u'Test group',
gidnumber=u'1234',
)
def test_group_add_interactive(self):
with self.fake_stdin('Test group\n'):
self.check_command(
'group-add tgroup1', 'group_add',
cn=u'tgroup1',
)
def test_dnsrecord_add(self):
self.check_command(
'dnsrecord-add %s ns --a-rec=1.2.3.4' % TEST_ZONE,
'dnsrecord_add',
dnszoneidnsname=TEST_ZONE,
idnsname=u'ns',
arecord=u'1.2.3.4',
)
def test_dnsrecord_del_all(self):
try:
self.run_command('dnszone_add', idnsname=TEST_ZONE)
except errors.NotFound:
raise nose.SkipTest('DNS is not configured')
try:
self.run_command('dnsrecord_add',
dnszoneidnsname=TEST_ZONE,
idnsname=u'ns', arecord=u'1.2.3.4', force=True)
with self.fake_stdin('yes\n'):
self.check_command(
'dnsrecord_del %s ns' % TEST_ZONE,
'dnsrecord_del',
dnszoneidnsname=TEST_ZONE,
idnsname=u'ns',
del_all=True,
)
with self.fake_stdin('YeS\n'):
self.check_command(
'dnsrecord_del %s ns' % TEST_ZONE,
'dnsrecord_del',
dnszoneidnsname=TEST_ZONE,
idnsname=u'ns',
del_all=True,
)
finally:
self.run_command('dnszone_del', idnsname=TEST_ZONE)
def test_dnsrecord_del_one_by_one(self):
try:
self.run_command('dnszone_add', idnsname=TEST_ZONE)
except errors.NotFound:
raise nose.SkipTest('DNS is not configured')
try:
records = (u'1 1 E3B72BA346B90570EED94BE9334E34AA795CED23',
u'2 1 FD2693C1EFFC11A8D2BE57229212A04B45663791')
for record in records:
self.run_command('dnsrecord_add',
dnszoneidnsname=TEST_ZONE, idnsname=u'ns',
sshfprecord=record)
with self.fake_stdin('no\nyes\nyes\n'):
self.check_command(
'dnsrecord_del %s ns' % TEST_ZONE,
'dnsrecord_del',
dnszoneidnsname=TEST_ZONE,
idnsname=u'ns',
sshfprecord=records,
)
finally:
self.run_command('dnszone_del', idnsname=TEST_ZONE)
def test_dnsrecord_add_ask_for_missing_fields(self):
sshfp_parts = (1, 1, u'E3B72BA346B90570EED94BE9334E34AA795CED23')
with self.fake_stdin('SSHFP\n%d\n%d\n%s' % sshfp_parts):
self.check_command(
'dnsrecord-add %s sshfp' % TEST_ZONE,
'dnsrecord_add',
dnszoneidnsname=TEST_ZONE,
idnsname=u'sshfp',
sshfp_part_fp_type=sshfp_parts[0],
sshfp_part_algorithm=sshfp_parts[1],
sshfp_part_fingerprint=sshfp_parts[2],
)
# test with lowercase record type
with self.fake_stdin('sshfp\n%d\n%d\n%s' % sshfp_parts):
self.check_command(
'dnsrecord-add %s sshfp' % TEST_ZONE,
'dnsrecord_add',
dnszoneidnsname=TEST_ZONE,
idnsname=u'sshfp',
sshfp_part_fp_type=sshfp_parts[0],
sshfp_part_algorithm=sshfp_parts[1],
sshfp_part_fingerprint=sshfp_parts[2],
)
# NOTE: when a DNS record part is passed via command line, it is not
        # converted to its base type when transferred via wire
with self.fake_stdin('%d\n%s' % (sshfp_parts[1], sshfp_parts[2])):
self.check_command(
'dnsrecord-add %s sshfp --sshfp-algorithm=%d' % (
TEST_ZONE, sshfp_parts[0]),
'dnsrecord_add',
dnszoneidnsname=TEST_ZONE,
idnsname=u'sshfp',
sshfp_part_fp_type=sshfp_parts[0],
# passed via cmdline
sshfp_part_algorithm=unicode(sshfp_parts[1]),
sshfp_part_fingerprint=sshfp_parts[2],
)
with self.fake_stdin(sshfp_parts[2]):
self.check_command(
'dnsrecord-add %s sshfp --sshfp-algorithm=%d '
'--sshfp-fp-type=%d' % (
TEST_ZONE, sshfp_parts[0], sshfp_parts[1]),
'dnsrecord_add',
dnszoneidnsname=TEST_ZONE,
idnsname=u'sshfp',
# passed via cmdline
sshfp_part_fp_type=unicode(sshfp_parts[0]),
# passed via cmdline
sshfp_part_algorithm=unicode(sshfp_parts[1]),
sshfp_part_fingerprint=sshfp_parts[2],
)
def test_dnsrecord_del_comma(self):
try:
self.run_command(
'dnszone_add', idnsname=TEST_ZONE)
except errors.NotFound:
raise nose.SkipTest('DNS is not configured')
try:
self.run_command(
'dnsrecord_add',
dnszoneidnsname=TEST_ZONE,
idnsname=u'test',
txtrecord=u'"A pretty little problem," said Holmes.')
with self.fake_stdin('no\nyes\n'):
self.check_command(
'dnsrecord_del %s test' % TEST_ZONE,
'dnsrecord_del',
dnszoneidnsname=TEST_ZONE,
idnsname=u'test',
txtrecord=[u'"A pretty little problem," said Holmes.'])
finally:
self.run_command('dnszone_del', idnsname=TEST_ZONE)
def test_idrange_add(self):
"""
        Test idrange-add with interactive prompt
"""
def test_with_interactive_input():
with self.fake_stdin('5\n500000\n'):
self.check_command(
'idrange_add range1 --base-id=1 --range-size=1',
'idrange_add',
cn=u'range1',
ipabaseid=u'1',
ipaidrangesize=u'1',
ipabaserid=5,
ipasecondarybaserid=500000,
)
def test_with_command_line_options():
self.check_command(
'idrange_add range1 --base-id=1 --range-size=1 '
'--rid-base=5 --secondary-rid-base=500000',
'idrange_add',
cn=u'range1',
ipabaseid=u'1',
ipaidrangesize=u'1',
ipabaserid=u'5',
ipasecondarybaserid=u'500000',
)
def test_without_options():
self.check_command(
'idrange_add range1 --base-id=1 --range-size=1',
'idrange_add',
cn=u'range1',
ipabaseid=u'1',
ipaidrangesize=u'1',
)
adtrust_dn = 'cn=ADTRUST,cn=%s,cn=masters,cn=ipa,cn=etc,%s' % \
(api.env.host, api.env.basedn)
adtrust_is_enabled = api.Command['adtrust_is_enabled']()['result']
mockldap = None
if not adtrust_is_enabled:
# ipa-adtrust-install not run - no need to pass rid-base
# and secondary-rid-base
test_without_options()
# Create a mock service object to test against
adtrust_add = dict(
ipaconfigstring=b'enabledService',
objectclass=[b'top', b'nsContainer', b'ipaConfigObject']
)
mockldap = util.MockLDAP()
mockldap.add_entry(adtrust_dn, adtrust_add)
# Pass rid-base and secondary-rid-base interactively
test_with_interactive_input()
# Pass rid-base and secondary-rid-base on the command-line
test_with_command_line_options()
if not adtrust_is_enabled:
mockldap.del_entry(adtrust_dn)
|
ofayans/freeipa
|
ipatests/test_cmdline/test_cli.py
|
Python
|
gpl-3.0
| 10,902
|
VERSION = '0.01"
|
BurritoBob/GUIMiner-X11
|
version.py
|
Python
|
gpl-3.0
| 17
|
"""Exceptions raised by the employee browser app."""
class EmployeeExc(Exception):
"""Base class for all app exceptions.
Provides a common way to display exceptions. Subclass this class and define
a 'message' property. That message will get printf'd with the keyword
arguments provided to the constructor."""
message = "An unknown exception occurred"
def __init__(self, message=None, *args, **kwargs):
if not message:
message = self.message
if kwargs:
try:
message = message % kwargs
except Exception:
# format string and args don't match - ignore it and try to get
# out the underlying generic message.
pass
super(EmployeeExc, self).__init__(message)
class AuthFail(EmployeeExc):
message = "Authentication failed for user:%(login)s - %(reason)s"
class InconsistentDB(EmployeeExc):
message = "Records in the DB are not consistent"
class UnknownField(EmployeeExc):
message = "Unknown field:%(field)s specified"
|
lseek/empdb
|
employees/exceptions.py
|
Python
|
gpl-3.0
| 1,088
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 02 11:54:33 2012
@author: a1185872
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.patches import Ellipse, Rectangle, Arrow
from matplotlib.colors import LinearSegmentedColormap, Normalize
import matplotlib.colorbar as mcb
import matplotlib.gridspec as gridspec
import mtpy1.core.z as Z
import mtpy1.utils.latlongutmconversion as ll2utm
# tolerance to find frequencies
ptol = .15
# error of data in percentage
zerr = .05
# errormap values which is multiplied by zerr to get a total error
zxxerrmap = 10
zxyerrmap = 1
zyxerrmap = 1
zyyerrmap = 10
zerrmap = [zxxerrmap, zxyerrmap, zyxerrmap, zyyerrmap]
#==============================================================================
# Colormaps for plots
#==============================================================================
# phase tensor map
ptcmapdict = {'red': ((0.0, 1.0, 1.0), (1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 1.0), (1.0, 0.0, 1.0)),
'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))}
ptcmap = LinearSegmentedColormap('ptcmap', ptcmapdict, 256)
# phase tensor map for difference (reverse)
ptcmapdictr = {'red': ((0.0, 1.0, 1.0), (1.0, 1.0, 1.0)),
'green': ((0.0, 1.0, 0.0), (1.0, 1.0, 0.0)),
'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))}
ptcmapr = LinearSegmentedColormap('ptcmapr', ptcmapdictr, 256)
# resistivity tensor map for calculating delta
ptcmapdict2 = {'red': ((0.0, 1.0, 0.0), (1.0, 1.0, 0.0)),
'green': ((0.0, 0.5, 0.5), (1.0, 0.5, 0.5)),
'blue': ((0.0, 0.5, 0.5), (1.0, 0.5, 0.5))}
ptcmap2 = LinearSegmentedColormap('ptcmap2', ptcmapdict2, 256)
# resistivity tensor map for calculating resistivity difference
rtcmapdict = {'red': ((0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 0.0)),
'green': ((0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0))}
rtcmap = LinearSegmentedColormap('rtcmap', rtcmapdict, 256)
# resistivity tensor map for calculating apparent resistivity
rtcmapdictr = {'red': ((0.0, 1.0, 1.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0))}
rtcmapr = LinearSegmentedColormap('rtcmapr', rtcmapdictr, 256)
#==============================================================================
# define some helping functions
#==============================================================================
# make a class to pick periods
class ListPeriods:
def __init__(self, fig):
self.plst = []
self.fig = fig
self.count = 1
def connect(self):
self.cid = self.fig.canvas.mpl_connect('button_press_event',
self.onclick)
def onclick(self, event):
print '{0} Period: {1:.5g}'.format(self.count, event.xdata)
self.plst.append(event.xdata)
self.count += 1
def disconnect(self):
self.fig.canvas.mpl_disconnect(self.cid)
def readWLOutFile(outfn, ncol=5):
"""
read .out file from winglink
Inputs:
outfn = full path to .out file from winglink
Outputs:
dx,dy,dz = cell nodes in x,y,z directions (note x is to the East here
and y is to the north.)
"""
wingLinkDataFH = file(outfn, 'r')
raw_data = wingLinkDataFH.read().strip().split()
nx = int(raw_data[0])
ny = int(raw_data[1])
nz = int(raw_data[2])
dx = np.zeros(nx)
dy = np.zeros(ny)
dz = np.zeros(nz)
for x_idx in range(nx):
dx[x_idx] = raw_data[x_idx + 5]
for y_idx in range(ny):
dy[y_idx] = raw_data[y_idx + 5 + nx]
for z_idx in range(nz):
dz[z_idx] = raw_data[z_idx + 5 + nx + ny]
# dx[0:nx/2]=-dx[0:nx/2]
# dy[0:ny/2]=-dy[0:ny/2]
return dx, dy, dz
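# Hedged usage sketch (not from the original source): the node spacings read
# from a winglink .out file can be summed to get the total model extent. The
# path below is a hypothetical placeholder.
#
#     dx, dy, dz = readWLOutFile(r"/home/winglink/model.out")
#     print dx.sum(), dy.sum(), dz.sum()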
def readSitesFile(sitesfn):
"""
read sites_ file output from winglink
Input:
sitesfn = full path to the sites file output by winglink
Output:
slst = list of dictionaries for each station. Keys include:
station = station name
dx = number of blocks from center of grid in East-West direction
dy = number of blocks from center of grid in North-South direction
dz = number of blocks from center of grid vertically
number = block number in the grid
sitelst = list of station names
"""
sfid = file(sitesfn, 'r')
slines = sfid.readlines()
slst = []
sitelst = []
for ss in slines:
sdict = {}
sline = ss.strip().split()
sdict['station'] = sline[0][0:-4]
sdict['dx'] = int(sline[1]) - 1
sdict['dy'] = int(sline[2]) - 1
sdict['dz'] = int(sline[3]) - 1
sdict['something'] = int(sline[4])
sdict['number'] = int(sline[5])
slst.append(sdict)
sitelst.append(sline[0][0:-4])
return slst, sitelst
def getXY(sitesfn, outfn, ncol=5):
"""
get x (e-w) and y (n-s) position of station and put in middle of cell
Input:
sitesfn = full path to sites file output from winglink
outfn = full path to .out file output from winglink
ncol = number of columns the data is in
Outputs:
xarr = array of relative distance for each station from center of the
grid. Note this is E-W direction
yarr = array of relative distance for each station from center of the
grid. Note this is N-S direction
"""
slst, sitelst = readSitesFile(sitesfn)
dx, dy, dz = readWLOutFile(outfn, ncol=ncol)
ns = len(slst)
nxh = len(dx) / 2
nyh = len(dy) / 2
xarr = np.zeros(ns)
yarr = np.zeros(ns)
for ii, sdict in enumerate(slst):
xx = sdict['dx']
yy = sdict['dy']
if xx < nxh:
xarr[ii] = dx[xx:nxh].sum() - dx[xx] / 2
else:
xarr[ii] = dx[nxh:xx].sum() + dx[xx] / 2
if yy < nyh:
yarr[ii] = -1 * (dy[yy:nyh].sum() - dy[yy] / 2)
else:
yarr[ii] = -1 * (dy[nyh:yy].sum() + dy[yy] / 2)
return xarr, yarr
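# Hedged usage sketch (illustration only): station offsets relative to the grid
# center can be pulled from the winglink sites file and .out file together. The
# paths are hypothetical placeholders.
#
#     xarr, yarr = getXY(r"/home/winglink/sites_", r"/home/winglink/model.out")
#     print zip(xarr, yarr)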
def getPeriods(edilst, errthresh=10):
"""
    Plots periods for all stations in edilst; the plot is interactive. Just
    click on the period you want to select and it will appear in the console;
    it will also be saved to lp.plst. To sort this list type lp.plst.sort().
    The x's mark a confirmation that the station contains that period, so
    when looking for the best periods to invert for, look for a dense line of
    x's.
    Inputs:
        edilst = list of full paths to the .edi files to plot. Note that only
                 the impedance components are supported, so if you have spectra
                 data, export them from winglink to have impedance information.
        errthresh = threshold on the error in impedance estimation; this just
                    gives an indication of where bad stations and bad periods
                    are, anything above this level will be colored in red.
Outputs:
periodlst = list of periods for each station
errorlst = error in the impedance determinant for each station at
each period.
lp = data type lp has attributes:
plst = period list of chosen periods, again to sort this list type
lp.plst.sort(). this will then be the input to make the
data file later.
"""
plt.rcParams['font.size'] = 10
plt.rcParams['figure.subplot.left'] = .13
plt.rcParams['figure.subplot.right'] = .98
plt.rcParams['figure.subplot.bottom'] = .1
plt.rcParams['figure.subplot.top'] = .95
plt.rcParams['figure.subplot.wspace'] = .25
plt.rcParams['figure.subplot.hspace'] = .05
periodlst = []
errorlst = []
fig1 = plt.figure(5)
ax = fig1.add_subplot(1, 1, 1)
for edi in edilst:
if not os.path.isfile(edi):
print 'Could not find ' + edi
else:
z1 = Z.Z(edi)
periodlst.append(z1.period)
zdet = np.array([np.sqrt(abs(np.linalg.det(zz))) for zz in z1.z])
error = np.array([np.sqrt(abs(np.linalg.det(zz)))
for zz in z1.zvar])
perror = (error / zdet) * 100
errorlst.append(perror)
# make a plot to pick frequencies from showing period and percent
# error
ax.scatter(z1.period, perror, marker='x', picker=5)
pfind = np.where(perror > errthresh)[0]
if len(pfind) > 0:
print 'Error greater than {0:.3f} for '.format(errthresh) + z1.station
for jj in pfind:
ax.scatter(
z1.period[jj],
perror[jj],
marker='x',
color='r')
ax.text(z1.period[jj], perror[jj] * 1.05, z1.station,
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size': 8, 'color': 'red'})
print jj, z1.period[jj]
ax.set_xscale('log')
ax.set_xlim(10**np.floor(np.log10(z1.period[0])),
10**np.ceil(np.log10(z1.period[-1])))
ax.set_ylim(0, 3 * errthresh)
ax.set_yscale('log')
ax.set_xlabel('Period (s)', fontdict={'size': 12, 'weight': 'bold'})
ax.set_ylabel('Percent Error', fontdict={'size': 12, 'weight': 'bold'})
ax.grid('on', which='both')
lp = ListPeriods(fig1)
lp.connect()
plt.show()
return periodlst, errorlst, lp
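# Hedged usage sketch (not from the original source): picking inversion periods
# interactively from a set of edi files. The edi directory is a hypothetical
# placeholder; lp.plst holds the clicked periods afterwards.
#
#     edilst = [os.path.join(r"/home/edifiles", edi)
#               for edi in os.listdir(r"/home/edifiles")]
#     plst, errlst, lp = getPeriods(edilst, errthresh=10)
#     lp.plst.sort()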
def make3DGrid(edilst, xspacing=500, yspacing=500, z1layer=10, xpad=5, ypad=5,
zpad=5, xpadroot=5, ypadroot=5, zpadroot=2, zpadpow=(5, 15), nz=30,
plotyn='y', plotxlimits=None, plotylimits=None, plotzlimits=None):
"""
makes a grid from the edifiles to go into wsinv3d. The defaults usually
work relatively well, but it might take some effort to get a desired grid.
Inputs:
--------
**edilst** : list
list of full paths to the .edi files to be included in
the inversion.
**xspacing** : float
spacing of cells in the east-west direction in meters.
*default* is 500 (m)
**yspacing** : float
spacing of cells in the north-south direction in meters.
*default* is 500 (m)
**z1layer** : float
the depth of the first layer in the model in meters.
This is usually about 1/10th of your shallowest skin
depth.
*default* is 10 (m)
**xpad** : int
number of cells to pad on either side in the east-west
direction. The width of these cells grows exponentially
to the edge.
*default* is 5
**ypad** : int
number of cells to pad on either side in the north-south
direction. The width of these cells grows exponentially
to the edge.
*default* is 5
**zpad** : int
number of cells to pad on either side in the vertical
direction. This is to pad beneath the depth of
investigation and grows faster exponentially than the zone
of study. The purpose is to decrease the number of cells
in the model.
*default* is 5
**xpadroot** : float
the root number that is multiplied to itself for
calculating the width of the padding cells in the
east-west direction.
*default* is 5
**ypadroot** : float
the root number that is multiplied to itself for
calculating the width of the padding cells in the
north-south direction.
*default* is 5
**zpadroot** : float
the root number that is multiplied to itself for
calculating the width of the padding cells in the
vertical direction.
*default* is 2
**zpadpow** : tuple (min,max)
the power to which zpadroot is raised for the padding
cells in the vertical direction. Input as a tuple with
minimum power and maximum power.
*default* is (5,15)
**nz** : int
number of layers in the vertical direction. Remember that
the inversion code automatically adds 7 air layers to the
model which need to be used when estimating the memory that
it is going to take to run the model.
*default* is 30
**plotyn** : [ 'y' | 'n' ]
if plotyn=='y' then a plot showing map view (east:north)
and a cross sectional view (east:vertical) plane
* 'y' to plot the grid with station locations
* 'n' to suppress the plotting.
**plotxlimits** : tuple (xmin,xmax)
plot min and max distances in meters for the east-west
direction. If not input, the xlimits will be set to
the furthest stations east and west.
*default* is None
**plotylimits** : tuple (ymin,ymax)
plot min and max distances in meters for the east-west
direction. If not input, the ylimits will be set to
the furthest stations north and south.
*default* is None
**plotzlimits** : tuple (zmin,zmax)
plot min and max distances in meters for the east-west
direction. If not input, the zlimits will be set to
the nz layer and 0.
*default* is None
Returns:
--------
xgrid,ygrid,zgrid,locations,slst
**xgrid** : np.array
array of the east-west cell locations
**ygrid** : np.array
array of the north-south cell locations
**zgrid** : np.array
array of the vertical cell locations
**locations** : np.array (ns,2)
array of station locations placed in the center of
the cells.
* column 1 is for east-west locations
* column 2 is for the north-south location
**slst** : list
list of dictionaries for each station with keys:
* *'station'* for the station name
* *'east'* for easting in model coordinates
* *'east_c'* for easting in model coordinates to place
the station at the center of the cell
* *'north'* for northing in model coordinates
* *'north_c'* for northing in model coordinates to place
the station at the center of the cell
:Example: ::
>>> import mtpy.modeling.ws3dtools as ws
>>> import os
>>> edipath=r"/home/edifiles"
    >>> edilst=[os.path.join(edipath,edi) for edi in os.listdir(edipath)]
>>> xg,yg,zg,loc,statlst=ws.make3DGrid(edilst,plotzlimits=(-2000,200))
"""
ns = len(edilst)
locations = np.zeros((ns, 2))
slst = []
for ii, edi in enumerate(edilst):
zz = Z.Z(edi)
zone, east, north = ll2utm.LLtoUTM(23, zz.lat, zz.lon)
locations[ii, 0] = east
locations[ii, 1] = north
slst.append({'station': zz.station, 'east': east, 'north': north})
# estimate the mean distance to get into relative coordinates
xmean = locations[:, 0].mean()
ymean = locations[:, 1].mean()
# remove the average distance to get coordinates in a relative space
locations[:, 0] -= xmean
locations[:, 1] -= ymean
for sdict in slst:
sdict['east'] -= xmean
sdict['north'] -= ymean
# translate the stations so they are relative to 0,0
xcenter = (locations[:, 0].max() - np.abs(locations[:, 0].min())) / 2
ycenter = (locations[:, 1].max() - np.abs(locations[:, 1].min())) / 2
# remove the average distance to get coordinates in a relative space
locations[:, 0] -= xcenter
locations[:, 1] -= ycenter
for sdict in slst:
sdict['east'] -= xcenter
sdict['north'] -= ycenter
    # pick out the furthest south and west locations
# and put that station as the bottom left corner of the main grid
xleft = locations[:, 0].min() - xspacing / 2
xright = locations[:, 0].max() + xspacing / 2
ybottom = locations[:, 1].min() - yspacing / 2
ytop = locations[:, 1].max() + yspacing / 2
#---make a grid around the stations from the parameters above---
# make grid in east-west direction
midxgrid = np.arange(start=xleft, stop=xright + xspacing,
step=xspacing)
xpadleft = np.round(-xspacing * 5**np.arange(start=.5, stop=3, step=3. / xpad)) +\
xleft
xpadright = np.round(xspacing * 5**np.arange(start=.5, stop=3, step=3. / xpad)) +\
xright
xgridr = np.append(np.append(xpadleft[::-1], midxgrid), xpadright)
# make grid in north-south direction
midygrid = np.arange(start=ybottom, stop=ytop + yspacing,
step=yspacing)
    ypadbottom = np.round(-yspacing * 5**np.arange(start=.5, stop=3, step=3. / ypad)) +\
ybottom
    ypadtop = np.round(yspacing * 5**np.arange(start=.5, stop=3, step=3. / ypad)) +\
ytop
ygridr = np.append(np.append(ypadbottom[::-1], midygrid), ypadtop)
# make depth grid
zgrid1 = z1layer * \
2**np.round(np.arange(0, zpadpow[0], zpadpow[0] / (nz - zpad)))
zgrid2 = z1layer * 2**np.round(np.arange(zpadpow[0], zpadpow[1],
(zpadpow[1] - zpadpow[0]) / (zpad)))
zgrid = np.append(zgrid1, zgrid2)
#--Need to make an array of the individual cell dimensions for the wsinv3d
xnodes = xgridr.copy()
nx = xgridr.shape[0]
xnodes[:nx / 2] = np.array([abs(xgridr[ii] - xgridr[ii + 1])
for ii in range(int(nx / 2))])
xnodes[nx / 2:] = np.array([abs(xgridr[ii] - xgridr[ii + 1])
for ii in range(int(nx / 2) - 1, nx - 1)])
ynodes = ygridr.copy()
ny = ygridr.shape[0]
ynodes[:ny / 2] = np.array([abs(ygridr[ii] - ygridr[ii + 1])
for ii in range(int(ny / 2))])
ynodes[ny / 2:] = np.array([abs(ygridr[ii] - ygridr[ii + 1])
for ii in range(int(ny / 2) - 1, ny - 1)])
#--put the grids into coordinates relative to the center of the grid
xgrid = xnodes.copy()
xgrid[:int(nx / 2)] = -np.array([xnodes[ii:int(nx / 2)].sum()
for ii in range(int(nx / 2))])
xgrid[int(nx / 2):] = np.array([xnodes[int(nx / 2):ii + 1].sum()
for ii in range(int(nx / 2), nx)]) - xnodes[int(nx / 2)]
ygrid = ynodes.copy()
ygrid[:int(ny / 2)] = -np.array([ynodes[ii:int(ny / 2)].sum()
for ii in range(int(ny / 2))])
ygrid[int(ny / 2):] = np.array([ynodes[int(ny / 2):ii + 1].sum()
for ii in range(int(ny / 2), ny)]) - ynodes[int(ny / 2)]
# make sure that the stations are in the center of the cell as requested by
# the code.
for sdict in slst:
# look for the closest grid line
xx = [nn for nn, xf in enumerate(xgrid) if xf > (sdict['east'] - xspacing)
and xf < (sdict['east'] + xspacing)]
# shift the station to the center in the east-west direction
if xgrid[xx[0]] < sdict['east']:
sdict['east_c'] = xgrid[xx[0]] + xspacing / 2
elif xgrid[xx[0]] > sdict['east']:
sdict['east_c'] = xgrid[xx[0]] - xspacing / 2
# look for closest grid line
yy = [mm for mm, yf in enumerate(ygrid) if yf > (sdict['north'] - yspacing)
and yf < (sdict['north'] + yspacing)]
# shift station to center of cell in north-south direction
if ygrid[yy[0]] < sdict['north']:
sdict['north_c'] = ygrid[yy[0]] + yspacing / 2
elif ygrid[yy[0]] > sdict['north']:
sdict['north_c'] = ygrid[yy[0]] - yspacing / 2
#=Plot the data if desired=========================
if plotyn == 'y':
fig = plt.figure(1, figsize=[10, 10], dpi=300)
#---plot map view
ax1 = fig.add_subplot(1, 2, 1, aspect='equal')
for sdict in slst:
# make sure the station is in the center of the cell
ax1.scatter(sdict['east_c'], sdict['north_c'], marker='v')
for xp in xgrid:
ax1.plot([xp, xp], [ygrid.min(), ygrid.max()], color='k')
for yp in ygrid:
ax1.plot([xgrid.min(), xgrid.max()], [yp, yp], color='k')
if plotxlimits is None:
ax1.set_xlim(locations[:, 0].min() - 10 * xspacing,
locations[:, 0].max() + 10 * xspacing)
else:
ax1.set_xlim(plotxlimits)
if plotylimits is None:
ax1.set_ylim(locations[:, 1].min() - 50 * yspacing,
locations[:, 1].max() + 50 * yspacing)
else:
ax1.set_ylim(plotylimits)
ax1.set_ylabel('Northing (m)', fontdict={'size': 10, 'weight': 'bold'})
ax1.set_xlabel('Easting (m)', fontdict={'size': 10, 'weight': 'bold'})
# ----plot depth view
ax2 = fig.add_subplot(1, 2, 2, aspect='auto')
for xp in xgrid:
ax2.plot([xp, xp], [-zgrid.sum(), 0], color='k')
for sdict in slst:
ax2.scatter(sdict['east_c'], 0, marker='v')
for zz, zp in enumerate(zgrid):
ax2.plot([xgrid.min(), xgrid.max()], [-zgrid[0:zz].sum(),
-zgrid[0:zz].sum()], color='k')
if plotzlimits is None:
ax2.set_ylim(-zgrid1.max(), 200)
else:
ax2.set_ylim(plotzlimits)
if plotxlimits is None:
ax2.set_xlim(locations[:, 0].min() - xspacing,
locations[:, 0].max() + xspacing)
else:
ax2.set_xlim(plotxlimits)
ax2.set_ylabel('Depth (m)', fontdict={'size': 10, 'weight': 'bold'})
ax2.set_xlabel('Easting (m)', fontdict={'size': 10, 'weight': 'bold'})
plt.show()
print '-' * 15
print ' Number of stations = {0}'.format(len(slst))
print ' Dimensions: '
print ' e-w = {0}'.format(xgrid.shape[0])
print ' n-s = {0}'.format(ygrid.shape[0])
print ' z = {0}'.format(zgrid.shape[0])
print ' Extensions: '
print ' e-w = {0:.1f} (m)'.format(xgrid.__abs__().sum())
print ' n-s = {0:.1f} (m)'.format(ygrid.__abs__().sum())
print ' 0-z = {0:.1f} (m)'.format(zgrid.__abs__().sum())
print '-' * 15
return ynodes, xnodes, zgrid, locations, slst
def writeWSDataFile(periodlst, edilst, sitesfn=None, outfn=None,
sitelocations=None, zerr=.05,
ptol=.15, zerrmap=[10, 1, 1, 10], savepath=None, ncol=5,
units='mv'):
"""
writes a data file for WSINV3D from winglink outputs
Inputs:
--------
**periodlst** :list
periods to extract from edifiles, can get them from
using the function getPeriods.
**edilst** : list
list of full paths to .edi files to use for inversion
**sitelocations** : np.array (ns,2)
array of station locations where [:,0] corresponds
to the east-west location and [:,1] corresponds to
the north-south location. This can be found from
Make3DGrid. Locations are in meters in grid
coordinates.
**sitesfn** : string
if you used Winglink to make the model then you need to
input the sites filename (full path)
**outfn** : string
if you used Winglink to make the model need to input the
winglink .out file (full path)
**savepath** : string
directory or full path to save data file to, default
path is dirname sitesfn.
saves as: savepath/WSDataFile.dat
*Need to input if you did not use Winglink*
**zerr** : float
percent error to give to impedance tensor components in
decimal form --> 10% = 0.10
*default* is .05
**ptol** : float
percent tolerance to locate frequencies in case edi files
don't have the same frequencies. Need to add interpolation.
*default* is 0.15
**zerrmap** : tuple (zxx,zxy,zyx,zyy)
multiple to multiply err of zxx,zxy,zyx,zyy by.
Note the total error is zerr*zerrmap[ii]
**ncol** : int
number of columns in outfn, sometimes it outputs different
number of columns.
Returns:
--------
**datafn** : full path to data file, saved in dirname(sitesfn) or
savepath where savepath can be a directory or full
filename
"""
ns = len(edilst)
# get units correctly
if units == 'mv':
zconv = 1. / 796.
# create the output filename
if savepath is None:
ofile = os.path.join(os.path.dirname(sitesfn), 'WSDataFile.dat')
elif savepath.find('.') == -1:
ofile = os.path.join(savepath, 'WSDataFile.dat')
else:
ofile = savepath
# if there is a site file from someone who naively used winglink
if sitesfn is not None:
# read in stations from sites file
sitelst, slst = readSitesFile(sitesfn)
# get x and y locations on a relative grid
xlst, ylst = getXY(sitesfn, outfn, ncol=ncol)
# if the user made a grid in python or some other fashion
if sitelocations is not None:
if isinstance(sitelocations[0], dict):
xlst = np.zeros(ns)
ylst = np.zeros(ns)
slst = []
for dd, sd in enumerate(sitelocations):
xlst[dd] = sd['east_c']
ylst[dd] = sd['north_c']
slst.append(sd['station'])
else:
xlst = sitelocations[:, 0]
ylst = sitelocations[:, 1]
# define some lengths
nperiod = len(periodlst)
# make an array to put data into for easy writing
zarr = np.zeros((ns, nperiod, 4), dtype='complex')
#--------find frequencies-------------------------------------------------
linelst = []
for ss, edi in enumerate(edilst):
if not os.path.isfile(edi):
raise IOError('Could not find ' + edi)
z1 = Z.Z(edi)
sdict = {}
fspot = {}
for ff, f1 in enumerate(periodlst):
for kk, f2 in enumerate(z1.period):
if f2 >= (1 - ptol) * f1 and f2 <= (1 + ptol) * f1:
zderr = np.array([abs(z1.zvar[kk, nn, mm]) /
abs(z1.z[kk, nn, mm]) * 100
for nn in range(2) for mm in range(2)])
fspot['{0:.6g}'.format(f1)] = (kk, f2, zderr[0], zderr[1],
zderr[2], zderr[3])
zarr[ss, ff, :] = z1.z[kk].reshape(4,)
print z1.station, len(fspot)
sdict['fspot'] = fspot
sdict['station'] = z1.station
linelst.append(sdict)
#-----Write data file-----------------------------------------------------
ofid = file(ofile, 'w')
ofid.write('{0:d} {1:d} {2:d}\n'.format(ns, nperiod, 8))
# write N-S locations
ofid.write('Station_Location: N-S \n')
for ii in range(ns / 8 + 1):
for ll in range(8):
try:
ofid.write('{0:+.4e} '.format(ylst[ii * 8 + ll]))
except IndexError:
pass
ofid.write('\n')
# write E-W locations
ofid.write('Station_Location: E-W \n')
for ii in range(ns / 8 + 1):
for ll in range(8):
try:
ofid.write('{0:+.4e} '.format(xlst[ii * 8 + ll]))
except IndexError:
pass
ofid.write('\n')
# write impedance tensor components
for ii, p1 in enumerate(periodlst):
ofid.write('DATA_Period: {0:3.6f}\n'.format(p1))
for ss in range(ns):
zline = zarr[ss, ii, :]
for jj in range(4):
ofid.write('{0:+.4e} '.format(zline[jj].real * zconv))
ofid.write('{0:+.4e} '.format(-zline[jj].imag * zconv))
ofid.write('\n')
# write error as a percentage of Z
for ii, p1 in enumerate(periodlst):
ofid.write('ERROR_Period: {0:3.6f}\n'.format(p1))
for ss in range(ns):
zline = zarr[ss, ii, :]
for jj in range(4):
ofid.write('{0:+.4e} '.format(zline[jj].real * zerr * zconv))
ofid.write('{0:+.4e} '.format(zline[jj].imag * zerr * zconv))
ofid.write('\n')
# write error maps
for ii, p1 in enumerate(periodlst):
ofid.write('ERMAP_Period: {0:3.6f}\n'.format(p1))
for ss in range(ns):
zline = zarr[ss, ii, :]
for jj in range(4):
ofid.write('{0:.5e} '.format(zerrmap[jj]))
ofid.write('{0:.5e} '.format(zerrmap[jj]))
ofid.write('\n')
ofid.close()
print 'Wrote file to: ' + ofile
# write out places where errors are larger than error tolerance
errfid = file(os.path.join(os.path.dirname(ofile), 'DataErrorLocations.txt'),
'w')
errfid.write('Errors larger than error tolerance of: \n')
errfid.write('Zxx={0} Zxy={1} Zyx={2} Zyy={3} \n'.format(zerrmap[0] * zerr,
zerrmap[1] * zerr, zerrmap[2] * zerr, zerrmap[3] * zerr))
errfid.write('-' * 20 + '\n')
errfid.write('station T=period(s) Zij err=percentage \n')
for pfdict in linelst:
for kk, ff in enumerate(pfdict['fspot']):
if pfdict['fspot'][ff][2] > zerr * 100 * zerrmap[0]:
errfid.write(pfdict['station'] + ' T=' + ff +
' Zxx err={0:.3f} \n'.format(pfdict['fspot'][ff][2]))
if pfdict['fspot'][ff][3] > zerr * 100 * zerrmap[1]:
errfid.write(pfdict['station'] + ' T=' + ff +
' Zxy err={0:.3f} \n'.format(pfdict['fspot'][ff][3]))
if pfdict['fspot'][ff][4] > zerr * 100 * zerrmap[2]:
errfid.write(pfdict['station'] + ' T=' + ff +
' Zyx err={0:.3f} \n'.format(pfdict['fspot'][ff][4]))
if pfdict['fspot'][ff][5] > zerr * 100 * zerrmap[3]:
errfid.write(pfdict['station'] + ' T=' + ff +
' Zyy err={0:.3f} \n'.format(pfdict['fspot'][ff][5]))
errfid.close()
    print 'Wrote errors larger than tolerance to: '
print os.path.join(os.path.dirname(ofile), 'DataErrorLocations.txt')
return ofile, linelst
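# Hedged usage sketch (illustration only): writing the WSINV3D data file from
# periods picked with getPeriods and station locations built with make3DGrid.
# All variable names below (lp, edilst, slst) are hypothetical placeholders.
#
#     datafn, linelst = writeWSDataFile(lp.plst, edilst,
#                                       sitelocations=slst,
#                                       savepath=r"/home/ws3dinv",
#                                       zerr=.05, ptol=.15)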
def writeInit3DFile_wl(outfn, rhostart=100, ncol=5, savepath=None):
"""
Makes an init3d file for WSINV3D
Inputs:
outfn = full path to .out file from winglink
rhostart = starting homogeneous half space in Ohm-m
ncol = number of columns for data to be written in
savepath = full path to save the init file
Output:
ifile = full path to init file
"""
# create the output filename
if savepath is None:
ifile = os.path.join(os.path.dirname(outfn), 'init3d')
elif savepath.find('.') == -1:
ifile = os.path.join(savepath, 'init3d')
else:
ifile = savepath
dx, dy, dz = readWLOutFile(outfn, ncol=ncol)
nx = len(dx)
ny = len(dy)
nz = len(dz)
init_modelFH = open(ifile, 'w')
init_modelFH.write('#Initial model \n')
init_modelFH.write('%i %i %i 1 \n' % (ny, nx, nz))
# write y locations
y_string = ''
y_counter = 0
for y_idx in range(ny):
y_string += '%.3e ' % (dy[y_idx])
y_counter += 1
if y_counter == 8:
y_string += '\n'
y_counter = 0
if ny % 8:
y_string += '\n'
init_modelFH.write(y_string)
# write x locations
x_string = ''
x_counter = 0
for x_idx in range(nx):
x_string += '%.3e ' % (dx[x_idx])
x_counter += 1
if x_counter == 8:
x_string += '\n'
x_counter = 0
if nx % 8:
x_string += '\n'
init_modelFH.write(x_string)
# write z locations
z_string = ''
z_counter = 0
for z_idx in range(nz):
z_string += '%.3e ' % (dz[z_idx])
z_counter += 1
if z_counter == 8:
z_string += '\n'
z_counter = 0
if nz % 8:
z_string += '\n'
init_modelFH.write(z_string)
init_modelFH.write('%i \n' % int(rhostart))
init_modelFH.close()
print 'Wrote init file to: ' + ifile
return ifile
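# Hedged usage sketch (not from the original source): a 100 Ohm-m homogeneous
# starting model derived from a winglink .out file. The path is a hypothetical
# placeholder.
#
#     initfn = writeInit3DFile_wl(r"/home/winglink/model.out", rhostart=100)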
def writeInit3DFile(xgrid, ygrid, zgrid, savepath, reslst=100,
title='Initial File for WSINV3D', resmodel=None):
"""
    will write an initial file for wsinv3d. At the moment it can only make a
    layered model that can then be manipulated later. Input for a layered
    model is in layers, which is [(layer1, layer2, resistivity index for reslst)].
Note that x is assumed to be S --> N, y is assumed to be W --> E and
z is positive downwards.
Also, the xgrid, ygrid and zgrid are assumed to be the relative distance
between neighboring nodes. This is needed because wsinv3d builds the
model from the bottom NW corner assuming the cell width from the init file.
    Therefore the first line, or index=0, is the southernmost row of cells, so
    if you build a model by hand the layer block will look upside down if
you were to picture it in map view. Confusing, perhaps, but that is the
way it is.
    Arguments:
----------
**xgrid** : np.array(nx)
block dimensions (m) in the N-S direction. **Note** that
the code reads the grid assuming that index=0 is the
southern most point.
**ygrid** : np.array(ny)
block dimensions (m) in the E-W direction. **Note** that
the code reads in the grid assuming that index=0 is the
western most point.
**zgrid** : np.array(nz)
block dimensions (m) in the vertical direction. This is
positive downwards.
**savepath** : string
Path to the director where the initial file will be saved
as savepath/init3d
**reslst** : float or list
The start resistivity as a float or a list of resistivities
                    that correspond to the starting resistivity model
**resmodel**. This must be input if you input **resmodel**
**title** : string
Title that goes into the first line of savepath/init3d
**resmodel** : np.array((nx,ny,nz))
Starting resistivity model. Each cell is allocated an
                    integer value that corresponds to the index value of
**reslst**. **Note** again that the modeling code
assumes that the first row it reads in is the southern
most row and the first column it reads in is the
western most column. Similarly, the first plane it
reads in is the Earth's surface.
Returns:
--------
**initfn** : full path to initial file
"""
if not isinstance(reslst, list):
reslst = [reslst]
if os.path.isdir(savepath) == True:
ifn = os.path.join(savepath, "init3d")
else:
ifn = os.path.join(savepath)
ifid = file(ifn, 'w')
    ifid.write(('# ' + title + '\n').upper())
ifid.write('{0} {1} {2} {3}\n'.format(xgrid.shape[0], ygrid.shape[0],
zgrid.shape[0], len(reslst)))
# write S --> N node block
for ii, xx in enumerate(xgrid):
ifid.write('{0:>12}'.format('{:.1f}'.format(abs(xx))))
if ii != 0 and np.remainder(ii + 1, 5) == 0:
ifid.write('\n')
elif ii == xgrid.shape[0] - 1:
ifid.write('\n')
# write W --> E node block
for jj, yy in enumerate(ygrid):
ifid.write('{0:>12}'.format('{:.1f}'.format(abs(yy))))
if jj != 0 and np.remainder(jj + 1, 5) == 0:
ifid.write('\n')
elif jj == ygrid.shape[0] - 1:
ifid.write('\n')
# write top --> bottom node block
for kk, zz in enumerate(zgrid):
ifid.write('{0:>12}'.format('{:.1f}'.format(abs(zz))))
if kk != 0 and np.remainder(kk + 1, 5) == 0:
ifid.write('\n')
elif kk == zgrid.shape[0] - 1:
ifid.write('\n')
# write the resistivity list
for ff in reslst:
ifid.write('{0:.1f} '.format(ff))
ifid.write('\n')
# else:
if resmodel is None:
ifid.close()
else:
# get similar layers
l1 = 0
layers = []
for zz in range(zgrid.shape[0] - 1):
if (resmodel[:, :, zz] == resmodel[:, :, zz + 1]).all() == False:
layers.append((l1, zz))
l1 = zz + 1
# need to add on the bottom layers
layers.append((l1, zgrid.shape[0] - 1))
# write out the layers from resmodel
for ll in layers:
ifid.write('{0} {1}\n'.format(ll[0] + 1, ll[1] + 1))
for xx in range(xgrid.shape[0]):
for yy in range(ygrid.shape[0]):
ifid.write('{0:.0f} '.format(resmodel[xx, yy, ll[0]]))
ifid.write('\n')
print 'Wrote file to: ' + ifn
return ifn
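# Hedged usage sketch (illustration only): writing a homogeneous 100 Ohm-m
# initial model from grids built with make3DGrid; xg, yg, zg below stand for
# the node arrays returned by make3DGrid and the save path is hypothetical.
#
#     initfn = writeInit3DFile(xg, yg, zg, r"/home/ws3dinv", reslst=100)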
def readInit3D(initfn):
"""
read an initial file and return the pertinent information including grid
positions in coordinates relative to the center point (0,0) and
starting model.
Arguments:
----------
**initfn** : full path to initializing file.
Returns:
--------
**xgrid** : np.array(nx)
array of nodes in S --> N direction
**ygrid** : np.array(ny)
array of nodes in the W --> E direction
**zgrid** : np.array(nz)
array of nodes in vertical direction positive downwards
**resistivitivityModel** : dictionary
dictionary of the starting model with keys as layers
**reslst** : list
list of resistivity values in the model
**titlestr** : string
title string
"""
ifid = file(initfn, 'r')
ilines = ifid.readlines()
ifid.close()
titlestr = ilines[0]
# get size of dimensions, remembering that x is N-S, y is E-W, z is + down
nsize = ilines[1].strip().split()
nx = int(nsize[0])
ny = int(nsize[1])
nz = int(nsize[2])
    # initialize empty arrays to put things into
xnodes = np.zeros(nx)
ynodes = np.zeros(ny)
znodes = np.zeros(nz)
resmodel = np.zeros((nx, ny, nz))
# get the grid line locations
nn = 2
xx = 0
while xx < nx:
iline = ilines[nn].strip().split()
for xg in iline:
xnodes[xx] = float(xg)
xx += 1
nn += 1
yy = 0
while yy < ny:
iline = ilines[nn].strip().split()
for yg in iline:
ynodes[yy] = float(yg)
yy += 1
nn += 1
zz = 0
while zz < nz:
iline = ilines[nn].strip().split()
for zg in iline:
znodes[zz] = float(zg)
zz += 1
nn += 1
# put the grids into coordinates relative to the center of the grid
xgrid = xnodes.copy()
xgrid[:int(nx / 2)] = -np.array([xnodes[ii:int(nx / 2)].sum()
for ii in range(int(nx / 2))])
xgrid[int(nx / 2):] = np.array([xnodes[int(nx / 2):ii + 1].sum()
for ii in range(int(nx / 2), nx)]) - xnodes[int(nx / 2)]
ygrid = ynodes.copy()
ygrid[:int(ny / 2)] = -np.array([ynodes[ii:int(ny / 2)].sum()
for ii in range(int(ny / 2))])
ygrid[int(ny / 2):] = np.array([ynodes[int(ny / 2):ii + 1].sum()
for ii in range(int(ny / 2), ny)]) - ynodes[int(ny / 2)]
zgrid = np.array([znodes[:ii + 1].sum() for ii in range(nz)])
# get the resistivity values
reslst = [float(rr) for rr in ilines[nn].strip().split()]
nn += 1
# get model
iline = ilines[nn].strip().split()
if len(iline) == 0 or len(iline) == 1:
return xgrid, ygrid, zgrid, reslst, titlestr, resmodel
else:
while nn < len(ilines):
iline = ilines[nn].strip().split()
if len(iline) == 2:
l1 = int(iline[0]) - 1
l2 = int(iline[1])
nn += 1
xx = 0
elif len(iline) == 0:
break
else:
yy = 0
while yy < ny:
resmodel[xx, yy, l1:l2] = int(iline[yy])
# if l1==20:
# print nn,xx,yy,l1,l2,iline[yy]
yy += 1
xx += 1
nn += 1
return xgrid, ygrid, zgrid, reslst, titlestr, resmodel, xnodes, ynodes, znodes
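# --- Hedged usage sketch (added for illustration, not part of the original
# module): read an initial model file and report its grid dimensions. The
# file path below is a hypothetical placeholder.
def _example_read_init3d(initfn=r"/home/user/Inv1/init3d"):
    # readInit3D returns 6 values when no starting model block is present
    # and 9 values when one is, so unpack only the leading entries here.
    results = readInit3D(initfn)
    xgrid, ygrid, zgrid, reslst, titlestr = results[:5]
    print 'Title: ' + titlestr.strip()
    print 'Grid size (N-S, E-W, Z): {0} {1} {2}'.format(xgrid.shape[0],
                                                        ygrid.shape[0],
                                                        zgrid.shape[0])
    print 'Resistivity values: ' + str(reslst)
    return results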
def writeStartupFile(datafn, initialfn=None, outputfn=None, savepath=None,
apriorfn=None, modells=[5, 0.3, 0.3, 0.3], targetrms=1.0,
control=None, maxiter=10, errortol=None, staticfn=None,
lagrange=None):
"""
makes a startup file for WSINV3D. Most of these parameters do not need to be input
Inputs:
datafn = full path to the data file written for inversion
initialfn = full path to init file
outputfn = output stem to which the _model and _resp will be written
savepath = full path to save the startup file to
apriorfn = full path to apriori model
modells = smoothing parameters
targetrms = target rms
control = control model index (written as 'default' if None)
maxiter = maximum number of iterations
errortol = error tolerance level (written as 'default' if None)
staticfn = full path to static shift file name
lagrange = starting Lagrange multiplier
Outputs:
sfile = full path to startup file
"""
# create the output filename
if savepath is None:
sfile = os.path.join(os.path.dirname(datafn), 'startup')
elif savepath.find('.') == -1:
sfile = os.path.join(savepath, 'startup')
else:
sfile = savepath
sfid = file(sfile, 'w')
sfid.write(
'DATA_FILE' +
' ' *
11 +
'../' +
os.path.basename(datafn) +
'\n')
if outputfn is None:
sfid.write('OUTPUT_FILE' + ' ' * 9 + 'Iter_ \n')
else:
sfid.write('OUTPUT_FILE' + ' ' * 9 + outputfn + ' \n')
if initialfn is None:
sfid.write('INITIAL_MODEL_FILE' + ' ' * 2 + '../init3d \n')
else:
sfid.write('INITIAL_MODEL_FILE' + ' ' * 2 + initialfn + ' \n')
if apriorfn is None:
sfid.write('PRIOR_MODEL_FILE' + ' ' * 4 + 'default \n')
else:
sfid.write('PRIOR_MODEL_FILE' + ' ' * 4 + apriorfn + ' \n')
if control is None:
sfid.write('CONTROL_MODEL_INDEX' + ' ' + 'default \n')
else:
sfid.write('CONTROL_MODEL_INDEX' + ' ' + control + ' \n')
sfid.write('TARGET_RMS' + ' ' * 10 + '{0} \n'.format(targetrms))
sfid.write('MAX_NO_ITERATION' + ' ' * 4 + '{0} \n'.format(maxiter))
sfid.write('MODEL_LENGTH_SCALE' + ' ' * 2 +
'{0} {1:.1f} {1:.1f} {1:.1f} \n'.format(modells[0], modells[1],
modells[2], modells[3]))
if lagrange is None:
sfid.write('LAGRANGE_INFO' + ' ' * 7 + 'default \n')
else:
sfid.write('LAGRANGE_INFO' + ' ' * 7 + lagrange + ' \n')
if errortol is None:
sfid.write('ERROR_TOL_LEVEL' + ' ' * 5 + 'default \n')
else:
sfid.write('ERROR_TOL_LEVEL' + ' ' * 5 + errortol + ' \n')
if staticfn is None:
sfid.write('STATIC_FILE' + ' ' * 9 + 'default \n')
else:
sfid.write('STATIC_FILE' + ' ' * 9 + staticfn + ' \n')
sfid.close()
print 'Wrote startup file to: ' + sfile
return sfile
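# --- Hedged usage sketch (not part of the original module): write a startup
# file next to an existing data file. The path is a hypothetical placeholder
# and the keyword values shown are just the documented defaults.
def _example_write_startup(datafn=r"/home/user/Inv1/WSDataFile.dat"):
    return writeStartupFile(datafn,
                            initialfn=None,   # falls back to ../init3d
                            outputfn=None,    # falls back to Iter_
                            targetrms=1.0,
                            maxiter=10)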
def readDataFile(datafn, sitesfn=None, units='mv'):
"""
read in data file
Inputs:
datafn = full path to data file
sitesfn = full path to sites file output by winglink
units = 'mv' always
Outputs:
period = list of periods used for the inversion
zarr = array of impedance values
(number of stations x number of periods x 2 x 2)
zerr = array of errors in impedance component
nsarr = station locations relative distance from center of grid in N-S
ewarr = station locations relative distance from center of grid in E-W
sitelst = list of sites used in data
"""
if units == 'mv':
zconv = 796.
else:
zconv = 1
dfid = file(datafn, 'r')
dlines = dfid.readlines()
# get size number of stations, number of frequencies, number of Z
# components
ns, nf, nz = np.array(dlines[0].strip().split(), dtype='int')
nsstart = 2
findlst = []
for ii, dline in enumerate(dlines[1:50], 1):
if dline.find('Station_Location: N-S') == 0:
findlst.append(ii)
elif dline.find('Station_Location: E-W') == 0:
findlst.append(ii)
elif dline.find('DATA_Period:') == 0:
findlst.append(ii)
ncol = len(dlines[nsstart].strip().split())
# print ncol
# nsstop=nsstart+ns/ncol+1
# ewstart=nsstop+1
# ewstop=ewstart+ns/ncol+1
# zstart=ewstop
# print nsstop,ewstart,ewstop,zstart
# get site names if entered a sites file
if sitesfn is not None:
slst, sitelst = readSitesFile(sitesfn)
else:
sitelst = np.arange(ns)
# get N-S locations
nsarr = np.zeros(ns)
for ii, dline in enumerate(dlines[findlst[0] + 1:findlst[1]], 0):
dline = dline.strip().split()
for jj in range(ncol):
try:
nsarr[ii * ncol + jj] = float(dline[jj])
except IndexError:
pass
except ValueError:
break
# get E-W locations
ewarr = np.zeros(ns)
for ii, dline in enumerate(dlines[findlst[1] + 1:findlst[2]], 0):
dline = dline.strip().split()
for jj in range(ncol):
try:
ewarr[ii * ncol + jj] = float(dline[jj])
except IndexError:
pass
except ValueError:
break
# make some empty array to put stuff into
period = np.zeros(nf)
zarr = np.zeros((ns, nf, 2, 2), dtype=np.complex)
zerr = np.zeros_like(zarr)
zerrmap = np.zeros_like(zarr)
# get data
pcount = 0
zcount = 0
for ii, dl in enumerate(dlines[findlst[2]:findlst[2] + nf * (ns + 1)]):
if dl.find('DATA_Period') == 0:
period[pcount] = float(dl.strip().split()[1])
kk = 0
pcount += 1
if ii == 0:
pass
else:
zcount += 1
else:
zline = np.array(dl.strip().split(), dtype=np.float) * zconv
zarr[kk, zcount, :, :] = np.array([[zline[0] - 1j * zline[1],
zline[2] - 1j * zline[3]],
[zline[4] - 1j * zline[5],
zline[6] - 1j * zline[7]]])
kk += 1
# if the data file is made from this program or is the input data file, then
# get the errors from that file
if len(dlines) > 2 * nf * ns:
print 'Getting Error'
pecount = 0
zecount = 0
for ii, dl in enumerate(
dlines[findlst[2] + nf * (ns + 1):findlst[2] + 2 * nf * (ns + 1)]):
if dl.find('ERROR_Period') == 0:
kk = 0
pecount += 1
if ii == 0:
pass
else:
zecount += 1
else:
zline = np.array(dl.strip().split(), dtype=np.float) * zconv
zerr[kk, zecount, :, :] = np.array([[zline[0] - 1j * zline[1],
zline[2] - 1j * zline[3]],
[zline[4] - 1j * zline[5],
zline[6] - 1j * zline[7]]])
kk += 1
# get errormap values
if len(dlines) > 3 * nf * ns:
print 'Getting Error Map'
pmcount = 0
zmcount = 0
for ii, dl in enumerate(
dlines[findlst[2] + 2 * nf * (ns + 1):findlst[2] + 3 * nf * (ns + 1)]):
if dl.find('ERMAP_Period') == 0:
kk = 0
pmcount += 1
if ii == 0:
pass
else:
zmcount += 1
else:
# account for end of file empty lines
if len(dl.split()) > 2:
zline = np.array(dl.strip().split(), dtype=np.float)
zerrmap[kk, zmcount, :, :] = np.array([[zline[0] - 1j * zline[1],
zline[2] - 1j * zline[3]],
[zline[4] - 1j * zline[5],
zline[6] - 1j * zline[7]]])
kk += 1
# multiply errmap and error and convert from Ohm to mv/km nT
zerr = zerr * zerrmap
return period, zarr, zerr, nsarr, ewarr, sitelst
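# --- Hedged usage sketch (not part of the original module): read a data file
# and summarize its contents. The file path is a hypothetical placeholder.
def _example_read_data(datafn=r"/home/user/Inv1/WSDataFile.dat", sitesfn=None):
    period, zarr, zerr, nsarr, ewarr, sitelst = readDataFile(datafn,
                                                             sitesfn=sitesfn)
    print 'Number of stations: {0}'.format(zarr.shape[0])
    print 'Number of periods:  {0}'.format(len(period))
    print 'Period range: {0:.3g} to {1:.3g} s'.format(period[0], period[-1])
    return period, zarr, zerr, nsarr, ewarr, sitelst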
def plotDataResPhase(datafn, respfn=None, sitesfn=None, plottype='1', plotnum=1,
dpi=150, units='mv', colormode='color'):
"""
plot responses from the data file and if there is a response file
Inputs:
datafn = fullpath to data file
respfn = full path to response file; if not input, just the data is
plotted. Can be a list of response files from the same
inversion
plottype= '1' to plot each station in a different window
[stations] for list of stations to plot (stations are numbers)
plotnum = 1 for just xy,yx
2 for all components
"""
# plot in color mode or black and white
if colormode == 'color':
# color for data
cted = (0, 0, 1)
ctmd = (1, 0, 0)
mted = '*'
mtmd = '*'
# color for occam model
ctem = (0, .3, 1.0)
ctmm = (1, .3, 0)
mtem = '+'
mtmm = '+'
elif colormode == 'bw':
# color for data
cted = (0, 0, 0)
ctmd = (0, 0, 0)
mted = '*'
mtmd = 'v'
# color for occam model
ctem = (0.6, .6, .6)
ctmm = (.6, .6, .6)
mtem = '+'
mtmm = 'x'
# load the data file
period, dz, dzerr, north, east, slst = readDataFile(datafn, sitesfn=sitesfn,
units=units)
# get shape of impedance tensors
ns, nf = dz.shape[0], dz.shape[1]
# read in response files
if respfn is not None:
rzlst = []
rzerrlst = []
if not isinstance(respfn, list):
respfn = [respfn]
for rfile in respfn:
period, rz, rzerr, north, east, slst = readDataFile(rfile, sitesfn=sitesfn,
units=units)
rzlst.append(rz)
rzerrlst.append(rzerr)
else:
rzlst = []
# get number of response files
nr = len(rzlst)
if isinstance(plottype, list):
ns = len(plottype)
plt.rcParams['font.size'] = 10
plt.rcParams['figure.subplot.left'] = .13
plt.rcParams['figure.subplot.right'] = .98
plt.rcParams['figure.subplot.bottom'] = .1
plt.rcParams['figure.subplot.top'] = .92
plt.rcParams['figure.subplot.wspace'] = .25
plt.rcParams['figure.subplot.hspace'] = .05
fontdict = {'size': 12, 'weight': 'bold'}
gs = gridspec.GridSpec(2, 2, height_ratios=[2, 1.5], hspace=.1)
if plottype != '1':
pstationlst = []
if not isinstance(plottype, list):
plottype = [plottype]
for ii, station in enumerate(slst):
if isinstance(station, str):
for pstation in plottype:
if station.find(str(pstation)) >= 0:
pstationlst.append(ii)
else:
for pstation in plottype:
if station == int(pstation):
pstationlst.append(ii)
else:
pstationlst = np.arange(ns)
for jj in pstationlst:
print 'Plotting: ' + str(slst[jj])
# check for masked points
# find the masked points before overwriting them so the error array
# gets flagged at the same locations
mask = np.where(dz[jj] == 7.95204E5 - 7.95204E5j)
dz[jj][mask] = 0.0 + 0.0j
dzerr[jj][mask] = 1.0 + 1.0j
# convert to apparent resistivity and phase
rp = Z.ResPhase(dz[jj], period, zvar=dzerr[jj])
# find locations where points have been masked
nzxx = np.where(rp.resxx != 0)[0]
nzxy = np.where(rp.resxy != 0)[0]
nzyx = np.where(rp.resyx != 0)[0]
nzyy = np.where(rp.resyy != 0)[0]
if respfn is not None:
plotr = True
else:
plotr = False
# make figure for xy,yx components
if plotnum == 1:
fig = plt.figure(jj, [10, 12], dpi=dpi)
gs.update(hspace=.1, wspace=.15, left=.1)
elif plotnum == 2:
fig = plt.figure(jj, [12, 12], dpi=dpi)
gs.update(hspace=.1, wspace=.15, left=.07)
#---------plot the apparent resistivity--------------------------------
if plotnum == 1:
ax = fig.add_subplot(gs[0, :])
ax2 = fig.add_subplot(gs[1, :], sharex=ax)
ax.yaxis.set_label_coords(-.055, 0.5)
ax2.yaxis.set_label_coords(-.055, 0.5)
elif plotnum == 2:
ax = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[1, 0], sharex=ax)
ax.yaxis.set_label_coords(-.075, 0.5)
ax2.yaxis.set_label_coords(-.075, 0.5)
fig.suptitle(str(slst[jj]), fontdict={'size': 15, 'weight': 'bold'})
erxy = ax.errorbar(period[nzxy], rp.resxy[nzxy], marker=mted, ms=4,
mfc='None', mec=cted, mew=1, ls=':',
yerr=rp.resxyerr[nzxy], ecolor=cted, color=cted)
eryx = ax.errorbar(period[nzyx], rp.resyx[nzyx], marker=mtmd, ms=4,
mfc='None', mec=ctmd, mew=1, ls=':',
yerr=rp.resyxerr[nzyx], ecolor=ctmd, color=ctmd)
if plotr == True:
for rr in range(nr):
if colormode == 'color':
cxy = (0, .4 + float(rr) / (3 * nr), 0)
cyx = (.7 + float(rr) / (4 * nr), .13, .63 -
float(rr) / (4 * nr))
elif colormode == 'bw':
cxy = (1 - 1.25 / (rr + 2.), 1 - 1.25 /
(rr + 2.), 1 - 1.25 / (rr + 2.))
cyx = (1 - 1.25 / (rr + 2.), 1 - 1.25 /
(rr + 2.), 1 - 1.25 / (rr + 2.))
rpr = Z.ResPhase(rzlst[rr][jj], period, zvar=rzerrlst[rr][jj])
# rms=np.sqrt(np.sum([abs(np.linalg.det(rp.z[ll])-
# np.linalg.det(rpr.z[ll]))**2
# for ll in range(len(rp.period))])/len(rp.period))
rms = np.sqrt(np.mean([(np.sqrt(abs(np.linalg.det(rp.z[ll]))) -
np.sqrt(abs(np.linalg.det(rpr.z[ll]))))**2
for ll in range(len(rp.period))]))
print 'RMS = {:.2f}'.format(rms)
erxyr = ax.errorbar(period[nzxy], rpr.resxy[nzxy], marker=mtem,
ms=8, mfc='None', mec=cxy, mew=1, ls='--',
yerr=rpr.resxyerr[nzxy],
ecolor=cxy, color=cxy)
eryxr = ax.errorbar(period[nzyx], rpr.resyx[nzyx], marker=mtmm,
ms=8, mfc='None', mec=cyx, mew=1, ls='--',
yerr=rpr.resyxerr[nzyx],
ecolor=cyx, color=cyx)
#ax.set_xlabel('Period (s)',fontdict=fontdict)
pylab.setp(ax.get_xticklabels(), visible=False)
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim(xmin=10**(np.floor(np.log10(period[0]))),
xmax=10**(np.ceil(np.log10(period[-1]))))
ax.grid(True, alpha=.25)
if plotr == True:
ax.legend((erxy[0], eryx[0], erxyr[0], eryxr[0]),
('Data $E_x/B_y$', 'Data $E_y/B_x$',
'Mod $E_x/B_y$', 'Mod $E_y/B_x$'),
loc=0, markerscale=1, borderaxespad=.01, labelspacing=.07,
handletextpad=.2, borderpad=.02)
else:
ax.legend((erxy[0], eryx[0]), ('$E_x/B_y$', '$E_y/B_x$'), loc=0,
markerscale=1, borderaxespad=.01, labelspacing=.07,
handletextpad=.2, borderpad=.02)
#-----Plot the phase---------------------------------------------------
ax2.errorbar(period[nzxy], rp.phasexy[nzxy], marker=mted, ms=4, mfc='None',
mec=cted, mew=1, ls=':', yerr=rp.phasexyerr[nzxy], ecolor=cted,
color=cted)
ax2.errorbar(period[nzyx], np.array(rp.phaseyx[nzyx]) + 180, marker=mtmd,
ms=4, mfc='None', mec=ctmd, mew=1, ls=':',
yerr=rp.phaseyxerr[nzyx],
ecolor=ctmd, color=ctmd)
if plotr == True:
for rr in range(nr):
if colormode == 'color':
cxy = (0, .4 + float(rr) / (3 * nr), 0)
cyx = (.7 + float(rr) / (4 * nr), .13, .63 -
float(rr) / (4 * nr))
elif colormode == 'bw':
cxy = (1 - 1.25 / (rr + 2.), 1 - 1.25 /
(rr + 2.), 1 - 1.25 / (rr + 2.))
cyx = (1 - 1.25 / (rr + 2.), 1 - 1.25 /
(rr + 2.), 1 - 1.25 / (rr + 2.))
rpr = Z.ResPhase(rzlst[rr][jj], period, zvar=rzerrlst[rr][jj])
ax2.errorbar(period[nzxy], rpr.phasexy[nzxy], marker=mtem, ms=8,
mfc='None', mec=cxy, mew=1, ls='--',
yerr=rp.phasexyerr[nzxy],
ecolor=cxy, color=cxy)
ax2.errorbar(period[nzyx], np.array(rpr.phaseyx[nzyx]) + 180,
marker=mtmm, ms=8, mfc='None', mec=cyx, mew=1, ls='--',
yerr=rp.phaseyxerr[nzyx], ecolor=cyx, color=cyx)
ax2.set_xlabel('Period (s)', fontdict)
ax2.set_ylabel('Phase (deg)', fontdict)
ax2.set_xscale('log')
# ax2.set_xlim(xmin=10**(np.floor(np.log10(period[0]))),
# xmax=10**(np.ceil(np.log10(period[-1]))))
# check the phase to see if any point are outside of [0:90]
if min(rp.phasexy) < 0 or min(rp.phaseyx + 180) < 0:
pymin = min([min(rp.phasexy), min(rp.phaseyx + 180)])
if pymin > 0:
pymin = 0
else:
pymin = 0
if max(rp.phasexy) > 90 or max(rp.phaseyx + 180) > 90:
pymax = max([max(rp.phasexy), max(rp.phaseyx + 180)])
if pymax < 91:
pymax = 90
else:
pymax = 90
ax2.set_ylim(ymin=pymin, ymax=pymax)
ax2.yaxis.set_major_locator(MultipleLocator(30))
ax2.yaxis.set_minor_locator(MultipleLocator(1))
ax2.grid(True, alpha=.25)
if plotnum == 2:
#---------plot the apparent resistivity----------------------------
ax3 = plt.subplot(gs[0, 1])
ax3.yaxis.set_label_coords(-.1, 0.5)
erxx = ax3.errorbar(period[nzxx], rp.resxx[nzxx], marker=mted, ms=4,
mfc='None', mec=cted, mew=1, ls=':',
yerr=rp.resxxerr[nzxx],
ecolor=cted, color=cted)
eryy = ax3.errorbar(period[nzyy], rp.resyy[nzyy], marker=mtmd, ms=4,
mfc='None', mec=ctmd, mew=1, ls=':',
yerr=rp.resyyerr[nzyy],
ecolor=ctmd, color=ctmd)
if plotr == True:
for rr in range(nr):
if colormode == 'color':
cxy = (0, .4 + float(rr) / (3 * nr), 0)
cyx = (.7 + float(rr) / (4 * nr), .13, .63 -
float(rr) / (4 * nr))
elif colormode == 'bw':
cxy = (1 - 1.25 / (rr + 2.), 1 - 1.25 /
(rr + 2.), 1 - 1.25 / (rr + 2.))
cyx = (1 - 1.25 / (rr + 2.), 1 - 1.25 /
(rr + 2.), 1 - 1.25 / (rr + 2.))
rpr = Z.ResPhase(
rzlst[rr][jj], period, zvar=rzerrlst[rr][jj])
erxxr = ax3.errorbar(period[nzxx], rpr.resxx[nzxx],
marker=mtem, ms=8, mfc='None', mec=cxy,
mew=1, ls='--', yerr=rpr.resxxerr[nzxx],
ecolor=cxy, color=cxy)
eryyr = ax3.errorbar(period[nzyy], rpr.resyy[nzyy],
marker=mtmm, ms=8, mfc='None', mec=cyx,
mew=1, ls='--', yerr=rpr.resyyerr[nzyy],
ecolor=cyx, color=cyx)
ax3.set_yscale('log')
ax3.set_xscale('log')
pylab.setp(ax3.get_xticklabels(), visible=False)
ax3.set_xlim(xmin=10**(np.floor(np.log10(period[0]))),
xmax=10**(np.ceil(np.log10(period[-1]))))
ax3.grid(True, alpha=.25)
if plotr == True:
ax3.legend((erxx[0], eryy[0], erxxr[0], eryyr[0]),
('Data $E_x/B_x$', 'Data $E_y/B_y$',
'Mod $E_x/B_x$', 'Mod $E_y/B_y$'),
loc=0, markerscale=1, borderaxespad=.01,
labelspacing=.07, handletextpad=.2, borderpad=.02)
else:
ax3.legend((erxx[0], eryy[0]), ('$E_x/B_x$', '$E_y/B_y$'), loc=0,
markerscale=1, borderaxespad=.01, labelspacing=.07,
handletextpad=.2, borderpad=.02)
#-----Plot the phase-----------------------------------------------
ax4 = plt.subplot(gs[1, 1], sharex=ax3)
ax4.yaxis.set_label_coords(-.1, 0.5)
ax4.errorbar(period[nzxx], rp.phasexx[nzxx], marker=mted, ms=4,
mfc='None', mec=cted, mew=1, ls=':',
yerr=rp.phasexxerr[nzxx], ecolor=cted, color=cted)
ax4.errorbar(period[nzyy], np.array(rp.phaseyy[nzyy]), marker=mtmd,
ms=4, mfc='None', mec=ctmd, mew=1, ls=':',
yerr=rp.phaseyyerr[nzyy],
ecolor=ctmd, color=ctmd)
if plotr == True:
for rr in range(nr):
if colormode == 'color':
cxy = (0, .4 + float(rr) / (3 * nr), 0)
cyx = (.7 + float(rr) / (4 * nr), .13, .63 -
float(rr) / (4 * nr))
elif colormode == 'bw':
cxy = (1 - 1.25 / (rr + 2.), 1 - 1.25 /
(rr + 2.), 1 - 1.25 / (rr + 2.))
cyx = (1 - 1.25 / (rr + 2.), 1 - 1.25 /
(rr + 2.), 1 - 1.25 / (rr + 2.))
rpr = Z.ResPhase(
rzlst[rr][jj], period, zvar=rzerrlst[rr][jj])
ax4.errorbar(period[nzxx], rpr.phasexx[nzxx], marker=mtem,
ms=8, mfc='None', mec=cxy, mew=1, ls='--',
yerr=rp.phasexxerr[nzxx],
ecolor=cxy, color=cxy)
ax4.errorbar(period[nzyy], np.array(rpr.phaseyy[nzyy]),
marker=mtmm, ms=8, mfc='None', mec=cyx, mew=1,
ls='--', yerr=rp.phaseyyerr[nzyy],
ecolor=cyx, color=cyx)
ax4.set_xlabel('Period (s)', fontdict)
#ax4.set_ylabel('Imepdance Phase (deg)',fontdict)
ax4.set_xscale('log')
# ax2.set_xlim(xmin=10**(np.floor(np.log10(period[0]))),
# xmax=10**(np.ceil(np.log10(period[-1]))))
ax4.set_ylim(ymin=-180, ymax=180)
ax4.yaxis.set_major_locator(MultipleLocator(30))
ax4.yaxis.set_minor_locator(MultipleLocator(5))
ax4.grid(True, alpha=.25)
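# --- Hedged usage sketch (not part of the original module): plot data and a
# single response file for two named stations. The paths and station names
# are hypothetical placeholders.
def _example_plot_resphase():
    plotDataResPhase(r"/home/user/Inv1/WSDataFile.dat",
                     respfn=r"/home/user/Inv1/Iter_10.resp",
                     sitesfn=r"/home/user/Inv1/WS_Sites.sites",
                     plottype=['MT01', 'MT02'],
                     plotnum=2,
                     colormode='color')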
def plotTensorMaps(datafn, respfn=None, sitesfn=None, periodlst=None,
esize=(1, 1, 5, 5), ecolor='phimin',
colormm=[(0, 90), (0, 1), (0, 4), (-2, 2)],
xpad=.500, units='mv', dpi=150):
"""
plot phase tensor maps for data and/or response; each figure is for a
different period. If a response is input, a third column is added showing
the residual phase tensor, which highlights where the model is not fitting
the data well. Station locations are plotted in km and resistivities in Ohm-m.
Inputs:
datafn = full path to data file
respfn = full path to response file, if none just plots data
sitesfn = full path to sites file
periodlst = indices of periods you want to plot
esize = size of ellipses as:
0 = phase tensor ellipse
1 = phase tensor residual
2 = resistivity tensor ellipse
3 = resistivity tensor residual
ecolor = 'phimin' for coloring with phimin or 'beta' for beta coloring
colormm = list of min and max coloring for plot, list as follows:
0 = phase tensor min and max for ecolor in degrees
1 = phase tensor residual min and max [0,1]
2 = resistivity tensor coloring as resistivity on log scale
3 = resistivity tensor residual coloring as resistivity on
linear scale
xpad = padding of map from stations at extremities (km)
units = 'mv' to convert to Ohm-m
dpi = dots per inch of figure
"""
period, zd, zderr, nsarr, ewarr, sitelst = readDataFile(datafn, sitesfn=sitesfn,
units=units)
if respfn is not None:
period, zr, zrerr, nsarr, ewarr, sitelst = readDataFile(respfn, sitesfn=sitesfn,
units=units)
if periodlst is None:
periodlst = range(len(period))
# put locations into an logical coordinate system in km
nsarr = -nsarr / 1000
ewarr = -ewarr / 1000
# get coloring min's and max's
if colormm is not None:
ptmin, ptmax = (colormm[0][0] * np.pi / 180,
colormm[0][1] * np.pi / 180)
ptrmin, ptrmax = colormm[1]
rtmin, rtmax = colormm[2]
rtrmin, rtrmax = colormm[3]
else:
pass
# get ellipse sizes
ptsize = esize[0]
ptrsize = esize[1]
rtsize = esize[2]
rtrsize = esize[3]
plt.rcParams['font.size'] = 10
plt.rcParams['figure.subplot.left'] = .03
plt.rcParams['figure.subplot.right'] = .98
plt.rcParams['figure.subplot.bottom'] = .1
plt.rcParams['figure.subplot.top'] = .90
plt.rcParams['figure.subplot.wspace'] = .005
plt.rcParams['figure.subplot.hspace'] = .005
ns = zd.shape[0]
for ff, per in enumerate(periodlst):
print 'Plotting Period: {0:.5g}'.format(period[per])
fig = plt.figure(per + 1, dpi=dpi)
# get phase tensor
pt = Z.PhaseTensor(zd[:, per])
# get resistivity tensor
rt = Z.ResistivityTensor(zd[:, per], np.repeat(1. / period[per], ns))
if respfn is not None:
# get phase tensor and residual phase tensor
ptr = Z.PhaseTensor(zr[:, per])
ptd = Z.PhaseTensorResidual(zd[:, per], zr[:, per])
# get resistivity tensor and residual
rtr = Z.ResistivityTensor(
zr[:, per], np.repeat(1. / period[per], ns))
rtd = Z.ResistivityTensorResidual(zd[:, per], zr[:, per],
np.repeat(1. / period[per], ns))
if colormm is None:
if ecolor == 'phimin':
ptmin, ptmax = (ptr.phimin.min() / (np.pi / 2),
ptr.phimin.max() / (np.pi / 2))
elif ecolor == 'beta':
ptmin, ptmax = (ptr.beta.min(), ptr.beta.max())
ptrmin, ptrmax = (ptd.ecolor.min(), ptd.ecolor.max())
rtmin, rtmax = (np.log10(rtr.rhodet.min()),
np.log10(rtr.rhodet.max()))
rtrmin, rtrmax = rtd.rhodet.min(), rtd.rhodet.max()
# make subplots
ax1 = fig.add_subplot(2, 3, 1, aspect='equal')
ax2 = fig.add_subplot(2, 3, 2, aspect='equal')
ax3 = fig.add_subplot(2, 3, 3, aspect='equal')
ax4 = fig.add_subplot(2, 3, 4, aspect='equal')
ax5 = fig.add_subplot(2, 3, 5, aspect='equal')
ax6 = fig.add_subplot(2, 3, 6, aspect='equal')
for jj in range(ns):
#-----------plot data phase tensors---------------
eheightd = pt.phimin[jj] / ptr.phimax.max() * ptsize
ewidthd = pt.phimax[jj] / ptr.phimax.max() * ptsize
ellipd = Ellipse((ewarr[jj], nsarr[jj]), width=ewidthd,
height=eheightd, angle=pt.azimuth[jj])
# color ellipse:
if ecolor == 'phimin':
cvar = (pt.phimin[jj] / (np.pi / 2) -
ptmin) / (ptmax - ptmin)
if abs(cvar) > 1:
ellipd.set_facecolor((1, 0, .1))
else:
ellipd.set_facecolor((1, 1 - abs(cvar), .1))
if ecolor == 'beta':
cvar = (abs(pt.beta[jj]) - ptmin) / (ptmax - ptmin)
if abs(cvar) > 1:
ellipd.set_facecolor((1, 1, .1))
else:
ellipd.set_facecolor((1 - abs(cvar), 1 - abs(cvar), 1))
ax1.add_artist(ellipd)
#----------plot response phase tensors---------------------
eheightr = ptr.phimin[jj] / ptr.phimax.max() * ptsize
ewidthr = ptr.phimax[jj] / ptr.phimax.max() * ptsize
ellipr = Ellipse((ewarr[jj], nsarr[jj]), width=ewidthr,
height=eheightr, angle=ptr.azimuth[jj])
# color ellipse:
if ecolor == 'phimin':
cvar = (ptr.phimin[jj] / (np.pi / 2) -
ptmin) / (ptmax - ptmin)
if abs(cvar) > 1:
ellipr.set_facecolor((1, 0, .1))
else:
ellipr.set_facecolor((1, 1 - abs(cvar), .1))
if ecolor == 'beta':
cvar = (abs(ptr.beta[jj]) - ptmin) / (ptmax - ptmin)
if abs(cvar) > 1:
ellipr.set_facecolor((1, 1, .1))
else:
ellipr.set_facecolor((1 - abs(cvar), 1 - abs(cvar), 1))
ax2.add_artist(ellipr)
#--------plot residual phase tensors-------------
eheight = ptd.phimin[jj] / ptd.phimax.max() * ptrsize
ewidth = ptd.phimax[jj] / ptd.phimax.max() * ptrsize
ellip = Ellipse((ewarr[jj], nsarr[jj]), width=ewidth,
height=eheight, angle=ptd.azimuth[jj] - 90)
# color ellipse:
cvar = (ptd.ecolor[jj] - ptrmin) / (ptrmax - ptrmin)
if abs(cvar) > 1:
ellip.set_facecolor((0, 0, 0))
else:
ellip.set_facecolor((abs(cvar), .5, .5))
ax3.add_artist(ellip)
#-----------plot data resistivity tensors---------------
rheightd = rt.rhomin[jj] / rtr.rhomax.max() * rtsize
rwidthd = rt.rhomax[jj] / rtr.rhomax.max() * rtsize
rellipd = Ellipse((ewarr[jj], nsarr[jj]), width=rwidthd,
height=rheightd, angle=rt.rhoazimuth[jj])
# color ellipse:
cvar = (np.log10(rt.rhodet[jj]) - rtmin) / (rtmax - rtmin)
if cvar > .5:
if cvar > 1:
rellipd.set_facecolor((0, 0, 1))
else:
rellipd.set_facecolor(
(1 - abs(cvar), 1 - abs(cvar), 1))
else:
if cvar < -1:
rellipd.set_facecolor((1, 0, 0))
else:
rellipd.set_facecolor(
(1, 1 - abs(cvar), 1 - abs(cvar)))
ax4.add_artist(rellipd)
#----------plot response resistivity tensors-------------------
rheightr = rtr.rhomin[jj] / rtr.rhomax.max() * rtsize
rwidthr = rtr.rhomax[jj] / rtr.rhomax.max() * rtsize
rellipr = Ellipse((ewarr[jj], nsarr[jj]), width=rwidthr,
height=rheightr, angle=rtr.rhoazimuth[jj])
# color ellipse:
cvar = (np.log10(rtr.rhodet[jj]) - rtmin) / (rtmax - rtmin)
if cvar > .5:
if cvar > 1:
rellipr.set_facecolor((0, 0, 1))
else:
rellipr.set_facecolor(
(1 - abs(cvar), 1 - abs(cvar), 1))
else:
if cvar < -1:
rellipr.set_facecolor((1, 0, 0))
else:
rellipr.set_facecolor(
(1, 1 - abs(cvar), 1 - abs(cvar)))
ax5.add_artist(rellipr)
#--------plot residual resistivity tensors-------------
rheight = rtd.rhomin[jj] / rtd.rhomax.max() * rtrsize
rwidth = rtd.rhomax[jj] / rtd.rhomax.max() * rtrsize
rellip = Ellipse((ewarr[jj], nsarr[jj]), width=rwidth,
height=rheight, angle=rtd.azimuth[jj] - 90)
# color ellipse:
cvar = (rtd.rhodet[jj] - rtrmin) / (rtrmax - rtrmin)
if cvar < 0:
if cvar < -1:
rellip.set_facecolor((0, 0, 1))
else:
rellip.set_facecolor((1 - abs(cvar), 1 - abs(cvar), 1))
else:
if cvar > 1:
rellip.set_facecolor((1, 0, 0))
else:
rellip.set_facecolor((1, 1 - abs(cvar), 1 - abs(cvar)))
ax6.add_artist(rellip)
for aa, ax in enumerate([ax1, ax2, ax3, ax4, ax5, ax6]):
ax.set_xlim(ewarr.min() - xpad, ewarr.max() + xpad)
ax.set_ylim(nsarr.min() - xpad, nsarr.max() + xpad)
ax.grid('on')
if aa < 3:
pylab.setp(ax.get_xticklabels(), visible=False)
if aa == 0 or aa == 3:
pass
else:
pylab.setp(ax.get_yticklabels(), visible=False)
cbax = mcb.make_axes(
ax, shrink=.9, pad=.05, orientation='vertical')
if aa == 0 or aa == 1:
cbx = mcb.ColorbarBase(cbax[0], cmap=ptcmap,
norm=Normalize(vmin=ptmin * 180 / np.pi,
vmax=ptmax * 180 / np.pi),
orientation='vertical', format='%.2g')
cbx.set_label('Phase (deg)',
fontdict={'size': 7, 'weight': 'bold'})
if aa == 2:
cbx = mcb.ColorbarBase(cbax[0], cmap=ptcmap2,
norm=Normalize(vmin=ptrmin,
vmax=ptrmax),
orientation='vertical', format='%.2g')
cbx.set_label('$\Delta_{\Phi}$',
fontdict={'size': 7, 'weight': 'bold'})
if aa == 3 or aa == 4:
cbx = mcb.ColorbarBase(cbax[0], cmap=rtcmapr,
norm=Normalize(vmin=10**rtmin,
vmax=10**rtmax),
orientation='vertical', format='%.2g')
cbx.set_label('App. Res. ($\Omega \cdot$m)',
fontdict={'size': 7, 'weight': 'bold'})
if aa == 5:
cbx = mcb.ColorbarBase(cbax[0], cmap=rtcmap,
norm=Normalize(vmin=rtrmin,
vmax=rtrmax),
orientation='vertical', format='%.2g')
cbx.set_label('$\Delta_{rho}$',
fontdict={'size': 7, 'weight': 'bold'})
plt.show()
#----Plot Just the data------------------
else:
if colormm is None:
if ecolor == 'phimin':
ptmin, ptmax = (pt.phimin.min() / (np.pi / 2),
pt.phimin.max() / (np.pi / 2))
elif ecolor == 'beta':
ptmin, ptmax = (pt.beta.min(), pt.beta.max())
rtmin, rtmax = (np.log10(rt.rhodet.min()),
np.log10(rt.rhodet.max()))
ax1 = fig.add_subplot(1, 2, 1, aspect='equal')
ax2 = fig.add_subplot(1, 2, 2, aspect='equal')
for jj in range(ns):
#-----------plot data phase tensors---------------
# check for nan in the array because it messes with the max
pt.phimax = np.nan_to_num(pt.phimax)
# scale the ellipse
eheightd = pt.phimin[jj] / pt.phimax.max() * ptsize
ewidthd = pt.phimax[jj] / pt.phimax.max() * ptsize
# make the ellipse
ellipd = Ellipse((ewarr[jj], nsarr[jj]), width=ewidthd,
height=eheightd, angle=pt.azimuth[jj])
# color ellipse:
if ecolor == 'phimin':
cvar = (pt.phimin[jj] / (np.pi / 2) -
ptmin) / (ptmax - ptmin)
if abs(cvar) > 1:
ellipd.set_facecolor((1, 0, .1))
else:
ellipd.set_facecolor((1, 1 - abs(cvar), .1))
if ecolor == 'beta':
cvar = (abs(pt.beta[jj]) - ptmin) / (ptmax - ptmin)
if abs(cvar) > 1:
ellipd.set_facecolor((1, 1, .1))
else:
ellipd.set_facecolor((1 - abs(cvar), 1 - abs(cvar), 1))
ax1.add_artist(ellipd)
#-----------plot data resistivity tensors---------------
rt.rhomax = np.nan_to_num(rt.rhomax)
rheightd = rt.rhomin[jj] / rt.rhomax.max() * rtsize
rwidthd = rt.rhomax[jj] / rt.rhomax.max() * rtsize
rellipd = Ellipse((ewarr[jj], nsarr[jj]), width=rwidthd,
height=rheightd, angle=rt.rhoazimuth[jj])
# color ellipse:
cvar = (np.log10(rt.rhodet[jj]) - rtmin) / (rtmax - rtmin)
if cvar > .5:
if cvar > 1:
rellipd.set_facecolor((0, 0, 1))
else:
rellipd.set_facecolor(
(1 - abs(cvar), 1 - abs(cvar), 1))
else:
if cvar < -1:
rellipd.set_facecolor((1, 0, 0))
else:
rellipd.set_facecolor(
(1, 1 - abs(cvar), 1 - abs(cvar)))
ax2.add_artist(rellipd)
for aa, ax in enumerate([ax1, ax2]):
ax.set_xlim(ewarr.min() - xpad, ewarr.max() + xpad)
ax.set_ylim(nsarr.min() - xpad, nsarr.max() + xpad)
ax.grid('on')
ax.set_xlabel('easting (km)', fontdict={'size': 10,
'weight': 'bold'})
if aa == 1:
pylab.setp(ax.get_yticklabels(), visible=False)
else:
ax.set_ylabel('northing (km)', fontdict={'size': 10,
'weight': 'bold'})
# cbax=mcb.make_axes(ax,shrink=.8,pad=.15,orientation='horizontal',
# anchor=(.5,1))
# l,b,w,h
# cbax=fig.add_axes([.1,.95,.35,.05])
if aa == 0:
cbax = fig.add_axes([.12, .97, .31, .02])
cbx = mcb.ColorbarBase(cbax, cmap=ptcmap,
norm=Normalize(vmin=ptmin * 180 / np.pi,
vmax=ptmax * 180 / np.pi),
orientation='horizontal', format='%.2g')
cbx.set_label('Phase (deg)',
fontdict={'size': 7, 'weight': 'bold'})
if aa == 1:
cbax = fig.add_axes([.59, .97, .31, .02])
cbx = mcb.ColorbarBase(cbax, cmap=rtcmapr,
norm=Normalize(vmin=10**rtmin,
vmax=10**rtmax),
orientation='horizontal', format='%.2g')
cbx.set_label('App. Res. ($\Omega \cdot$m)',
fontdict={'size': 7, 'weight': 'bold'})
cbx.set_ticks((10**rtmin, 10**rtmax))
plt.show()
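# --- Hedged usage sketch (not part of the original module): plot phase and
# resistivity tensor maps for the first two periods of a data/response pair.
# The file paths are hypothetical placeholders.
def _example_plot_tensor_maps():
    plotTensorMaps(r"/home/user/Inv1/WSDataFile.dat",
                   respfn=r"/home/user/Inv1/Iter_10.resp",
                   periodlst=[0, 1],
                   ecolor='phimin',
                   xpad=.5)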
def readModelFile(mfile, ncol=7):
"""
read in a model file as x-north, y-east, z-positive down
"""
mfid = file(mfile, 'r')
mlines = mfid.readlines()
# get info at the beginning of the file
info = mlines[0].strip().split()
infodict = dict(
[(info[0][1:], info[1]), (info[2], info[3]), (info[4], info[5])])
# get lengths of things
nx, ny, nz, nn = np.array(mlines[1].strip().split(), dtype=np.int)
# make empty arrays to put stuff into
xarr = np.zeros(nx)
yarr = np.zeros(ny)
zarr = np.zeros(nz)
resarr = np.zeros((nx, ny, nz))
mm = 0
nn = 2
while mm < nx:
xline = mlines[nn].strip().split()
for xx in xline:
xarr[mm] = float(xx)
mm += 1
nn += 1
mm = 0
while mm < ny:
yline = mlines[nn].strip().split()
for yy in yline:
yarr[mm] = float(yy)
mm += 1
nn += 1
mm = 0
while mm < nz:
zline = mlines[nn].strip().split()
for zz in zline:
zarr[mm] = float(zz)
mm += 1
nn += 1
# put the grids into coordinates relative to the center of the grid
nsarr = xarr.copy()
nsarr[:int(nx / 2)] = -np.array([xarr[ii:int(nx / 2)].sum()
for ii in range(int(nx / 2))])
nsarr[int(nx / 2):] = np.array([xarr[int(nx / 2):ii + 1].sum()
for ii in range(int(nx / 2), nx)]) - xarr[int(nx / 2)]
ewarr = yarr.copy()
ewarr[:int(ny / 2)] = -np.array([yarr[ii:int(ny / 2)].sum()
for ii in range(int(ny / 2))])
ewarr[int(ny / 2):] = np.array([yarr[int(ny / 2):ii + 1].sum()
for ii in range(int(ny / 2), ny)]) - yarr[int(ny / 2)]
zdepth = np.array([zarr[0:ii + 1].sum() - zarr[0] for ii in range(nz)])
mm = 0
for kk in range(nz):
for jj in range(ny):
for ii in range(nx):
resarr[(nx - 1) - ii, jj, kk] = float(mlines[nn + mm].strip())
mm += 1
return nsarr, ewarr, zdepth, resarr, infodict
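# --- Hedged usage sketch (not part of the original module): read a model
# file and print its dimensions and maximum depth. The path is a
# hypothetical placeholder.
def _example_read_model(mfile=r"/home/user/Inv1/Iter_10.model"):
    nsarr, ewarr, zdepth, resarr, infodict = readModelFile(mfile)
    print 'Model dimensions (N-S, E-W, Z): ' + str(resarr.shape)
    print 'Depth to bottom of model: {0:.1f} m'.format(zdepth[-1])
    return nsarr, ewarr, zdepth, resarr, infodict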
def plotDepthSlice(datafn, modelfn, savepath=None, map_scale='km', ew_limits=None,
ns_limits=None, depth_index=None, fig_dimensions=[4, 4],
dpi=300, font_size=7, climits=(0, 4), cmap='jet_r',
plot_grid='n', cb_dict={}):
"""
plot depth slices
"""
# create a path to save figure to if it doesn't already exist
if savepath is not None:
if not os.path.exists(savepath):
os.mkdir(savepath)
# make map scale
if map_scale == 'km':
dscale = 1000.
elif map_scale == 'm':
dscale = 1.
# read in data file to station locations
period, zz, zzerr, ns, ew, slst = readDataFile(datafn)
# scale the station locations to the desired units
ns /= dscale
ew /= dscale
# read in model file
x, y, z, resarr, idict = readModelFile(modelfn)
# scale the model grid to desired units
x /= dscale
y /= dscale
z /= dscale
# create a list of depth slices to plot
if depth_index is None:
zrange = range(z.shape[0])
elif isinstance(depth_index, int):
zrange = [depth_index]
elif isinstance(depth_index, list):
zrange = depth_index
# set the limits of the plot
if ew_limits is None:
xlimits = (np.floor(ew.min()), np.ceil(ew.max()))
else:
xlimits = ew_limits
if ns_limits is None:
ylimits = (np.floor(ns.min()), np.ceil(ns.max()))
else:
ylimits = ns_limits
# make a mesh grid of north and east
north1, east1 = np.meshgrid(x, y)
fdict = {'size': font_size + 2, 'weight': 'bold'}
cblabeldict = {-2: '$10^{-2}$', -1: '$10^{-1}$', 0: '$10^{0}$', 1: '$10^{1}$',
2: '$10^{2}$', 3: '$10^{3}$', 4: '$10^{4}$', 5: '$10^{5}$',
6: '$10^{6}$', 7: '$10^{7}$', 8: '$10^{8}$'}
plt.rcParams['font.size'] = font_size
for ii in zrange:
fig = plt.figure(ii, figsize=fig_dimensions, dpi=dpi)
plt.clf()
ax1 = fig.add_subplot(1, 1, 1, aspect='equal')
ax1.pcolormesh(east1, north1,
np.log10(np.rot90(resarr[:, :, ii], 3)),
cmap=cmap, vmin=climits[0], vmax=climits[1])
# plot the stations
for ee, nn in zip(ew, ns):
ax1.text(ee, nn, '*', verticalalignment='center',
horizontalalignment='center',
fontdict={'size': 5, 'weight': 'bold'})
# set axis properties
ax1.set_xlim(xlimits)
ax1.set_ylim(ylimits)
ax1.xaxis.set_minor_locator(MultipleLocator(100 * 1. / dscale))
ax1.yaxis.set_minor_locator(MultipleLocator(100 * 1. / dscale))
ax1.set_ylabel('Northing (' + map_scale + ')', fontdict=fdict)
ax1.set_xlabel('Easting (' + map_scale + ')', fontdict=fdict)
ax1.set_title('Depth = {:.3f} '.format(z[ii]) + '(' + map_scale + ')',
fontdict=fdict)
# plot the grid if desired
if plot_grid == 'y':
for xx in x:
ax1.plot([y.min(), y.max()], [xx, xx], lw=.1, color='k')
for yy in y:
ax1.plot([yy, yy], [x.min(), x.max()], lw=.1, color='k')
# plot the colorbar
try:
cb_dict['orientation']
except KeyError:
cb_dict['orientation'] = 'horizontal'
if cb_dict['orientation'] == 'horizontal':
try:
ax2 = fig.add_axes(cb_dict['position'])
except KeyError:
ax2 = fig.add_axes((ax1.axes.figbox.bounds[3] - .225,
ax1.axes.figbox.bounds[1] + .05, .3, .025))
elif cb_dict['orientation'] == 'vertical':
try:
ax2 = fig.add_axes(cb_dict['position'])
except KeyError:
ax2 = fig.add_axes((ax1.axes.figbox.bounds[2] - .15,
ax1.axes.figbox.bounds[3] - .21, .025, .3))
cb = mcb.ColorbarBase(ax2, cmap=cmap,
norm=Normalize(vmin=climits[0], vmax=climits[1]),
orientation=cb_dict['orientation'])
if cb_dict['orientation'] == 'horizontal':
cb.ax.xaxis.set_label_position('top')
cb.ax.xaxis.set_label_coords(.5, 1.3)
elif cb_dict['orientation'] == 'vertical':
cb.ax.yaxis.set_label_position('right')
cb.ax.yaxis.set_label_coords(1.25, .5)
cb.ax.yaxis.tick_left()
cb.ax.tick_params(axis='y', direction='in')
cb.set_label('Resistivity ($\Omega \cdot$m)',
fontdict={'size': font_size})
cb.set_ticks(np.arange(climits[0], climits[1] + 1))
cb.set_ticklabels([cblabeldict[cc]
for cc in np.arange(climits[0], climits[1] + 1)])
if savepath is not None:
fig.savefig(os.path.join(savepath,
"Depth_{}_{:.4f}.png".format(ii, z[ii])),
dpi=dpi)
fig.clear()
plt.close()
else:
pass
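# --- Hedged usage sketch (not part of the original module): save depth-slice
# maps for the first three model layers. The paths are hypothetical
# placeholders.
def _example_plot_depth_slices():
    plotDepthSlice(r"/home/user/Inv1/WSDataFile.dat",
                   r"/home/user/Inv1/Iter_10.model",
                   savepath=r"/home/user/Inv1/DepthSlices",
                   depth_index=[0, 1, 2],
                   map_scale='km',
                   climits=(0, 4))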
def computeMemoryUsage(nx, ny, nz, n_stations, n_zelements, n_period):
"""
compute the memory usage of a model
Arguments:
----------
**nx** : int
number of cells in N-S direction
**ny** : int
number of cells in E-W direction
**nz** : int
number of cells in vertical direction including air layers (7)
**n_stations** : int
number of stations
**n_zelements** : int
number of impedance tensor elements, either 4 or 8
**n_period** : int
number of periods to invert for
Returns:
--------
**mem_req** : float
approximate memory usage in GB
"""
mem_req = 1.2 * (8 * (n_stations * n_period * n_zelements)**2 +
8 * (nx * ny * nz * n_stations * n_period * n_zelements))
return mem_req * 1E-9
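# --- Hedged worked example (not part of the original module): estimate the
# memory needed for a 50 x 50 x 30 cell model with 20 stations, 8 impedance
# elements and 16 periods; the numbers are illustrative only.
def _example_memory_usage():
    mem_gb = computeMemoryUsage(50, 50, 30, 20, 8, 16)
    # 1.2 * (8 * (20*16*8)**2 + 8 * (50*50*30 * 20*16*8)) bytes ~= 1.9 GB
    print 'Approximate memory requirement: {0:.2f} GB'.format(mem_gb)
    return mem_gb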
|
MTgeophysics/mtpy
|
legacy/ws3dtools.py
|
Python
|
gpl-3.0
| 93,047
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
'''
@date: 2016-09-07
@author: Heysion Yuan
@copyright: 2016, Heysion Yuan <heysions@gmail.com>
@license: GPLv3
'''
import yaml
import json
from dabsv import HandlerBase
class DaemonLoginHandler(HandlerBase):
def get(self):
self.write("login failed")
pass
|
heysion/deepin-auto-build
|
dab/hub/manager/daemon.py
|
Python
|
gpl-3.0
| 320
|
# noinspection PyPackageRequirements
import wx
# noinspection PyPackageRequirements
import wx.html
_t = wx.GetTranslation
class ItemTraits(wx.Panel):
def __init__(self, parent, stuff, item):
wx.Panel.__init__(self, parent)
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(mainSizer)
self.traits = wx.html.HtmlWindow(self)
bgcolor = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW)
fgcolor = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT)
self.traits.SetPage("<body bgcolor='{}' text='{}'>{}</body>".format(
bgcolor.GetAsString(wx.C2S_HTML_SYNTAX),
fgcolor.GetAsString(wx.C2S_HTML_SYNTAX), item.traits.display))
self.traits.Bind(wx.EVT_CONTEXT_MENU, self.onPopupMenu)
self.traits.Bind(wx.EVT_KEY_UP, self.onKeyUp)
mainSizer.Add(self.traits, 1, wx.ALL | wx.EXPAND, 0)
self.Layout()
self.popupMenu = wx.Menu()
copyItem = wx.MenuItem(self.popupMenu, 1, _t('Copy'))
self.popupMenu.Append(copyItem)
self.popupMenu.Bind(wx.EVT_MENU, self.menuClickHandler, copyItem)
def onPopupMenu(self, event):
self.PopupMenu(self.popupMenu)
def menuClickHandler(self, event):
selectedMenuItem = event.GetId()
if selectedMenuItem == 1: # Copy was chosen
self.copySelectionToClipboard()
def onKeyUp(self, event):
keyCode = event.GetKeyCode()
# Ctrl + C
if keyCode == 67 and event.ControlDown():
self.copySelectionToClipboard()
# Ctrl + A
if keyCode == 65 and event.ControlDown():
self.traits.SelectAll()
def copySelectionToClipboard(self):
selectedText = self.traits.SelectionToText()
if selectedText == '': # if no selection, copy all content
selectedText = self.traits.ToText()
if wx.TheClipboard.Open():
wx.TheClipboard.SetData(wx.TextDataObject(selectedText))
wx.TheClipboard.Close()
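# --- Hedged standalone sketch (not part of the original class): the same
# clipboard pattern used in copySelectionToClipboard, shown outside the
# panel. Assumes a wx.App has already been created.
def copy_text_to_clipboard(text):
    # Open the system clipboard, replace its contents with plain text, then
    # close it again so other applications can access it.
    if wx.TheClipboard.Open():
        wx.TheClipboard.SetData(wx.TextDataObject(text))
        wx.TheClipboard.Close()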
|
pyfa-org/Pyfa
|
gui/builtinItemStatsViews/itemTraits.py
|
Python
|
gpl-3.0
| 2,013
|
# Authors:
# Rob Crittenden <rcritten@redhat.com>
# Pavel Zuna <pzuna@redhat.com>
#
# Copyright (C) 2009 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import six
from ipalib import api
from ipalib import Int, Str, Flag
from ipalib.constants import PATTERN_GROUPUSER_NAME
from ipalib.plugable import Registry
from .baseldap import (
add_external_post_callback,
pkey_to_value,
remove_external_post_callback,
LDAPObject,
LDAPCreate,
LDAPUpdate,
LDAPDelete,
LDAPSearch,
LDAPRetrieve,
LDAPAddMember,
LDAPRemoveMember,
LDAPQuery,
)
from .idviews import remove_ipaobject_overrides
from . import baseldap
from ipalib import _, ngettext
from ipalib import errors
from ipalib import output
from ipapython.dn import DN
if six.PY3:
unicode = str
if api.env.in_server and api.env.context in ['lite', 'server']:
try:
import ipaserver.dcerpc
_dcerpc_bindings_installed = True
except ImportError:
_dcerpc_bindings_installed = False
__doc__ = _("""
Groups of users
Manage groups of users. By default, new groups are POSIX groups. You
can add the --nonposix option to the group-add command to mark a new group
as non-POSIX. You can use the --posix argument with the group-mod command
to convert a non-POSIX group into a POSIX group. POSIX groups cannot be
converted to non-POSIX groups.
Every group must have a description.
POSIX groups must have a Group ID (GID) number. Changing a GID is
supported but can have an impact on your file permissions. It is not necessary
to supply a GID when creating a group. IPA will generate one automatically
if it is not provided.
EXAMPLES:
Add a new group:
ipa group-add --desc='local administrators' localadmins
Add a new non-POSIX group:
ipa group-add --nonposix --desc='remote administrators' remoteadmins
Convert a non-POSIX group to posix:
ipa group-mod --posix remoteadmins
Add a new POSIX group with a specific Group ID number:
ipa group-add --gid=500 --desc='unix admins' unixadmins
Add a new POSIX group and let IPA assign a Group ID number:
ipa group-add --desc='printer admins' printeradmins
Remove a group:
ipa group-del unixadmins
To add the "remoteadmins" group to the "localadmins" group:
ipa group-add-member --groups=remoteadmins localadmins
Add multiple users to the "localadmins" group:
ipa group-add-member --users=test1 --users=test2 localadmins
Remove a user from the "localadmins" group:
ipa group-remove-member --users=test2 localadmins
Display information about a named group.
ipa group-show localadmins
External group membership is designed to allow users from trusted domains
to be mapped to local POSIX groups in order to actually use IPA resources.
External members should be added to groups that are specifically created as
external and non-POSIX. Such a group should later be included in one of the
POSIX groups.
An external group member is currently a Security Identifier (SID) as defined by
the trusted domain. When adding external group members, it is possible to
specify them in either SID, or DOM\\name, or name@domain format. IPA will attempt
to resolve the passed name to a SID using the Global Catalog of the trusted domain.
Example:
1. Create group for the trusted domain admins' mapping and their local POSIX group:
ipa group-add --desc='<ad.domain> admins external map' ad_admins_external --external
ipa group-add --desc='<ad.domain> admins' ad_admins
2. Add security identifier of Domain Admins of the <ad.domain> to the ad_admins_external
group:
ipa group-add-member ad_admins_external --external 'AD\\Domain Admins'
3. Allow members of ad_admins_external group to be associated with ad_admins POSIX group:
ipa group-add-member ad_admins --groups ad_admins_external
4. List the external members of the ad_admins_external group to see their SIDs:
ipa group-show ad_admins_external
""")
register = Registry()
PROTECTED_GROUPS = (u'admins', u'trust admins', u'default smb group')
ipaexternalmember_param = Str('ipaexternalmember*',
cli_name='external',
label=_('External member'),
doc=_('Members of a trusted domain in DOM\\name or name@domain form'),
flags=['no_create', 'no_update', 'no_search'],
)
@register()
class group(LDAPObject):
"""
Group object.
"""
container_dn = api.env.container_group
object_name = _('group')
object_name_plural = _('groups')
object_class = ['ipausergroup']
object_class_config = 'ipagroupobjectclasses'
possible_objectclasses = ['posixGroup', 'mepManagedEntry', 'ipaExternalGroup']
permission_filter_objectclasses = ['posixgroup', 'ipausergroup']
search_attributes_config = 'ipagroupsearchfields'
default_attributes = [
'cn', 'description', 'gidnumber', 'member', 'memberof',
'memberindirect', 'memberofindirect', 'ipaexternalmember',
]
uuid_attribute = 'ipauniqueid'
attribute_members = {
'member': ['user', 'group'],
'memberof': ['group', 'netgroup', 'role', 'hbacrule', 'sudorule'],
'memberindirect': ['user', 'group'],
'memberofindirect': ['group', 'netgroup', 'role', 'hbacrule',
'sudorule'],
}
allow_rename = True
managed_permissions = {
'System: Read Groups': {
'replaces_global_anonymous_aci': True,
'ipapermbindruletype': 'anonymous',
'ipapermright': {'read', 'search', 'compare'},
'ipapermdefaultattr': {
'businesscategory', 'cn', 'description', 'gidnumber',
'ipaexternalmember', 'ipauniqueid', 'mepmanagedby', 'o',
'objectclass', 'ou', 'owner', 'seealso',
'ipantsecurityidentifier'
},
},
'System: Read Group Membership': {
'replaces_global_anonymous_aci': True,
'ipapermbindruletype': 'all',
'ipapermright': {'read', 'search', 'compare'},
'ipapermdefaultattr': {
'member', 'memberof', 'memberuid', 'memberuser', 'memberhost',
},
},
'System: Read External Group Membership': {
'ipapermbindruletype': 'all',
'ipapermright': {'read', 'search', 'compare'},
'ipapermdefaultattr': {
'ipaexternalmember',
},
},
'System: Add Groups': {
'ipapermright': {'add'},
'replaces': [
'(target = "ldap:///cn=*,cn=groups,cn=accounts,$SUFFIX")(version 3.0;acl "permission:Add Groups";allow (add) groupdn = "ldap:///cn=Add Groups,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {'Group Administrators'},
},
'System: Modify Group Membership': {
'ipapermright': {'write'},
'ipapermtargetfilter': [
'(objectclass=ipausergroup)',
'(!(cn=admins))',
],
'ipapermdefaultattr': {'member'},
'replaces': [
'(targetattr = "member")(target = "ldap:///cn=*,cn=groups,cn=accounts,$SUFFIX")(version 3.0;acl "permission:Modify Group membership";allow (write) groupdn = "ldap:///cn=Modify Group membership,cn=permissions,cn=pbac,$SUFFIX";)',
'(targetfilter = "(!(cn=admins))")(targetattr = "member")(target = "ldap:///cn=*,cn=groups,cn=accounts,$SUFFIX")(version 3.0;acl "permission:Modify Group membership";allow (write) groupdn = "ldap:///cn=Modify Group membership,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {
'Group Administrators', 'Modify Group membership'
},
},
'System: Modify External Group Membership': {
'ipapermright': {'write'},
'ipapermtargetfilter': [
'(objectclass=ipaexternalgroup)',
],
'ipapermdefaultattr': {'ipaexternalmember'},
'default_privileges': {
'Group Administrators', 'Modify Group membership'
},
},
'System: Modify Groups': {
'ipapermright': {'write'},
'ipapermdefaultattr': {
'cn', 'description', 'gidnumber', 'ipauniqueid',
'mepmanagedby', 'objectclass'
},
'replaces': [
'(targetattr = "cn || description || gidnumber || objectclass || mepmanagedby || ipauniqueid")(target = "ldap:///cn=*,cn=groups,cn=accounts,$SUFFIX")(version 3.0;acl "permission:Modify Groups";allow (write) groupdn = "ldap:///cn=Modify Groups,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {'Group Administrators'},
},
'System: Remove Groups': {
'ipapermright': {'delete'},
'replaces': [
'(target = "ldap:///cn=*,cn=groups,cn=accounts,$SUFFIX")(version 3.0;acl "permission:Remove Groups";allow (delete) groupdn = "ldap:///cn=Remove Groups,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {'Group Administrators'},
},
'System: Read Group Compat Tree': {
'non_object': True,
'ipapermbindruletype': 'anonymous',
'ipapermlocation': api.env.basedn,
'ipapermtarget': DN('cn=groups', 'cn=compat', api.env.basedn),
'ipapermright': {'read', 'search', 'compare'},
'ipapermdefaultattr': {
'objectclass', 'cn', 'memberuid', 'gidnumber',
},
},
'System: Read Group Views Compat Tree': {
'non_object': True,
'ipapermbindruletype': 'anonymous',
'ipapermlocation': api.env.basedn,
'ipapermtarget': DN('cn=groups', 'cn=*', 'cn=views', 'cn=compat', api.env.basedn),
'ipapermright': {'read', 'search', 'compare'},
'ipapermdefaultattr': {
'objectclass', 'cn', 'memberuid', 'gidnumber',
},
},
}
label = _('User Groups')
label_singular = _('User Group')
takes_params = (
Str('cn',
pattern=PATTERN_GROUPUSER_NAME,
pattern_errmsg='may only include letters, numbers, _, -, . and $',
maxlength=255,
cli_name='group_name',
label=_('Group name'),
primary_key=True,
normalizer=lambda value: value.lower(),
),
Str('description?',
cli_name='desc',
label=_('Description'),
doc=_('Group description'),
),
Int('gidnumber?',
cli_name='gid',
label=_('GID'),
doc=_('GID (use this option to set it manually)'),
minvalue=1,
),
ipaexternalmember_param,
)
@register()
class group_add(LDAPCreate):
__doc__ = _('Create a new group.')
msg_summary = _('Added group "%(value)s"')
takes_options = LDAPCreate.takes_options + (
Flag('nonposix',
cli_name='nonposix',
doc=_('Create as a non-POSIX group'),
default=False,
),
Flag('external',
cli_name='external',
doc=_('Allow adding external non-IPA members from trusted domains'),
default=False,
),
)
def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
# As both 'external' and 'nonposix' options have default= set for
# them, they will always be present in options dict, thus we can
# safely reference the values
assert isinstance(dn, DN)
if options['external']:
entry_attrs['objectclass'].append('ipaexternalgroup')
if 'gidnumber' in options:
raise errors.MutuallyExclusiveError(reason=_('gid cannot be set for external group'))
elif not options['nonposix']:
entry_attrs['objectclass'].append('posixgroup')
if 'gidnumber' not in options:
entry_attrs['gidnumber'] = baseldap.DNA_MAGIC
return dn
@register()
class group_del(LDAPDelete):
__doc__ = _('Delete group.')
msg_summary = _('Deleted group "%(value)s"')
def pre_callback(self, ldap, dn, *keys, **options):
assert isinstance(dn, DN)
config = ldap.get_ipa_config()
def_primary_group = config.get('ipadefaultprimarygroup', '')
def_primary_group_dn = self.obj.get_dn(def_primary_group)
if dn == def_primary_group_dn:
raise errors.DefaultGroupError()
group_attrs = self.obj.methods.show(
self.obj.get_primary_key_from_dn(dn), all=True
)['result']
if keys[0] in PROTECTED_GROUPS:
raise errors.ProtectedEntryError(label=_(u'group'), key=keys[0],
reason=_(u'privileged group'))
if 'mepmanagedby' in group_attrs:
raise errors.ManagedGroupError()
# Remove any ID overrides tied with this group
remove_ipaobject_overrides(ldap, self.obj.api, dn)
return dn
def post_callback(self, ldap, dn, *keys, **options):
assert isinstance(dn, DN)
try:
api.Command['pwpolicy_del'](keys[-1])
except errors.NotFound:
pass
return True
@register()
class group_mod(LDAPUpdate):
__doc__ = _('Modify a group.')
msg_summary = _('Modified group "%(value)s"')
takes_options = LDAPUpdate.takes_options + (
Flag('posix',
cli_name='posix',
doc=_('change to a POSIX group'),
),
Flag('external',
cli_name='external',
doc=_('change to support external non-IPA members from trusted domains'),
default=False,
),
)
def pre_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
is_protected_group = keys[-1] in PROTECTED_GROUPS
if 'rename' in options or 'cn' in entry_attrs:
if is_protected_group:
raise errors.ProtectedEntryError(label=u'group', key=keys[-1],
reason=u'Cannot be renamed')
if ('posix' in options and options['posix']) or 'gidnumber' in options:
old_entry_attrs = ldap.get_entry(dn, ['objectclass'])
dn = old_entry_attrs.dn
if 'ipaexternalgroup' in old_entry_attrs['objectclass']:
raise errors.ExternalGroupViolation()
if 'posixgroup' in old_entry_attrs['objectclass']:
if options['posix']:
raise errors.AlreadyPosixGroup()
else:
old_entry_attrs['objectclass'].append('posixgroup')
entry_attrs['objectclass'] = old_entry_attrs['objectclass']
if 'gidnumber' not in options:
entry_attrs['gidnumber'] = baseldap.DNA_MAGIC
if options['external']:
if is_protected_group:
raise errors.ProtectedEntryError(label=u'group', key=keys[-1],
reason=u'Cannot support external non-IPA members')
old_entry_attrs = ldap.get_entry(dn, ['objectclass'])
dn = old_entry_attrs.dn
if 'posixgroup' in old_entry_attrs['objectclass']:
raise errors.PosixGroupViolation()
if 'ipaexternalgroup' in old_entry_attrs['objectclass']:
raise errors.AlreadyExternalGroup()
else:
old_entry_attrs['objectclass'].append('ipaexternalgroup')
entry_attrs['objectclass'] = old_entry_attrs['objectclass']
# Can't check for this in a validator because we lack context
if 'gidnumber' in options and options['gidnumber'] is None:
raise errors.RequirementError(name='gidnumber')
return dn
def exc_callback(self, keys, options, exc, call_func, *call_args, **call_kwargs):
# Check again for GID requirement in case someone tried to clear it
# using --setattr.
if call_func.__name__ == 'update_entry':
if isinstance(exc, errors.ObjectclassViolation):
if 'gidNumber' in str(exc) and 'posixGroup' in str(exc):
raise errors.RequirementError(name='gidnumber')
raise exc
@register()
class group_find(LDAPSearch):
__doc__ = _('Search for groups.')
member_attributes = ['member', 'memberof']
msg_summary = ngettext(
'%(count)d group matched', '%(count)d groups matched', 0
)
takes_options = LDAPSearch.takes_options + (
Flag('private',
cli_name='private',
doc=_('search for private groups'),
),
Flag('posix',
cli_name='posix',
doc=_('search for POSIX groups'),
),
Flag('external',
cli_name='external',
doc=_('search for groups with support of external non-IPA members from trusted domains'),
),
Flag('nonposix',
cli_name='nonposix',
doc=_('search for non-POSIX groups'),
),
)
def pre_callback(self, ldap, filter, attrs_list, base_dn, scope,
criteria=None, **options):
assert isinstance(base_dn, DN)
# filter groups by pseudo type
filters = []
if options['posix']:
search_kw = {'objectclass': ['posixGroup']}
filters.append(ldap.make_filter(search_kw, rules=ldap.MATCH_ALL))
if options['external']:
search_kw = {'objectclass': ['ipaExternalGroup']}
filters.append(ldap.make_filter(search_kw, rules=ldap.MATCH_ALL))
if options['nonposix']:
search_kw = {'objectclass': ['posixGroup' , 'ipaExternalGroup']}
filters.append(ldap.make_filter(search_kw, rules=ldap.MATCH_NONE))
# if looking for private groups, we need to create a new search filter,
# because private groups have different object classes
if options['private']:
# filter based on options, oflt
search_kw = self.args_options_2_entry(**options)
search_kw['objectclass'] = ['posixGroup', 'mepManagedEntry']
            oflt = ldap.make_filter(search_kw, rules=ldap.MATCH_ALL)
            # filter based on 'criteria' argument
            search_kw = {}
            config = ldap.get_ipa_config()
            attrs = config.get(self.obj.search_attributes_config, [])
            if len(attrs) == 1 and isinstance(attrs[0], six.string_types):
                search_attrs = attrs[0].split(',')
                for a in search_attrs:
                    search_kw[a] = criteria
            cflt = ldap.make_filter(search_kw, exact=False)
            filter = ldap.combine_filters((oflt, cflt), rules=ldap.MATCH_ALL)
        elif filters:
            filters.append(filter)
            filter = ldap.combine_filters(filters, rules=ldap.MATCH_ALL)
        return (filter, base_dn, scope)
@register()
class group_show(LDAPRetrieve):
    __doc__ = _('Display information about a named group.')
    def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
        assert isinstance(dn, DN)
        if ('ipaexternalmember' in entry_attrs and
                len(entry_attrs['ipaexternalmember']) > 0 and
                'trust_resolve' in self.Command and
                not options.get('raw', False)):
            sids = entry_attrs['ipaexternalmember']
            result = self.Command.trust_resolve(sids=sids)
            for entry in result['result']:
                try:
                    idx = sids.index(entry['sid'][0])
                    sids[idx] = entry['name'][0]
                except ValueError:
                    pass
        return dn
@register()
class group_add_member(LDAPAddMember):
    __doc__ = _('Add members to a group.')
    takes_options = (ipaexternalmember_param,)
    def post_callback(self, ldap, completed, failed, dn, entry_attrs, *keys, **options):
        assert isinstance(dn, DN)
        result = (completed, dn)
        if 'ipaexternalmember' in options:
            if not _dcerpc_bindings_installed:
                raise errors.NotFound(reason=_('Cannot perform external member validation without '
                                               'Samba 4 support installed. Make sure you have installed '
                                               'server-trust-ad sub-package of IPA on the server'))
            domain_validator = ipaserver.dcerpc.DomainValidator(self.api)
            if not domain_validator.is_configured():
                raise errors.NotFound(reason=_('Cannot perform join operation without own domain configured. '
                                               'Make sure you have run ipa-adtrust-install on the IPA server first'))
            sids = []
            failed_sids = []
            for sid in options['ipaexternalmember']:
                if domain_validator.is_trusted_sid_valid(sid):
                    sids.append(sid)
                else:
                    try:
                        actual_sid = domain_validator.get_trusted_domain_object_sid(sid)
                    except errors.PublicError as e:
                        failed_sids.append((sid, e.strerror))
                    else:
                        sids.append(actual_sid)
            restore = []
            if 'member' in failed and 'group' in failed['member']:
                restore = failed['member']['group']
            failed['member']['group'] = list((id, id) for id in sids)
            result = add_external_post_callback(ldap, dn, entry_attrs,
                                                failed=failed,
                                                completed=completed,
                                                memberattr='member',
                                                membertype='group',
                                                externalattr='ipaexternalmember',
                                                normalize=False)
            failed['member']['group'] += restore + failed_sids
        return result
@register()
class group_remove_member(LDAPRemoveMember):
    __doc__ = _('Remove members from a group.')
    takes_options = (ipaexternalmember_param,)
    def pre_callback(self, ldap, dn, found, not_found, *keys, **options):
        assert isinstance(dn, DN)
        if keys[0] in PROTECTED_GROUPS and 'user' in options:
            protected_group_name = keys[0]
            result = api.Command.group_show(protected_group_name)
            users_left = set(result['result'].get('member_user', []))
            users_deleted = set(options['user'])
            if users_left.issubset(users_deleted):
                raise errors.LastMemberError(key=sorted(users_deleted)[0],
                                             label=_(u'group'), container=protected_group_name)
        return dn
    def post_callback(self, ldap, completed, failed, dn, entry_attrs, *keys, **options):
        assert isinstance(dn, DN)
        result = (completed, dn)
        if 'ipaexternalmember' in options:
            if not _dcerpc_bindings_installed:
                raise errors.NotFound(reason=_('Cannot perform external member validation without '
                                               'Samba 4 support installed. Make sure you have installed '
                                               'server-trust-ad sub-package of IPA on the server'))
            domain_validator = ipaserver.dcerpc.DomainValidator(self.api)
            if not domain_validator.is_configured():
                raise errors.NotFound(reason=_('Cannot perform join operation without own domain configured. '
                                               'Make sure you have run ipa-adtrust-install on the IPA server first'))
            sids = []
            failed_sids = []
            for sid in options['ipaexternalmember']:
                if domain_validator.is_trusted_sid_valid(sid):
                    sids.append(sid)
                else:
                    try:
                        actual_sid = domain_validator.get_trusted_domain_object_sid(sid)
                    except errors.PublicError as e:
                        failed_sids.append((sid, unicode(e)))
                    else:
                        sids.append(actual_sid)
            restore = []
            if 'member' in failed and 'group' in failed['member']:
                restore = failed['member']['group']
            failed['member']['group'] = list((id, id) for id in sids)
            result = remove_external_post_callback(ldap, dn, entry_attrs,
                                                   failed=failed,
                                                   completed=completed,
                                                   memberattr='member',
                                                   membertype='group',
                                                   externalattr='ipaexternalmember',
                                                   )
            failed['member']['group'] += restore + failed_sids
        return result
@register()
class group_detach(LDAPQuery):
    __doc__ = _('Detach a managed group from a user.')
    has_output = output.standard_value
    msg_summary = _('Detached group "%(value)s" from user "%(value)s"')
    def execute(self, *keys, **options):
        """
        This requires updating both the user and the group. We first need to
        verify that both the user and group can be updated, then we go
        about our work. We don't want a situation where only the user or
        group can be modified and we're left in a bad state.
        """
        ldap = self.obj.backend
        group_dn = self.obj.get_dn(*keys, **options)
        user_dn = self.api.Object['user'].get_dn(*keys)
        try:
            user_attrs = ldap.get_entry(user_dn)
        except errors.NotFound:
            self.obj.handle_not_found(*keys)
        is_managed = self.obj.has_objectclass(user_attrs['objectclass'], 'mepmanagedentry')
        if (not ldap.can_write(user_dn, "objectclass") or
                not (ldap.can_write(user_dn, "mepManagedEntry")) and is_managed):
            raise errors.ACIError(info=_('not allowed to modify user entries'))
        group_attrs = ldap.get_entry(group_dn)
        is_managed = self.obj.has_objectclass(group_attrs['objectclass'], 'mepmanagedby')
        if (not ldap.can_write(group_dn, "objectclass") or
                not (ldap.can_write(group_dn, "mepManagedBy")) and is_managed):
            raise errors.ACIError(info=_('not allowed to modify group entries'))
        objectclasses = user_attrs['objectclass']
        try:
            i = objectclasses.index('mepOriginEntry')
            del objectclasses[i]
            user_attrs['mepManagedEntry'] = None
            ldap.update_entry(user_attrs)
        except ValueError:
            # Somehow the user isn't managed, let it pass for now. We'll
            # let the group throw "Not managed".
            pass
        group_attrs = ldap.get_entry(group_dn)
        objectclasses = group_attrs['objectclass']
        try:
            i = objectclasses.index('mepManagedEntry')
        except ValueError:
            # this should never happen
            raise errors.NotFound(reason=_('Not a managed group'))
        del objectclasses[i]
        # Make sure the resulting group has the default group objectclasses
        config = ldap.get_ipa_config()
        def_objectclass = config.get(
            self.obj.object_class_config, objectclasses
        )
        objectclasses = list(set(def_objectclass + objectclasses))
        group_attrs['mepManagedBy'] = None
        group_attrs['objectclass'] = objectclasses
        ldap.update_entry(group_attrs)
        return dict(
            result=True,
            value=pkey_to_value(keys[0], options),
        )
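# Illustrative note (not part of the upstream plugin): the commands registered above
# back the ipa CLI, so in practice a managed group is detached with something like
#   ipa group-detach <groupname>
# and external (trusted-domain) members are added or removed through the
# ipaexternalmember option of group-add-member / group-remove-member.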
|
apophys/freeipa
|
ipaserver/plugins/group.py
|
Python
|
gpl-3.0
| 28,467
|
import logging
import hashlib
import redo
import requests
# Self file imports
import csum
import oddity
#@profile
def downloadmar(url, checksum, cipher='sha512', output_file=None):
""" Downloads the file specified by url, verifies the checksum.
The file is written to the location specified by output file,
if not specified, the downloaded file is returned.
List of Ciphers supported is the same as those supported by
`csum.py`
"""
logging.debug('Starting download for %s with checksum: %s' % (url, checksum))
response = requests.get(url)
if response.status_code != requests.codes.ok:
logging.debug('HTTP Request to %s failed with error code %d' % (url, response.status_code))
raise oddity.DownloadError('server error')
mar = response.content
# Verify hash
    if not csum.verify(mar, checksum, cipher=cipher):
logging.warning('verification of %s with checksum %s failed' % (url, checksum))
raise oddity.DownloadError('checksums do not match')
else:
logging.info('Verified download of %s' % url)
if output_file:
# write mar to file
try:
logging.info('Writing download %s to file %s' % (url, output_file))
with open(output_file, 'wb') as f:
f.write(mar)
            except Exception:
                logging.error('Error while downloading %s to file %s on disk' % (url, output_file))
                raise oddity.DownloadError('Failed to write file to disk')
else:
return None
else:
return mar
# return path of file or return true or false depending on whether
# download failed or succeeded
#@profile
#def downloadmar2(url, checksum, output_file=None):
def downloadmar2(url, checksum, output_file):
""" Downloads the file specified by url, verifies the checksum.
The file is written to the location specified by output file,
if not specified, the downloaded file is returned.
Chunking version
"""
logging.info('Starting download for %s with checksum: %s' % (url, checksum))
response = requests.get(url, stream=True)
if response.status_code != requests.codes.ok:
logging.debug('HTTP Request to %s failed with error code %d' % (url, response.status_code))
raise oddity.DownloadError('server error')
else:
try:
logging.info('Writing download %s to file %s' % (url, output_file))
with open(output_file, 'wb') as f:
for chunk in response.iter_content(1024*1024):
f.write(chunk)
        except Exception:
            logging.error('Error while downloading %s to file %s on disk' % (url, output_file))
            raise oddity.DownloadError('Failed to write file to disk')
else:
if not csum.verify(output_file, checksum, cipher='sha512', isfile=True):
logging.debug("verification of checksums failed")
raise oddity.DownloadError('checksums do not match')
return # Only works with an output_file
if __name__ == '__main__':
TEST_URL = "http://ftp.mozilla.org/pub/mozilla.org/firefox/nightly/2014-05-12-03-02-02-mozilla-central/firefox-32.0a1.en-US.mac.complete.mar"
    downloadmar(TEST_URL, 'da0ecd3c65f3f333ee42ca332b97e925', cipher='md5',
                output_file='test.mar')
    downloadmar(TEST_URL, '1a6bec1dd103f8aacbd450ec0787c94ccf07f5e100d7c356bf2'
                'fb75c8181998563e0ded4556c9fb050ee1e7451c1ac765bc1547c8c6ec6bc'
                'ffdf62ae0daf1150', cipher='sha512', output_file='test.mar')
|
ffledgling/Senbonzakura
|
senbonzakura/utils/fetch.py
|
Python
|
mpl-2.0
| 3,562
|
import datetime
import os
import re
import unittest
def build_substitution_table(params):
table = { '<PROJECT_NAME>' : params['project_name'],
'<PROJECT_NAME_LOWER>' : params['project_name'].lower(),
'<CURRENT_YEAR>' : str(datetime.datetime.now().year) }
    if params['developer_name'] is not None:
        table['<DEVELOPER_NAME>'] = params['developer_name']
    if params['github_user_name'] is not None:
        table['<GITHUB_USER_NAME>'] = params['github_user_name']
    if params['github_repo'] is not None:
        table['<GITHUB_REPO_NAME>'] = params['github_repo']
    if params['description'] is not None:
        table['<PROJECT_DESCRIPTION>'] = params['description']
return table
def substitute_in_line(line, substitutions):
str_buf = line
for (pattern, value) in substitutions.iteritems():
str_buf = re.sub(pattern, value, str_buf)
return str_buf
def substitute_in_file(file_name, substitutions):
with open(file_name, 'r') as f:
lines = f.readlines()
with open(file_name, 'w') as f:
for line in lines:
f.write(substitute_in_line(line, substitutions))
def build_file_list(dir_name):
files = []
for (dirpath, dirnames, filenames) in os.walk(dir_name):
for f in filenames:
files.append(os.path.join(dirpath, f))
return files
def configure_new_project(params):
project_dir = os.path.join(params['output_path'], params['project_name'].lower())
files = build_file_list(project_dir)
substitutions = build_substitution_table(params)
for f in files:
substitute_in_file(f, substitutions)
class PatternSubstitutionTests(unittest.TestCase):
    def testSingleOccurrenceInLine(self):
line = '<PROJECT_NAME> rest of the string'
self.assertEqual(substitute_in_line(line, { '<PROJECT_NAME>' : 'Name' }),
'Name rest of the string')
def testMultipleOccurrencesInLine(self):
line = '<DEVELOPER_NAME> is something else then <DEVELOPER_NAME>'
self.assertEqual(substitute_in_line(line, { '<DEVELOPER_NAME>' : 'Dev' }),
'Dev is something else then Dev')
def testMultiplePatternsInLine(self):
line = '<PROJECT_NAME> vegetables and then <GITHUB_USER_NAME>'
self.assertEqual(substitute_in_line(line, { '<PROJECT_NAME>' : 'Sky', '<GITHUB_USER_NAME>' : 'Cat' }),
'Sky vegetables and then Cat')
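# Illustrative usage sketch, not part of the original module: configure_new_project
# expects a params dict with the keys consumed above. All values below are invented,
# and the template tree under <output_path>/<project name, lowercased> must already exist.
if __name__ == '__main__':
    example_params = {
        'project_name': 'SkyCat',
        'developer_name': 'Jane Doe',
        'github_user_name': 'janedoe',
        'github_repo': 'skycat',
        'description': 'A demo project',
        'output_path': '/tmp/projects',
    }
    example_dir = os.path.join(example_params['output_path'],
                               example_params['project_name'].lower())
    # Only run the substitution when a real template tree is present.
    if os.path.isdir(example_dir):
        configure_new_project(example_params)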
|
radupopescu/merou
|
merou/template.py
|
Python
|
mpl-2.0
| 2,467
|
import argparse
import json
import logging
import os
import sys
from tools import localpaths
from six import iteritems
from . import virtualenv
here = os.path.dirname(__file__)
wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
def load_commands():
rv = {}
with open(os.path.join(here, "paths"), "r") as f:
paths = [item.strip().replace("/", os.path.sep) for item in f if item.strip()]
for path in paths:
abs_path = os.path.join(wpt_root, path, "commands.json")
base_dir = os.path.dirname(abs_path)
with open(abs_path, "r") as f:
data = json.load(f)
for command, props in iteritems(data):
assert "path" in props
assert "script" in props
rv[command] = {
"path": os.path.join(base_dir, props["path"]),
"script": props["script"],
"parser": props.get("parser"),
"parse_known": props.get("parse_known", False),
"help": props.get("help"),
"virtualenv": props.get("virtualenv", True),
"install": props.get("install", []),
"requirements": [os.path.join(base_dir, item)
for item in props.get("requirements", [])]
}
return rv
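# For illustration only (not in the original file): each commands.json read by
# load_commands() maps a command name onto the keys consumed above. A hypothetical
# entry could look like the following -- the command name, paths and script name
# are invented:
#
#     {
#         "example-cmd": {
#             "path": "example/run.py",
#             "script": "run",
#             "parser": "get_parser",
#             "parse_known": false,
#             "help": "Run the example command",
#             "virtualenv": true,
#             "install": ["mock"],
#             "requirements": ["requirements.txt"]
#         }
#     }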
def parse_args(argv, commands):
parser = argparse.ArgumentParser()
parser.add_argument("--venv", action="store", help="Path to an existing virtualenv to use")
parser.add_argument("--debug", action="store_true", help="Run the debugger in case of an exception")
subparsers = parser.add_subparsers(dest="command")
for command, props in iteritems(commands):
subparsers.add_parser(command, help=props["help"], add_help=False)
args, extra = parser.parse_known_args(argv)
return args, extra
def import_command(prog, command, props):
# This currently requires the path to be a module,
# which probably isn't ideal but it means that relative
# imports inside the script work
rel_path = os.path.relpath(props["path"], wpt_root)
parts = os.path.splitext(rel_path)[0].split(os.path.sep)
mod_name = ".".join(parts)
mod = __import__(mod_name)
for part in parts[1:]:
mod = getattr(mod, part)
script = getattr(mod, props["script"])
if props["parser"] is not None:
parser = getattr(mod, props["parser"])()
parser.prog = "%s %s" % (os.path.basename(prog), command)
else:
parser = None
return script, parser
def setup_virtualenv(path, props):
if path is None:
path = os.path.join(wpt_root, "_venv")
venv = virtualenv.Virtualenv(path)
venv.start()
for name in props["install"]:
venv.install(name)
for path in props["requirements"]:
venv.install_requirements(path)
return venv
def main(prog=None, argv=None):
logging.basicConfig(level=logging.INFO)
if prog is None:
prog = sys.argv[0]
if argv is None:
argv = sys.argv[1:]
commands = load_commands()
main_args, command_args = parse_args(argv, commands)
if not(len(argv) and argv[0] in commands):
sys.exit(1)
command = main_args.command
props = commands[command]
venv = None
if props["virtualenv"]:
venv = setup_virtualenv(main_args.venv, props)
script, parser = import_command(prog, command, props)
if parser:
if props["parse_known"]:
kwargs, extras = parser.parse_known_args(command_args)
extras = (extras,)
kwargs = vars(kwargs)
else:
extras = ()
kwargs = vars(parser.parse_args(command_args))
else:
extras = ()
kwargs = {}
if venv is not None:
args = (venv,) + extras
else:
args = extras
if script:
try:
rv = script(*args, **kwargs)
if rv is not None:
sys.exit(int(rv))
except Exception:
if main_args.debug:
import pdb
pdb.post_mortem()
else:
raise
sys.exit(0)
if __name__ == "__main__":
main()
|
mbrubeck/servo
|
tests/wpt/web-platform-tests/tools/wpt/wpt.py
|
Python
|
mpl-2.0
| 4,227
|
#! /usr/bin/env python
from setuptools import setup, find_packages
setup(
name="balrogclient",
version="0.0.4",
description="Balrog Admin API Client",
author="Release Engineers",
author_email="release@mozilla.com",
packages=['balrogclient'],
test_suite='balrogclient.test',
install_requires=[
'requests',
],
include_package_data=True,
)
|
aksareen/balrog
|
client/setup.py
|
Python
|
mpl-2.0
| 389
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from collections import Mapping
from mo_dots import Data, set_default, wrap, split_field, coalesce
from mo_future import sort_using_key
from mo_logs import Log
from pyLibrary import convert
from jx_base.expressions import TupleOp
from jx_elasticsearch.es52.aggs import count_dim, aggs_iterator, format_dispatch, drill
from jx_python.containers.cube import Cube
from mo_collections.matrix import Matrix
from mo_logs.strings import quote
FunctionType = type(lambda: 1)
def format_cube(decoders, aggs, start, query, select):
# decoders = sorted(decoders, key=lambda d: -d.edge.dim) # REVERSE DECODER ORDER, BECAUSE ES QUERY WAS BUILT IN REVERSE ORDER
new_edges = count_dim(aggs, decoders)
dims = []
for e in new_edges:
if isinstance(e.value, TupleOp):
e.allowNulls = False
extra = 0 if e.allowNulls is False else 1
dims.append(len(e.domain.partitions) + extra)
dims = tuple(dims)
matricies = [(s, Matrix(dims=dims, zeros=s.default)) for s in select]
for row, coord, agg in aggs_iterator(aggs, decoders):
for s, m in matricies:
try:
v = s.pull(agg)
m[coord] = v
except Exception as e:
# THIS HAPPENS WHEN ES RETURNS MORE TUPLE COMBINATIONS THAN DOCUMENTS
if agg.get('doc_count') != 0:
Log.error("Programmer error", cause=e)
cube = Cube(
query.select,
sort_using_key(new_edges, key=lambda e: e.dim), # ENSURE EDGES ARE IN SAME ORDER AS QUERY
{s.name: m for s, m in matricies}
)
cube.frum = query
return cube
def format_cube_from_aggop(decoders, aggs, start, query, select):
agg = drill(aggs)
matricies = [(s, Matrix(dims=[], zeros=s.default)) for s in select]
for s, m in matricies:
m[tuple()] = s.pull(agg)
cube = Cube(query.select, [], {s.name: m for s, m in matricies})
cube.frum = query
return cube
def format_table(decoders, aggs, start, query, select):
new_edges = count_dim(aggs, decoders)
header = new_edges.name + select.name
def data():
dims = tuple(len(e.domain.partitions) + (0 if e.allowNulls is False else 1) for e in new_edges)
is_sent = Matrix(dims=dims, zeros=0)
if query.sort and not query.groupby:
all_coord = is_sent._all_combos() # TRACK THE EXPECTED COMBINATIONS
for row, coord, agg in aggs_iterator(aggs, decoders):
missing_coord = all_coord.next()
while coord != missing_coord:
record = [d.get_value(missing_coord[i]) for i, d in enumerate(decoders)]
for s in select:
if s.aggregate == "count":
record.append(0)
else:
record.append(None)
yield record
missing_coord = all_coord.next()
output = [d.get_value(c) for c, d in zip(coord, decoders)]
for s in select:
output.append(s.pull(agg))
yield output
else:
for row, coord, agg in aggs_iterator(aggs, decoders):
is_sent[coord] = 1
output = [d.get_value(c) for c, d in zip(coord, decoders)]
for s in select:
output.append(s.pull(agg))
yield output
# EMIT THE MISSING CELLS IN THE CUBE
if not query.groupby:
for c, v in is_sent:
if not v:
record = [d.get_value(c[i]) for i, d in enumerate(decoders)]
for s in select:
if s.aggregate == "count":
record.append(0)
else:
record.append(None)
yield record
return Data(
meta={"format": "table"},
header=header,
data=list(data())
)
def format_table_from_groupby(decoders, aggs, start, query, select):
header = [d.edge.name.replace("\\.", ".") for d in decoders] + select.name
def data():
for row, coord, agg in aggs_iterator(aggs, decoders):
if agg.get('doc_count', 0) == 0:
continue
output = [d.get_value_from_row(row) for d in decoders]
for s in select:
output.append(s.pull(agg))
yield output
return Data(
meta={"format": "table"},
header=header,
data=list(data())
)
def format_table_from_aggop(decoders, aggs, start, query, select):
header = select.name
agg = drill(aggs)
row = []
for s in select:
row.append(s.pull(agg))
return Data(
meta={"format": "table"},
header=header,
data=[row]
)
def format_tab(decoders, aggs, start, query, select):
table = format_table(decoders, aggs, start, query, select)
def data():
yield "\t".join(map(quote, table.header))
for d in table.data:
yield "\t".join(map(quote, d))
return data()
def format_csv(decoders, aggs, start, query, select):
table = format_table(decoders, aggs, start, query, select)
def data():
yield ", ".join(map(quote, table.header))
for d in table.data:
yield ", ".join(map(quote, d))
return data()
def format_list_from_groupby(decoders, aggs, start, query, select):
def data():
for row, coord, agg in aggs_iterator(aggs, decoders):
if agg.get('doc_count', 0) == 0:
continue
output = Data()
for g, d in zip(query.groupby, decoders):
output[coalesce(g.put.name, g.name)] = d.get_value_from_row(row)
for s in select:
output[s.name] = s.pull(agg)
yield output
output = Data(
meta={"format": "list"},
data=list(data())
)
return output
def format_list(decoders, aggs, start, query, select):
new_edges = count_dim(aggs, decoders)
def data():
dims = tuple(len(e.domain.partitions) + (0 if e.allowNulls is False else 1) for e in new_edges)
is_sent = Matrix(dims=dims, zeros=0)
if query.sort and not query.groupby:
# TODO: USE THE format_table() TO PRODUCE THE NEEDED VALUES INSTEAD OF DUPLICATING LOGIC HERE
all_coord = is_sent._all_combos() # TRACK THE EXPECTED COMBINATIONS
for row, coord, agg in aggs_iterator(aggs, decoders):
missing_coord = all_coord.next()
while coord != missing_coord:
# INSERT THE MISSING COORDINATE INTO THE GENERATION
output = Data()
for i, d in enumerate(decoders):
output[query.edges[i].name] = d.get_value(missing_coord[i])
for s in select:
if s.aggregate == "count":
output[s.name] = 0
yield output
missing_coord = all_coord.next()
output = Data()
for e, c, d in zip(query.edges, coord, decoders):
output[e.name] = d.get_value(c)
for s in select:
output[s.name] = s.pull(agg)
yield output
else:
is_sent = Matrix(dims=dims, zeros=0)
for row, coord, agg in aggs_iterator(aggs, decoders):
is_sent[coord] = 1
output = Data()
for e, c, d in zip(query.edges, coord, decoders):
output[e.name] = d.get_value(c)
for s in select:
output[s.name] = s.pull(agg)
yield output
# EMIT THE MISSING CELLS IN THE CUBE
if not query.groupby:
for c, v in is_sent:
if not v:
output = Data()
for i, d in enumerate(decoders):
output[query.edges[i].name] = d.get_value(c[i])
for s in select:
if s.aggregate == "count":
output[s.name] = 0
yield output
output = Data(
meta={"format": "list"},
data=list(data())
)
return output
def format_list_from_aggop(decoders, aggs, start, query, select):
agg = drill(aggs)
if isinstance(query.select, list):
item = Data()
for s in select:
item[s.name] = s.pull(agg)
else:
item = select[0].pull(agg)
if query.edges or query.groupby:
return wrap({
"meta": {"format": "list"},
"data": [item]
})
else:
return wrap({
"meta": {"format": "value"},
"data": item
})
def format_line(decoders, aggs, start, query, select):
list = format_list(decoders, aggs, start, query, select)
def data():
for d in list.data:
yield convert.value2json(d)
return data()
set_default(format_dispatch, {
None: (format_cube, format_table_from_groupby, format_cube_from_aggop, "application/json"),
"cube": (format_cube, format_cube, format_cube_from_aggop, "application/json"),
"table": (format_table, format_table_from_groupby, format_table_from_aggop, "application/json"),
"list": (format_list, format_list_from_groupby, format_list_from_aggop, "application/json"),
# "csv": (format_csv, format_csv_from_groupby, "text/csv"),
# "tab": (format_tab, format_tab_from_groupby, "text/tab-separated-values"),
# "line": (format_line, format_line_from_groupby, "application/json")
})
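# Descriptive note (added for clarity, not in the original source): each value in
# format_dispatch is a 4-tuple of (edge formatter, groupby formatter,
# aggregation-only formatter, mime type), which is presumably how the calling code
# selects one of the format_* variants defined above for a given query shape.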
def _get(v, k, d):
for p in split_field(k):
try:
v = v.get(p)
if v is None:
return d
except Exception:
v = [vv.get(p) for vv in v]
return v
|
klahnakoski/MySQL-to-S3
|
vendor/jx_elasticsearch/es52/format.py
|
Python
|
mpl-2.0
| 10,438
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from nose.tools import *
import os
import tlscanary.sources_db as sdb
import tests
def test_sources_db_instance():
"""SourcesDB can list database handles"""
test_tmp_dir = os.path.join(tests.tmp_dir, "sources_db_test")
db = sdb.SourcesDB(tests.ArgsMock(workdir=test_tmp_dir))
handle_list = db.list()
assert_true(type(handle_list) is list, "handle listing is an actual list")
assert_true(len(handle_list) > 0, "handle listing is not empty")
assert_true(db.default in handle_list, "default handle appears in listing")
assert_true("list" not in handle_list, "`list` must not be an existing handle")
assert_true("debug" in handle_list, "`debug` handle is required for testing")
def test_sources_db_read():
"""SourcesDB can read databases"""
test_tmp_dir = os.path.join(tests.tmp_dir, "sources_db_test")
db = sdb.SourcesDB(tests.ArgsMock(workdir=test_tmp_dir))
src = db.read("debug")
assert_true(type(src) is sdb.Sources, "reading yields a Sources object")
assert_equal(len(src), len(src.rows), "length seems to be correct")
assert_true("hostname" in src[0].keys(), "`hostname` is amongst keys")
assert_true("rank" in src[0].keys(), "`rank` is amongst keys")
rows = [row for row in src]
assert_equal(len(rows), len(src), "yields expected number of iterable rows")
def test_sources_db_write_and_override():
"""SourcesDB databases can be written and overridden"""
test_tmp_dir = os.path.join(tests.tmp_dir, "sources_db_test")
db = sdb.SourcesDB(tests.ArgsMock(workdir=test_tmp_dir))
old = db.read("debug")
old_default = db.default
override = sdb.Sources("debug", True)
row_one = {"foo": "bar", "baz": "bang", "boom": "bang"}
row_two = {"foo": "bar2", "baz": "bang2", "boom": "bang2"}
override.append(row_one)
override.append(row_two)
db.write(override)
# New SourcesDB instance required to detect overrides
db = sdb.SourcesDB(tests.ArgsMock(workdir=test_tmp_dir))
assert_true(os.path.exists(os.path.join(test_tmp_dir, "sources", "debug.csv")), "override file is written")
assert_equal(db.default, "debug", "overriding the default works")
assert_not_equal(old_default, db.default, "overridden default actually changes")
new = db.read("debug")
assert_equal(len(new), 2, "number of overridden rows is correct")
assert_true(new[0] == row_one and new[1] == row_two, "new rows are written as expected")
assert_not_equal(old[0], new[0], "overridden rows actually change")
def test_sources_set_interface():
"""Sources object can be created from and yield sets"""
# Sets are assumed to contain (rank, hostname) pairs
src_set = {(1, "mozilla.org"), (2, "mozilla.com"), (3, "addons.mozilla.org")}
src = sdb.Sources("foo")
src.from_set(src_set)
assert_equal(len(src), 3, "database from set has correct length")
assert_equal(src_set, src.as_set(), "yielded set is identical to the original")
assert_equal(len(src.as_set(1, 2)), 1, "yielded subset has expected length")
def test_sources_sorting():
"""Sources object can sort its rows by rank"""
src_set = {(1, "mozilla.org"), (2, "mozilla.com"), (3, "addons.mozilla.org")}
src = sdb.Sources("foo")
src.from_set(src_set)
# Definitely "unsort"
if int(src.rows[0]["rank"]) < int(src.rows[1]["rank"]):
src.rows[0], src.rows[1] = src.rows[1], src.rows[0]
assert_false(int(src.rows[0]["rank"]) < int(src.rows[1]["rank"]) < int(src.rows[2]["rank"]), "list is scrambled")
src.sort()
assert_true(int(src.rows[0]["rank"]) < int(src.rows[1]["rank"]) < int(src.rows[2]["rank"]), "sorting works")
def test_sources_chunking():
"""Sources object can be iterated in chunks"""
src_set = {(1, "mozilla.org"), (2, "mozilla.com"), (3, "addons.mozilla.org"),
(4, "irc.mozilla.org"), (5, "firefox.com")}
assert_equal(len(src_set), 5, "hardcoded test set has expected length")
src = sdb.Sources("foo")
src.from_set(src_set)
next_chunk = src.iter_chunks(chunk_start=1, chunk_stop=20, chunk_size=2, min_chunk_size=100)
assert_equal(src.chunk_size, 100, "chunking respects minimum size setting")
assert_equal(src.chunk_start, 1, "chunking respects chunk start setting")
chunk = next_chunk(20)
assert_equal(len(chunk), 4, "chunks are not larger than remaining data")
read_set = set()
next_chunk = src.iter_chunks(chunk_size=2)
lengths = list()
for _ in xrange(10):
chunk = next_chunk(as_set=True)
if chunk is None:
break
lengths.append(len(chunk))
read_set.update(chunk)
assert_equal(lengths, [2, 2, 1], "chunks have expected lengths")
assert_equal(src_set, read_set, "chunks cover full set")
next_chunk = src.iter_chunks(chunk_size=10)
lengths = list()
lengths.append(len(next_chunk(1)))
lengths.append(len(next_chunk(2)))
lengths.append(len(next_chunk(3)))
assert_true(next_chunk() is None, "after last chunk comes None")
assert_true(next_chunk() is None, "after last chunk comes None again")
assert_equal(lengths, [1, 2, 2], "chunks size can be varied on-the-fly")
|
mwobensmith/tls-canary
|
tests/sources_db_test.py
|
Python
|
mpl-2.0
| 5,380
|
from django.test import TestCase
from .models import GameModel
from game_app.clean_files import reset_file, config_section_map, change_settings
import io
class JunkTestCase(TestCase):
"""Test junk_drawer functions."""
def setUp(self):
"""Setup."""
self.model = GameModel(title='test1', ini_file='ini_files/UserSettings.ini')
def test_reset_file_is_str(self):
"""Test reset file produces a str object."""
file = reset_file(self.model.ini_file)
self.assertIsInstance(file, str)
    def test_reset_file_is_at_beginning(self):
"""Test file has a length greater than 0."""
file = reset_file(self.model.ini_file)
self.assertGreater(len(file), 0)
def test_config_section_map(self):
"""Test Config section map."""
parsed_dict = config_section_map(self.model.ini_file)
# Assert first part of return is dict of file contents.
self.assertIsInstance(parsed_dict[0], dict)
        # Assert second part of return is str of file section.
self.assertIsInstance(parsed_dict[1], str)
def test_change_settings(self):
"""Test change_settings function changes a setting in file."""
with io.FileIO('foobar.ini', 'wb+') as file:
file.write(b'[Settings] zero_one=0')
file.seek(0)
            self.assertEqual(file.read(), b'[Settings] zero_one=0')
new_data = {'zero_one': '1'}
file.seek(0)
new_file = change_settings('Settings', new_data, file)
new_file.seek(0)
self.assertIn('zero_one = 1', new_file.read())
|
flegald/Gameini
|
gameini/game_app/test_clean_files.py
|
Python
|
mpl-2.0
| 1,612
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Stock Move Date Validation',
'version': '0.1',
'category': 'Warehouse Management',
'description': "",
'author': 'Moldeo Interactive',
'website': 'http://business.moldeo.coop',
'images': [],
'depends': ['base','stock'],
'demo': [],
'data': ['stock_view.xml'],
'test': [],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
morrillo/stock_move_validation
|
__openerp__.py
|
Python
|
agpl-3.0
| 1,399
|
class GooDiffDocument:
def __init__(self, url, replaces=None):
self.url = url
self.replaces = replaces or []
def debug(self):
print "Document url:", self.url
print "Document replaces:", self.replaces
|
quuxlabs/goodiff-core
|
includes/GooDiffDocument.py
|
Python
|
agpl-3.0
| 243
|
# -*- coding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2016 Trustcode - www.trustcode.com.br #
# Danimar Ribeiro <danimaribeiro@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from . import crm_helpdesk
from . import base_config
|
Trust-Code/trust-addons
|
trustcode_helpdesk_client/models/__init__.py
|
Python
|
agpl-3.0
| 1,599
|
# -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from akvo.utils import rsr_image_path
from django.db import models
from django.utils.translation import ugettext_lazy as _
from sorl.thumbnail.fields import ImageField
from ..fields import ValidXMLCharField
def logo_path(instance, file_name):
return rsr_image_path(instance, file_name, 'db/keyword/%(instance_pk)s/%(file_name)s')
class Keyword(models.Model):
label = ValidXMLCharField(
_('label'), max_length=100, unique=True, db_index=True,
help_text=_('Select keywords in case you are using an Akvo Page. Keywords linked to a '
'project will determine if a project appears on the Akvo Page or not.')
)
logo = ImageField(
_('logo'), blank=True, upload_to=logo_path,
help_text=_('Add your keyword logo here. You can only add one logo. '
'The logo will be shown on the project page, but not on Akvo Pages.<br/>'
'The logo should be about 1 MB in size, and should preferably be 75x75 '
'pixels and in PNG or JPG format.'),
)
def __str__(self):
return self.label
class Meta:
app_label = 'rsr'
ordering = ('label',)
verbose_name = _('keyword')
verbose_name_plural = _('keywords')
|
akvo/akvo-rsr
|
akvo/rsr/models/keyword.py
|
Python
|
agpl-3.0
| 1,547
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2018-05-03 13:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ovp_users', '0034_user_exceeded_login_attempts'),
]
operations = [
migrations.AddField(
model_name='user',
name='phone2',
field=models.CharField(blank=True, max_length=30, null=True, verbose_name='Phone 2'),
),
]
|
OpenVolunteeringPlatform/django-ovp-users
|
ovp_users/migrations/0035_user_phone2.py
|
Python
|
agpl-3.0
| 506
|
"""
Copyright (C) 2014 Kompetenzzentrum fuer wissensbasierte Anwendungen und Systeme
Forschungs- und Entwicklungs GmbH (Know-Center), Graz, Austria
office@know-center.at
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import generator
import rdflib
import math
import unicodedata
from ldva.libs.sparql.utils import SPARQLQuery
from django.utils import simplejson
class TableGenerator(generator.Generator):
mappingInfoDimension = None
mappingInfoMeasure = None
dimensions = None
labelOfDimensionArray = []
labelOfMeasureArray = []
measureContentArray = []
codeObject = {'code': """ var chartRowIndex = null; var loc=config.location;$(document).ready(function() {
$("#"+loc).html('<table cellpadding="0" cellspacing="0" border="0" class="display" id="example'+loc+'"></table>' );
$('#example'+loc).dataTable( { "oLanguage": { "oPaginate": { "sNext": "", "sPrevious": "" } }, @@@DATA@@@ } );
$("#"+loc).find('.display').before('<br />');
$("#"+loc).parents('.vis').css('overflow','auto');
chartRowIndex = $("#"+loc).parents('.vis-row').data('chartRowIndex');
table(null, channelMappings, '#'+loc, chartRowIndex, '#example'+loc);
});
"""}
results={'code':'', 'errors':''}
def __init__(self, mappingInfoForDimension, mappingIngfoForMeasure, mappingInfoForValue):
self.mappingInfoDimension = mappingInfoForDimension
self.mappingInfoMeasure = mappingIngfoForMeasure
self.mappingInfoValue = mappingInfoForValue
self.results = {'code':'', 'errors': ''}
def transform(self):
try:
self.results = {}
lineArraytwo = []
labOfdm = ""
tableForDim = {}
xEntries = []
tableForDimArray= []
for entry in self.mappingInfoDimension:
dim = entry['dimensionuri']
dimLabel = entry['label']
tableForDim = {'dimension' : '', 'label': ''}
tableForDim['dimension'] = dim
tableForDim['label'] = dimLabel
tableForDimArray.append(tableForDim)
tableForMesArray = []
for meas in self.mappingInfoMeasure:
value = meas ['measureuri']
label = meas ['label']
tableForMeasure = {'measure' : '', 'label': ''}
tableForMeasure ['measure'] = value
tableForMeasure ['label'] = label
tableForMesArray.append (tableForMeasure)
strgLabel = '"aoColumns":['
strResult = '"aaData":[ '
xAxisArray = []
for element in self.mappingInfoValue:
strg = ""
labelForDimension = ""
strg3 = ""
for i in range(len(tableForDimArray)):
xAxis = element['observation']['dimensionlabel%s'% (i)]
labelDim = tableForDimArray[i]['label']
labelForDimension = labelForDimension + ' {"sTitle":"' + labelDim + '"},'
strg = strg + '"'+xAxis+'",'
strg2 = ""
labelForValue = ""
for value in range(len(tableForMesArray)):
yAxis = element['observation']['measurevalue%s'%(value)]
labelValue = tableForMesArray[value]['label']
if not yAxis:
yAxis = str(0.0)
bol = self.isReal(yAxis)
if not bol:
yAxis = str(0.0)
strg2 = strg2 +yAxis+','
labelForValue = labelForValue + '{"sTitle":"' + labelValue + '"},'
tempStrg5List = list(labelForValue)
tempStrg5List[len(tempStrg5List)-1]=""
labelForValue = "".join(tempStrg5List)
strg3 = strg+ strg2
tempStrg3List = list(strg3)
tempStrg3List[len(tempStrg3List)-1]=""
strg3 = "".join(tempStrg3List)
strValueObject = "[" +strg3+ "], "
tempStrg4List = list(strValueObject)
tempStrg4List[len(tempStrg4List)-1]=""
strValueObject = "".join(tempStrg4List)
toDictObject = strValueObject
strResult = strResult + toDictObject
strgLabel = strgLabel+labelForDimension + labelForValue
tempList = list(strResult)
tempList[len(tempList)-1]=""
strEndResult = "".join(tempList)
strResult = strEndResult + "],"+ strgLabel + "]"
code=self.codeObject['code']
code = code.replace("@@@DATA@@@", strResult)
self.results['code'] = code
except Exception as ex:
raise Exception("-TableGenerator.transform: %s"%ex)
def isReal(self, txt):
try:
float(txt)
return True
except ValueError:
return False
|
patrickhoefler/linked-data-wizards
|
ldva/apps/visbuilder/tablegenerator.py
|
Python
|
agpl-3.0
| 5,804
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2015 Trustcode - www.trustcode.com.br #
# Danimar Ribeiro <danimaribeiro@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import logging
import StringIO
from openerp import api, models
from openerp.tools.translate import _
from openerp.exceptions import Warning as UserError
_logger = logging.getLogger(__name__)
try:
from ofxparse import OfxParser as ofxparser
except ImportError:
_logger.warn("ofxparse not found, OFX parsing disabled.")
ofxparser = None
class AccountBankStatementImport(models.TransientModel):
"""Extend model account.bank.statement."""
_inherit = 'account.bank.statement.import'
@api.model
def _find_bank_account_id(self, account_number):
""" Get res.partner.bank ID """
bank_account_id = None
if account_number and len(account_number) > 4:
sql = """select id from res_partner_bank
where (acc_number = %s) or ((bra_number || acc_number) = %s)"""
self.env.cr.execute(sql, [account_number, account_number])
res = self.env.cr.fetchone()
if res:
bank_account_id = res[0]
return bank_account_id
@api.model
def _check_ofx(self, data_file):
if ofxparser is None:
return False
try:
ofx = ofxparser.parse(StringIO.StringIO(data_file))
except:
return False
return ofx
@api.model
def _parse_file(self, data_file):
ofx = self._check_ofx(data_file)
if not ofx:
return super(AccountBankStatementImport, self)._parse_file(
data_file)
transactions = []
total_amt = 0.00
try:
indice = 1
for transaction in ofx.account.statement.transactions:
# Since ofxparse doesn't provide account numbers, we'll have
# to find res.partner and res.partner.bank here
# (normal behavious is to provide 'account_number', which the
# generic module uses to find partner/bank)
bank_account_id = partner_id = False
if transaction.payee:
banks = self.env['res.partner.bank'].search(
[('owner_name', '=', transaction.payee)], limit=1)
if banks:
bank_account = banks[0]
bank_account_id = bank_account.id
partner_id = bank_account.partner_id.id
vals_line = {
'date': transaction.date,
'name': transaction.payee + (
transaction.memo and ': ' + transaction.memo or ''),
'ref': transaction.id,
'amount': transaction.amount,
'unique_import_id': '%s-%s-%s-%s' % (
indice,
transaction.id,
transaction.payee,
transaction.memo),
'bank_account_id': bank_account_id,
'partner_id': partner_id,
}
indice += 1
# Memo (<NAME>) and payee (<PAYEE>) are not required
# field in OFX statement, cf section 11.4.3 Statement
# Transaction <STMTTRN> of the OFX specs: the required
# elements are in bold, cf 1.5 Conventions and these 2
# fields are not in bold.
# But the 'name' field of account.bank.statement.line is
# required=True, so we must always have a value !
# The field TRNTYPE is a required field in OFX
if not vals_line['name']:
vals_line['name'] = transaction.type.capitalize()
if transaction.checknum:
vals_line['name'] += ' %s' % transaction.checknum
total_amt += float(transaction.amount)
transactions.append(vals_line)
except Exception as e:
raise UserError(_(
"The following problem occurred during import. "
"The file might not be valid.\n\n %s" % e.message
))
vals_bank_statement = {
'name': ofx.account.number,
'transactions': transactions,
'balance_start': ofx.account.statement.balance,
'balance_end_real':
float(ofx.account.statement.balance) + total_amt,
}
return ofx.account.statement.currency, ofx.account.number, [
vals_bank_statement]
|
thinkopensolutions/odoo-brazil-banking
|
l10n_br_bank_statement_import/models/account_bank_statement_import.py
|
Python
|
agpl-3.0
| 6,195
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 DeneroTeam. (<http://www.deneroteam.com>)
# Copyright (C) 2011 Didotech Inc. (<http://www.didotech.com>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import product_catalog_wizard
|
iw3hxn/LibrERP
|
product_catalog_extend/wizard/__init__.py
|
Python
|
agpl-3.0
| 1,061
|
import essentia
import essentia.streaming as es
# import matplotlib for plotting
import matplotlib.pyplot as plt
import numpy as np
import sys
# algorithm parameters
params = {
    'frameSize': 2048,
    'hopSize': 512,
    'startFromZero': False,
    'sampleRate': 44100,
    'maxnSines': 100,
    'magnitudeThreshold': -74,
    'minSineDur': 0.02,
    'freqDevOffset': 10,
    'freqDevSlope': 0.001,
}
# loop over all frames
audioout = np.array(0)
counter = 0
# input and output files
import os.path
tutorial_dir = os.path.dirname(os.path.realpath(__file__))
inputFilename = os.path.join(tutorial_dir, 'singing-female.wav')
outputFilename = os.path.join(tutorial_dir, 'singing-female-out-sinemodel.wav')
out = np.array(0)
loader = es.MonoLoader(filename = inputFilename, sampleRate = params['sampleRate'])
pool = essentia.Pool()
fcut = es.FrameCutter(frameSize = params['frameSize'], hopSize = params['hopSize'], startFromZero = False);
w = es.Windowing(type = "blackmanharris92");
fft = es.FFT(size = params['frameSize']);
smanal = es.SineModelAnal(sampleRate = params['sampleRate'], maxnSines = params['maxnSines'], magnitudeThreshold = params['magnitudeThreshold'], freqDevOffset = params['freqDevOffset'], freqDevSlope = params['freqDevSlope'])
smsyn = es.SineModelSynth(sampleRate = params['sampleRate'], fftSize = params['frameSize'], hopSize = params['hopSize'])
ifft = es.IFFT(size = params['frameSize']);
overl = es.OverlapAdd (frameSize = params['frameSize'], hopSize = params['hopSize'], gain = 1./params['frameSize'] );
awrite = es.MonoWriter (filename = outputFilename, sampleRate = params['sampleRate']);
# analysis
loader.audio >> fcut.signal
fcut.frame >> w.frame
w.frame >> fft.frame
fft.fft >> smanal.fft
smanal.magnitudes >> (pool, 'magnitudes')
smanal.frequencies >> (pool, 'frequencies')
smanal.phases >> (pool, 'phases')
# synthesis
smanal.magnitudes >> smsyn.magnitudes
smanal.frequencies >> smsyn.frequencies
smanal.phases >> smsyn.phases
smsyn.fft >> ifft.fft
ifft.frame >> overl.frame
overl.signal >> awrite.audio
overl.signal >> (pool, 'audio')
essentia.run(loader)
|
carthach/essentia
|
src/examples/python/musicbricks-tutorials/2-sinemodel_analsynth.py
|
Python
|
agpl-3.0
| 2,076
|
# Copyright 2019 Camptocamp SA
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl)
from odoo import fields
class Many2manyCustom(fields.Many2many):
"""Many2manyCustom field is intended to customize Many2many properties.
:param create_table: defines if the relational table must be created
at the initialization of the field (boolean)
"""
create_table = True
def update_db(self, model, columns):
if not self.create_table:
return
return super().update_db(model, columns)
fields.Many2manyCustom = Many2manyCustom
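# Usage sketch (illustrative, not part of this module): after the assignment above a
# model can declare the field like a regular Many2many while skipping automatic table
# creation. The model, table and column names below are invented:
#
#     class ExampleModel(models.Model):
#         _name = 'example.model'
#
#         line_ids = fields.Many2manyCustom(
#             'example.line',
#             'example_model_line_rel',
#             'model_id',
#             'line_id',
#             create_table=False,
#         )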
|
OCA/server-tools
|
base_m2m_custom_field/fields.py
|
Python
|
agpl-3.0
| 581
|
# coding: utf-8
import json
import responses
from django.conf import settings
from django.urls import reverse
from ipaddress import ip_address
from rest_framework import status
from kpi.constants import SUBMISSION_FORMAT_TYPE_JSON, SUBMISSION_FORMAT_TYPE_XML
from kpi.exceptions import BadFormatException
from kpi.tests.kpi_test_case import KpiTestCase
from ..constants import HOOK_LOG_FAILED
from ..models import HookLog, Hook
class MockSSRFProtect(object):
@staticmethod
def _get_ip_address(url):
return ip_address('1.2.3.4')
class HookTestCase(KpiTestCase):
def setUp(self):
self.client.login(username="someuser", password="someuser")
self.asset = self.create_asset(
"some_asset",
content=json.dumps({'survey': [
{'type': 'text', 'name': 'q1'},
{'type': 'begin_group', 'name': 'group1'},
{'type': 'text', 'name': 'q2'},
{'type': 'text', 'name': 'q3'},
{'type': 'end_group'},
{'type': 'begin_group', 'name': 'group2'},
{'type': 'begin_group', 'name': 'subgroup1'},
{'type': 'text', 'name': 'q4'},
{'type': 'text', 'name': 'q5'},
{'type': 'text', 'name': 'q6'},
{'type': 'end_group'},
{'type': 'end_group'},
]}),
format='json')
self.asset.deploy(backend='mock', active=True)
self.asset.save()
self.hook = Hook()
self._submission_pk = 1
settings.CELERY_TASK_ALWAYS_EAGER = True
def _create_hook(self, return_response_only=False, **kwargs):
format_type = kwargs.get('format_type', SUBMISSION_FORMAT_TYPE_JSON)
if format_type not in [
SUBMISSION_FORMAT_TYPE_JSON,
SUBMISSION_FORMAT_TYPE_XML,
]:
raise BadFormatException(
'The format {} is not supported'.format(format_type)
)
self.__prepare_submission()
url = reverse('hook-list', args=(self.asset.uid,))
data = {
'name': kwargs.get('name', 'some external service with token'),
'endpoint': kwargs.get('endpoint', 'http://external.service.local/'),
'settings': kwargs.get('settings', {
'custom_headers': {
'X-Token': '1234abcd'
}
}),
'export_type': format_type,
'active': kwargs.get('active', True),
'subset_fields': kwargs.get('subset_fields', []),
'payload_template': kwargs.get('payload_template', None)
}
response = self.client.post(url, data, format='json')
if return_response_only:
return response
else:
self.assertEqual(response.status_code, status.HTTP_201_CREATED,
msg=response.data)
hook = self.asset.hooks.last()
self.assertTrue(hook.active)
return hook
def _send_and_fail(self):
"""
The public method which calls this method needs to be decorated by
`@responses.activate`
:return: dict
"""
self.hook = self._create_hook()
ServiceDefinition = self.hook.get_service_definition()
submissions = self.asset.deployment.get_submissions(self.asset.owner)
submission_id = submissions[0]['_id']
service_definition = ServiceDefinition(self.hook, submission_id)
first_mock_response = {'error': 'not found'}
# Mock first request's try
responses.add(responses.POST, self.hook.endpoint,
json=first_mock_response, status=status.HTTP_404_NOT_FOUND)
# Mock next requests' tries
responses.add(responses.POST, self.hook.endpoint,
status=status.HTTP_200_OK,
content_type='application/json')
# Try to send data to external endpoint
success = service_definition.send()
self.assertFalse(success)
# Retrieve the corresponding log
url = reverse('hook-log-list', kwargs={
'parent_lookup_asset': self.hook.asset.uid,
'parent_lookup_hook': self.hook.uid
})
response = self.client.get(url)
first_hooklog_response = response.data.get('results')[0]
# Result should match first try
self.assertEqual(
first_hooklog_response.get('status_code'), status.HTTP_404_NOT_FOUND
)
self.assertEqual(
json.loads(first_hooklog_response.get('message')),
first_mock_response,
)
# Fakes Celery n retries by forcing status to `failed`
# (where n is `settings.HOOKLOG_MAX_RETRIES`)
first_hooklog = HookLog.objects.get(
uid=first_hooklog_response.get('uid')
)
first_hooklog.change_status(HOOK_LOG_FAILED)
return first_hooklog_response
def __prepare_submission(self):
v_uid = self.asset.latest_deployed_version.uid
submission = {
'__version__': v_uid,
'q1': '¿Qué tal?',
'group1/q2': '¿Cómo está en el grupo uno la primera vez?',
'group1/q3': '¿Cómo está en el grupo uno la segunda vez?',
'group2/subgroup1/q4': '¿Cómo está en el subgrupo uno la primera vez?',
'group2/subgroup1/q5': '¿Cómo está en el subgrupo uno la segunda vez?',
'group2/subgroup1/q6': '¿Cómo está en el subgrupo uno la tercera vez?',
'group2/subgroup11/q1': '¿Cómo está en el subgrupo once?',
}
self.asset.deployment.mock_submissions([submission])
|
kobotoolbox/kpi
|
kobo/apps/hook/tests/hook_test_case.py
|
Python
|
agpl-3.0
| 5,718
|
#
# This file is part of the Wiftgish project.
#
# Copyright (C) 2013 Stephen M Buben <smbuben@gmail.com>
#
# Wiftgish is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Wiftgish is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Wiftgish. If not, see <http://www.gnu.org/licenses/>.
#
import os
def webapp_add_wsgi_middleware(app):
# Enable appstats on the development server but not in production.
if os.environ.get('SERVER_SOFTWARE', '').startswith('Dev'):
from google.appengine.ext.appstats import recording
app = recording.appstats_wsgi_middleware(app)
return app
|
smbuben/wiftgish
|
appengine_config.py
|
Python
|
agpl-3.0
| 1,068
|
"""
Utility method for the accounts application
"""
# Django
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.validators import validate_email
from django.forms import ValidationError
from django.utils.safestring import mark_safe
# Standard Library
import json
import logging
import random
import re
import string
# Third Party
import requests
import stripe
from social_django.utils import load_backend, load_strategy
# MuckRock
from muckrock.core.utils import retry_on_error, stripe_retry_on_error
logger = logging.getLogger(__name__)
def unique_username(name):
"""Create a globally unique username from a name and return it."""
# username can be at most 150 characters
# strips illegal characters from username
base_username = re.sub(r"[^\w\-.@]", "", name)[:141]
username = base_username
while User.objects.filter(username__iexact=username).exists():
username = "{}_{}".format(
base_username, "".join(random.sample(string.ascii_letters, 8))
)
return username
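# Worked example (illustrative, not in the original source): unique_username("Jane Doe")
# strips the space to yield "JaneDoe"; if that name is already taken, an underscore plus
# eight random ASCII letters are appended, e.g. "JaneDoe_kQzLmApB".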
def validate_stripe_email(email):
"""Validate an email from stripe"""
if not email:
return None
if len(email) > 254:
return None
try:
validate_email(email)
except ValidationError:
return None
return email
def stripe_get_customer(email, description):
"""Get a customer for an authenticated or anonymous user"""
return stripe_retry_on_error(
stripe.Customer.create,
description=description,
email=email,
idempotency_key=True,
)
def mailchimp_subscribe(
request, email, list_=settings.MAILCHIMP_LIST_DEFAULT, **kwargs
):
"""Adds the email to the mailing list throught the MailChimp API.
http://developer.mailchimp.com/documentation/mailchimp/reference/lists/members/"""
api_url = settings.MAILCHIMP_API_ROOT + "/lists/" + list_ + "/members/"
headers = {
"Content-Type": "application/json",
"Authorization": "apikey %s" % settings.MAILCHIMP_API_KEY,
}
merge_fields = {}
if "url" in kwargs:
merge_fields["URL"] = kwargs["url"]
if "source" in kwargs:
merge_fields["SOURCE"] = kwargs["source"]
data = {
"email_address": email,
"status": "subscribed",
"merge_fields": merge_fields,
}
response = retry_on_error(
requests.ConnectionError, requests.post, api_url, json=data, headers=headers
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exception:
if response.status_code == 400 and response.json()["title"] == "Member Exists":
if not kwargs.get("suppress_msg"):
messages.error(request, "Email is already a member of this list")
else:
if not kwargs.get("suppress_msg"):
messages.error(
request, "Sorry, an error occurred while trying to subscribe you."
)
logger.warning(exception)
return True
if not kwargs.get("suppress_msg"):
messages.success(
request,
"Thank you for subscribing to our newsletter. We sent a "
"confirmation email to your inbox.",
)
mixpanel_event(request, "Newsletter Sign Up", {"Email": email, "List": list_})
return False
def mixpanel_event(request, event, props=None, **kwargs):
"""Add an event to the session to be sent via javascript on the next page
load
"""
# only tracking logged in users for now
if props is None:
props = {}
if "mp_events" in request.session:
request.session["mp_events"].append((event, mark_safe(json.dumps(props))))
else:
request.session["mp_events"] = [(event, mark_safe(json.dumps(props)))]
if kwargs.get("signup"):
request.session["mp_alias"] = True
if kwargs.get("charge"):
request.session["mp_charge"] = kwargs["charge"]
def mini_login(request, username, password):
"""Provide authentication via squarelet via the password grant type"""
strategy = load_strategy(request)
backend = load_backend(strategy, "squarelet", redirect_uri=None)
backend.password_grant_auth = (username, password)
backend.STATE_PARAMETER = False
backend.REDIRECT_STATE = False
user = backend.complete(request=request)
return user
def user_entitlement_count(entitlement):
    """Count how many users have a certain entitlement"""
    return User.objects.filter(organizations__entitlement__slug=entitlement).distinct().count()
|
MuckRock/muckrock
|
muckrock/accounts/utils.py
|
Python
|
agpl-3.0
| 4,612
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import struct
import time
from hashlib import md5
import sys
import os
import random
import binascii
# CONFIG
server = "192.168.100.150"
username = ""
password = ""
host_name = "LIYUANYUAN"
host_os = "8089D"
host_ip = "10.30.22.17"
PRIMARY_DNS = "114.114.114.114"
dhcp_server = "0.0.0.0"
mac = 0xb888e3051680
CONTROLCHECKSTATUS = b'\x20'
ADAPTERNUM = b'\x01'
KEEP_ALIVE_VERSION = b'\xdc\x02'
AUTH_VERSION = b'\x0a\x00'
IPDOG = b'\x01'
ror_version = False
# CONFIG_END
'''
AUTH_VERSION:
unsigned char ClientVerInfoAndInternetMode;
unsigned char DogVersion;
'''
nic_name = ''  # Indicate your nic, e.g. 'eth0.2'.
bind_ip = '0.0.0.0'
class ChallengeException (Exception):
def __init__(self):
pass
class LoginException (Exception):
def __init__(self):
pass
def bind_nic():
try:
import fcntl
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
                struct.pack('256s', ifname[:15].encode('utf-8'))
)[20:24])
return get_ip_address(nic_name)
    except ImportError:
        print('The indicated-nic feature needs to run on a Unix based system.')
        return '0.0.0.0'
    except IOError:
        print(nic_name + ' is unacceptable!')
        return '0.0.0.0'
if nic_name != '':
bind_ip = bind_nic()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((bind_ip, 61440))
s.settimeout(3)
SALT = ''
IS_TEST = True
# specified fields based on version
CONF = "/etc/drcom.conf"
UNLIMITED_RETRY = True
EXCEPTION = False
DEBUG = False #log saves to file
LOG_PATH = '/tmp/drcom_client.log'
if IS_TEST:
DEBUG = True
LOG_PATH = 'drcom_client.log'
def log(*args, **kwargs):
s = ' '.join(args)
print(s)
if DEBUG:
with open(LOG_PATH, 'a') as f:
f.write(s + '\n')
def md5sum(s):
m = md5()
m.update(s)
return m.digest()
def dump(n):
s = '%x' % n
if len(s) & 1:
s = '0' + s
return binascii.unhexlify(bytes(s, 'ascii'))
def ror(md5, pwd):
    ret = b''
    for i in range(len(pwd)):
        x = md5[i] ^ ord(pwd[i])
        ret += struct.pack("B", ((x << 3) & 0xFF) + (x >> 5))
    return ret
def challenge(svr,ran):
while True:
t = struct.pack("<H", int(ran)%(0xFFFF))
s.sendto(b"\x01\x02" + t + b"\x09" + b"\x00"*15, (svr, 61440))
try:
data, address = s.recvfrom(1024)
log('[challenge] recv', str(binascii.hexlify(data))[2:][:-1])
        except socket.timeout:
            log('[challenge] timeout, retrying...')
            continue
if address == (svr, 61440):
break
else:
continue
log('[DEBUG] challenge:\n' + str(binascii.hexlify(data))[2:][:-1])
if data[:1] != b'\x02':
raise ChallengeException
log('[challenge] challenge packet sent.')
return data[4:8]
def keep_alive_package_builder(number,random,tail,type=1,first=False):
data = b'\x07'+ bytes([number]) + b'\x28\x00\x0B' + bytes([type])
if first :
data += b'\x0F\x27'
else:
data += KEEP_ALIVE_VERSION
data += b'\x2F\x12' + b'\x00' * 6
data += tail
data += b'\x00' * 4
#data += struct.pack("!H",0xdc02)
if type == 3:
foo = b''.join([bytes([int(i)]) for i in host_ip.split('.')]) # host_ip
#CRC
# edited on 2014/5/12, filled zeros to checksum
# crc = packet_CRC(data+foo)
crc = b'\x00' * 4
#data += struct.pack("!I",crc) + foo + b'\x00' * 8
data += crc + foo + b'\x00' * 8
else: #packet type = 1
data += b'\x00' * 16
return data
# def packet_CRC(s):
# ret = 0
# for i in re.findall('..', s):
# ret ^= struct.unpack('>h', i)[0]
# ret &= 0xFFFF
# ret = ret * 0x2c7
# return ret
def keep_alive2(*args):
#first keep_alive:
#number = number (mod 7)
    #status = 1: first packet sent by the user
    #         2: first packet received by the user
    #         3: 2nd packet sent by the user
    #         4: 2nd packet received by the user
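    # The code below implements that sequence: an initial "first" type-1 packet,
    # a second type-1 packet, a type-3 packet echoing the server's 4-byte tail,
    # then, roughly every 20 seconds, keep_alive1 followed by another type-1/type-3 pair.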
# Codes for test
tail = ''
packet = ''
svr = server
ran = random.randint(0, 0xFFFF)
ran += random.randint(1, 10)
# 2014/10/15 add by latyas, maybe svr sends back a file packet
svr_num = 0
packet = keep_alive_package_builder(svr_num,dump(ran), b'\x00'*4, 1, True)
while True:
log('[keep-alive2] send1', str(binascii.hexlify(packet))[2:][:-1])
s.sendto(packet, (svr, 61440))
data, address = s.recvfrom(1024)
log('[keep-alive2] recv1', str(binascii.hexlify(data))[2:][:-1])
if data.startswith(b'\x07\x00\x28\x00') or data.startswith(b'\x07' + bytes([svr_num]) + b'\x28\x00'):
break
elif data[:1] == b'\x07' and data[2:3] == b'\x10':
log('[keep-alive2] recv file, resending..')
svr_num = svr_num + 1
# packet = keep_alive_package_builder(svr_num,dump(ran),'\x00'*4,1, False)
break
else:
log('[keep-alive2] recv1/unexpected', str(binascii.hexlify(data))[2:][:-1])
#log('[keep-alive2] recv1', str(binascii.hexlify(data))[2:][:-1])
ran += random.randint(1, 10)
packet = keep_alive_package_builder(svr_num, dump(ran), b'\x00'*4, 1, False)
log('[keep-alive2] send2', str(binascii.hexlify(packet))[2:][:-1])
s.sendto(packet, (svr, 61440))
while True:
data, address = s.recvfrom(1024)
if data[:1] == b'\x07':
svr_num = svr_num + 1
break
else:
log('[keep-alive2] recv2/unexpected', str(binascii.hexlify(data))[2:][:-1])
log('[keep-alive2] recv2', str(binascii.hexlify(data))[2:][:-1])
tail = data[16:20]
ran += random.randint(1, 10)
packet = keep_alive_package_builder(svr_num, dump(ran), tail, 3, False)
log('[keep-alive2] send3', str(binascii.hexlify(packet))[2:][:-1])
s.sendto(packet, (svr, 61440))
while True:
data, address = s.recvfrom(1024)
if data[:1] == b'\x07':
svr_num = svr_num + 1
break
else:
log('[keep-alive2] recv3/unexpected', str(binascii.hexlify(data))[2:][:-1])
log('[keep-alive2] recv3', str(binascii.hexlify(data))[2:][:-1])
tail = data[16:20]
log("[keep-alive2] keep-alive2 loop was in daemon.")
i = svr_num
while True:
try:
time.sleep(20)
keep_alive1(*args)
ran += random.randint(1, 10)
packet = keep_alive_package_builder(i, dump(ran), tail, 1, False)
#log('DEBUG: keep_alive2,packet 4\n', str(binascii.hexlify(packet))[2:][:-1])
log('[keep_alive2] send',str(i), str(binascii.hexlify(packet))[2:][:-1])
s.sendto(packet, (svr, 61440))
data, address = s.recvfrom(1024)
log('[keep_alive2] recv', str(binascii.hexlify(data))[2:][:-1])
tail = data[16:20]
#log('DEBUG: keep_alive2,packet 4 return\n', str(binascii.hexlify(data))[2:][:-1])
ran += random.randint(1, 10)
packet = keep_alive_package_builder(i+1, dump(ran), tail, 3, False)
#log('DEBUG: keep_alive2,packet 5\n', str(binascii.hexlify(packet))[2:][:-1])
s.sendto(packet, (svr, 61440))
log('[keep_alive2] send', str(i+1), str(binascii.hexlify(packet))[2:][:-1])
data, address = s.recvfrom(1024)
log('[keep_alive2] recv', str(binascii.hexlify(data))[2:][:-1])
tail = data[16:20]
#log('DEBUG: keep_alive2,packet 5 return\n', str(binascii.hexlify(data))[2:][:-1])
            i = (i + 2) % 127  # must stay below 128, otherwise keep_alive2() stops receiving anything
except:
pass
def checksum(s):
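    # Fold the packet into 32 bits: XOR it as little-endian 4-byte words (zero-padded,
    # initial value 1234), scale the result by 1968, truncate to 32 bits and pack little-endian.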
ret = 1234
x = 0
for i in [x*4 for x in range(0, -(-len(s)//4))]:
ret ^= int(binascii.hexlify(s[i:i+4].ljust(4, b'\x00')[::-1]), 16)
ret = (1968 * ret) & 0xffffffff
return struct.pack('<I', ret)
def mkpkt(salt, usr, pwd, mac):
'''
struct _tagLoginPacket
{
struct _tagDrCOMHeader Header;
unsigned char PasswordMd5[MD5_LEN];
char Account[ACCOUNT_MAX_LEN];
unsigned char ControlCheckStatus;
unsigned char AdapterNum;
unsigned char MacAddrXORPasswordMD5[MAC_LEN];
unsigned char PasswordMd5_2[MD5_LEN];
unsigned char HostIpNum;
unsigned int HostIPList[HOST_MAX_IP_NUM];
unsigned char HalfMD5[8];
unsigned char DogFlag;
unsigned int unkown2;
struct _tagHostInfo HostInfo;
unsigned char ClientVerInfoAndInternetMode;
unsigned char DogVersion;
};
'''
data = b'\x03\x01\x00' + bytes([len(usr) + 20])
data += md5sum(b'\x03\x01' + salt + pwd.encode())
data += (usr.encode() + 36*b'\x00')[:36]
data += CONTROLCHECKSTATUS
data += ADAPTERNUM
data += dump(int(binascii.hexlify(data[4:10]), 16)^mac)[-6:] #mac xor md51
data += md5sum(b'\x01' + pwd.encode() + salt + b'\x00'*4) #md52
data += b'\x01' # number of ip
data += b''.join([bytes([int(i)]) for i in host_ip.split('.')]) #x.x.x.x ->
data += b'\00' * 4 #your ipaddress 2
data += b'\00' * 4 #your ipaddress 3
data += b'\00' * 4 #your ipaddress 4
data += md5sum(data + b'\x14\x00\x07\x0B')[:8] #md53
data += IPDOG
data += b'\x00'*4 # unknown2
'''
struct _tagOSVERSIONINFO
{
unsigned int OSVersionInfoSize;
unsigned int MajorVersion;
unsigned int MinorVersion;
unsigned int BuildNumber;
unsigned int PlatformID;
char ServicePack[128];
};
struct _tagHostInfo
{
char HostName[HOST_NAME_MAX_LEN];
unsigned int DNSIP1;
unsigned int DHCPServerIP;
unsigned int DNSIP2;
unsigned int WINSIP1;
unsigned int WINSIP2;
struct _tagDrCOM_OSVERSIONINFO OSVersion;
};
'''
data += (host_name.encode() + 32 * b'\x00')[:32] # _tagHostInfo.HostName
data += b''.join([bytes([int(i)]) for i in PRIMARY_DNS.split('.')]) # _tagHostInfo.DNSIP1
data += b''.join([bytes([int(i)]) for i in dhcp_server.split('.')]) # _tagHostInfo.DHCPServerIP
data += b'\x00\x00\x00\x00' # _tagHostInfo.DNSIP2
data += b'\x00' * 4 # _tagHostInfo.WINSIP1
data += b'\x00' * 4 # _tagHostInfo.WINSIP2
data += b'\x94\x00\x00\x00' # _tagHostInfo.OSVersion.OSVersionInfoSize
data += b'\x05\x00\x00\x00' # _tagHostInfo.OSVersion.MajorVersion
data += b'\x01\x00\x00\x00' # _tagHostInfo.OSVersion.MinorVersion
data += b'\x28\x0A\x00\x00' # _tagHostInfo.OSVersion.BuildNumber
data += b'\x02\x00\x00\x00' # _tagHostInfo.OSVersion.PlatformID
# _tagHostInfo.OSVersion.ServicePack
data += (host_os.encode() + 32 * b'\x00')[:32]
data += b'\x00' * 96
# END OF _tagHostInfo
data += AUTH_VERSION
if ror_version:
'''
struct _tagLDAPAuth
{
unsigned char Code;
unsigned char PasswordLen;
unsigned char Password[MD5_LEN];
};
'''
data += b'\x00' # _tagLDAPAuth.Code
data += bytes([len(pwd)]) # _tagLDAPAuth.PasswordLen
        data += ror(md5sum(b'\x03\x01' + salt + pwd.encode()), pwd) # _tagLDAPAuth.Password
'''
struct _tagDrcomAuthExtData
{
unsigned char Code;
unsigned char Len;
unsigned long CRC;
unsigned short Option;
unsigned char AdapterAddress[MAC_LEN];
};
'''
data += b'\x02' # _tagDrcomAuthExtData.Code
data += b'\x0C' # _tagDrcomAuthExtData.Len
data += checksum(data + b'\x01\x26\x07\x11\x00\x00' + dump(mac)) # _tagDrcomAuthExtData.CRC
data += b'\x00\x00' # _tagDrcomAuthExtData.Option
data += dump(mac) # _tagDrcomAuthExtData.AdapterAddress
# END OF _tagDrcomAuthExtData
data += b'\x00' # auto logout / default: False
data += b'\x00' # broadcast mode / default : False
data += b'\xE9\x13' #unknown, filled numbers randomly =w=
log('[mkpkt]', str(binascii.hexlify(data))[2:][:-1])
return data
def login(usr, pwd, svr):
global SALT
global AUTH_INFO
i = 0
while True:
salt = challenge(svr, time.time()+random.randint(0xF, 0xFF))
SALT = salt
packet = mkpkt(salt, usr, pwd, mac)
log('[login] send', str(binascii.hexlify(packet))[2:][:-1])
s.sendto(packet, (svr, 61440))
data, address = s.recvfrom(1024)
log('[login] recv', str(binascii.hexlify(data))[2:][:-1])
log('[login] packet sent.')
if address == (svr, 61440):
if data[:1] == b'\x04':
                log('[login] logged in')
AUTH_INFO = data[23:39]
break
else:
log('[login] login failed.')
if IS_TEST:
time.sleep(3)
else:
time.sleep(30)
continue
        else:
            i += 1
            if i >= 5 and UNLIMITED_RETRY == False:
                log('[login] exception occurred.')
                sys.exit(1)
            else:
                continue
log('[login] login sent')
#0.8 changed:
return data[23:39]
#return data[-22:-6]
def logout(usr, pwd, svr, mac, auth_info):
salt = challenge(svr, time.time()+random.randint(0xF, 0xFF))
if salt:
data = b'\x06\x01\x00' + bytes([len(usr) + 20])
data += md5sum(b'\x03\x01' + salt + pwd.encode())
        data += (usr.encode() + 36*b'\x00')[:36]
data += CONTROLCHECKSTATUS
data += ADAPTERNUM
data += dump(int(binascii.hexlify(data[4:10]), 16)^mac)[-6:]
# data += b'\x44\x72\x63\x6F' # Drco
data += auth_info
        s.sendto(data, (svr, 61440))
data, address = s.recvfrom(1024)
if data[:1] == b'\x04':
            log('[logout_auth] logged out.')
def keep_alive1(salt, tail, pwd, svr):
foo = struct.pack('!H',int(time.time())%0xFFFF)
data = b'\xff' + md5sum(b'\x03\x01' + salt + pwd.encode()) + b'\x00\x00\x00'
data += tail
data += foo + b'\x00\x00\x00\x00'
log('[keep_alive1] send', str(binascii.hexlify(data))[2:][:-1])
s.sendto(data, (svr, 61440))
while True:
data, address = s.recvfrom(1024)
if data[:1] == b'\x07':
break
else:
log('[keep-alive1]recv/not expected', str(binascii.hexlify(data))[2:][:-1])
log('[keep-alive1] recv', str(binascii.hexlify(data))[2:][:-1])
def empty_socket_buffer():
#empty buffer for some fucking schools
log('starting to empty socket buffer')
try:
while True:
data, address = s.recvfrom(1024)
            log('received sth unexpected', str(binascii.hexlify(data))[2:][:-1])
            if data == b'':
break
except:
# get exception means it has done.
log('exception in empty_socket_buffer')
pass
    log('emptied')
def daemon():
with open('/var/run/jludrcom.pid','w') as f:
f.write(str(os.getpid()))
def main():
if not IS_TEST:
daemon()
    exec(open(CONF).read(), globals())
log("auth svr: " + server + "\nusername: " + username + "\npassword: " + password + "\nmac: " + str(hex(mac))[:-1])
log("bind ip: " + bind_ip)
while True:
try:
package_tail = login(username, password, server)
except LoginException:
continue
log('package_tail', str(binascii.hexlify(package_tail))[2:][:-1])
#keep_alive1 is fucking bullshit!
empty_socket_buffer()
keep_alive1(SALT, package_tail, password, server)
keep_alive2(SALT, package_tail, password, server)
if __name__ == "__main__":
main()
|
drcoms/drcom-generic
|
latest-wired-python3.py
|
Python
|
agpl-3.0
| 15,765
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2014 Didotech srl (info@didotech.com)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import re
from openerp.osv import orm
from openerp.tools.config import config
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
ENABLE_CACHE = config.get('product_cache', False)
class MrpBom(orm.Model):
_inherit = 'mrp.bom'
def create(self, cr, uid, vals, context=None):
context = context or self.pool['res.users'].context_get(cr, uid)
if not vals.get('bom_id', False):
self.pool['product.product'].write(
cr, uid, vals['product_id'],
{'supply_method': 'produce', 'purchase_ok': False},
context
)
if vals.get('product_id', False):
for product_id in self.GetWhereUsed(cr, uid, [vals['product_id']], context)[1].keys():
if int(product_id) in self.pool['product.product'].product_cost_cache:
del self.pool['product.product'].product_cost_cache[int(product_id)]
return super(MrpBom, self).create(cr, uid, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = self.pool['res.users'].context_get(cr, uid)
product_model = self.pool['product.product']
bom_ids = []
for bom in self.browse(cr, uid, ids, context):
if not bom.bom_id:
bom_ids.append(bom.id)
boms = self.browse(cr, uid, bom_ids, context)
for product_id in [bom.product_id.id for bom in boms]:
bom_ids_count = self.search(cr, uid, [('product_id', '=', product_id), ('bom_id', '=', False)], count=True)
if bom_ids_count == 1:
product_model.write(cr, uid, product_id, {'supply_method': 'buy', 'purchase_ok': True}, context=context)
return super(MrpBom, self).unlink(cr, uid, ids, context=context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = self.pool['res.users'].context_get(cr, uid)
if isinstance(ids, (int, long)):
ids = [ids]
boms = self.browse(cr, uid, ids, context)
product_ids = []
for bom in boms:
product_old_id = bom.product_id.id
parent_id = bom.bom_id
if not parent_id and vals.get('product_id', False) and product_old_id != vals['product_id']:
                # the new product now has its own BOM: switch it to 'produce' and block purchasing
self.pool['product.product'].write(cr, uid, vals['product_id'], {'supply_method': 'produce', 'purchase_ok': False}, context)
bom_ids_count = self.search(cr, uid, [('product_id', '=', product_old_id), ('bom_id', '=', False)], count=True)
if bom_ids_count == 1:
self.pool['product.product'].write(cr, uid, product_old_id, {'supply_method': 'buy', 'purchase_ok': True}, context)
if ('bom_lines' in vals or 'routing_id' in vals) and ENABLE_CACHE:
product_ids.append(product_old_id)
if vals.get('product_id', False):
product_ids.append(int(vals['product_id']))
if product_ids:
changed_product = self.GetWhereUsed(cr, uid, product_ids, context)[1].keys()
for product_id in changed_product:
if int(product_id) in self.pool['product.product'].product_cost_cache:
del self.pool['product.product'].product_cost_cache[int(product_id)]
if 'bom_lines' in vals:
for bom_line in vals['bom_lines']:
if bom_line[0] == 2 or isinstance(bom_line[2], dict) and 'product_qty' in bom_line[2]:
if product_old_id in self.pool['product.product'].product_cost_cache:
del self.pool['product.product'].product_cost_cache[product_old_id]
return super(MrpBom, self).write(cr, uid, ids, vals, context=context)
def action_view_bom(self, cr, uid, ids, context=None):
line = self.browse(cr, uid, ids, context)[0]
view = self.pool['ir.model.data'].get_object_reference(cr, uid, 'mrp', 'mrp_bom_tree_view')
view_id = view and view[1] or False
return {
'type': 'ir.actions.act_window',
'name': _('Product BOM'),
'res_model': 'mrp.bom',
'view_type': 'tree',
'view_mode': 'tree',
'view_id': [view_id],
'domain': [('product_id', '=', line.product_id.id),
('bom_id', '=', False)],
# 'target': 'new',
'res_id': False
}
def GetWhereUsed(self, cr, uid, ids, context=None):
"""
Return a list of all fathers of a Part (all levels)
"""
if not isinstance(ids, (list, tuple)):
ids = [ids]
self._packed = []
relDatas = []
if len(ids) < 1:
return None
sid = False
if len(ids) > 1:
sid = ids[1]
oid = ids[0]
relDatas.append(oid)
relDatas.append(self._implodebom(cr, uid, self._inbomid(cr, uid, oid, sid, context), context))
prtDatas = self._getpackdatas(cr, uid, relDatas, context)
return (relDatas, prtDatas, self._getpackreldatas(cr, uid, relDatas, prtDatas, context))
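        # Illustrative call (ids assumed): rel, parts, rel_data = self.GetWhereUsed(cr, uid, [product_id])
        # 'rel' is the product id followed by the nested tree of parent product ids,
        # 'parts' maps str(id) to product data and 'rel_data' maps str(id) to the first BOM read for it.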
def _getpackdatas(self, cr, uid, relDatas, context=None):
prtDatas = {}
non_decimal = re.compile(r'[^\d.]+')
tmpbuf = (((str(relDatas).replace('[', '')).replace(']', '')).replace('(', '')).replace(')', '').split(',')
tmpids = [int(non_decimal.sub('', tmp)) for tmp in tmpbuf if len(non_decimal.sub('', tmp).strip()) > 0]
if len(tmpids) < 1:
return prtDatas
compType = self.pool['product.product']
tmpDatas = compType.read(cr, uid, tmpids, context)
for tmpData in tmpDatas:
for keyData in tmpData.keys():
if not tmpData[keyData]:
del tmpData[keyData]
prtDatas[str(tmpData['id'])] = tmpData
return prtDatas
def _getpackreldatas(self, cr, uid, relDatas, prtDatas, context=None):
relids = {}
relationDatas = {}
non_decimal = re.compile(r'[^\d.]+')
tmpbuf = (((str(relDatas).replace('[', '')).replace(']', '')).replace('(', '')).replace(')', '').split(',')
tmpids = [int(non_decimal.sub('', tmp)) for tmp in tmpbuf if len(non_decimal.sub('', tmp).strip()) > 0]
if len(tmpids) < 1:
return prtDatas
for keyData in prtDatas.keys():
tmpData = prtDatas[keyData]
if len(tmpData.get('bom_ids', [])) > 0:
relids[keyData] = tmpData['bom_ids'][0]
if len(relids) < 1:
return relationDatas
setobj = self.pool['mrp.bom']
for keyData in relids.keys():
relationDatas[keyData] = setobj.read(cr, uid, relids[keyData], context)
return relationDatas
def _implodebom(self, cr, uid, bomObjs, context=None):
"""
        Execute implosion for a BOM object
"""
pids = []
for bomObj in bomObjs:
if not bomObj.product_id:
continue
if bomObj.product_id.id in self._packed:
continue
self._packed.append(bomObj.product_id.id)
            innerids = self._implodebom(cr, uid, self._inbomid(cr, uid, bomObj.product_id.id, context=context), context)
pids.append((bomObj.product_id.id, innerids))
return (pids)
def GetWhereUsedSum(self, cr, uid, ids, context=None):
"""
Return a list of all fathers of a Part (all levels)
"""
self._packed = []
relDatas = []
if len(ids) < 1:
return None
sid = False
if len(ids) > 1:
sid = ids[1]
oid = ids[0]
relDatas.append(oid)
relDatas.append(self._implodebom(cr, uid, self._inbomid(cr, uid, oid, sid, context), context))
prtDatas = self._getpackdatas(cr, uid, relDatas, context)
return (relDatas, prtDatas, self._getpackreldatas(cr, uid, relDatas, prtDatas))
def _bomid(self, cr, uid, pid, sid=None, context=None):
if sid:
return self._getbomid(cr, uid, pid, sid, context)
else:
return self._getbomidnullsrc(cr, uid, pid, context)
def _inbomid(self, cr, uid, pid, sid=None, context=None):
if sid:
return self._getinbom(cr, uid, pid, sid, context)
else:
return self._getinbomidnullsrc(cr, uid, pid, context)
def _getbomid(self, cr, uid, pid, sid, context=None):
ids = self._getidbom(cr, uid, pid, sid, context)
return self.browse(cr, uid, list(set(ids)), context)
def _getidbom(self, cr, uid, pid, sid, context):
ids = self.search(cr, uid, [('product_id', '=', pid), ('bom_id', '=', False)], context=context)
return list(set(ids))
def _getinbom(self, cr, uid, pid, sid, context):
ids = self.search(cr, uid, [('product_id', '=', pid), ('bom_id', '!=', False)], context=context)
return self.browse(cr, uid, ids, context)
def _getinbomidnullsrc(self, cr, uid, pid, context=None):
counted = []
ids = self.search(cr, uid, [('product_id', '=', pid), ('bom_id', '!=', False)], context=context)
for obj in self.browse(cr, uid, ids, context):
if obj.bom_id in counted:
continue
counted.append(obj.bom_id)
return counted
|
iw3hxn/LibrERP
|
product_bom/models/mrp_bom.py
|
Python
|
agpl-3.0
| 10,462
|
from comics.aggregator.crawler import ComicControlCrawlerBase
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Sticky Dilly Buns"
language = "en"
url = "http://www.stickydillybuns.com/"
start_date = "2013-01-07"
rights = "G. Lagace"
active = False
class Crawler(ComicControlCrawlerBase):
history_capable_days = 50
schedule = "Mo,Fr"
time_zone = "US/Eastern"
def crawl(self, pub_date):
return self.crawl_helper(
"https://pixietrixcomix.com/sticky-dilly-buns", pub_date
)
|
jodal/comics
|
comics/comics/stickydillybuns.py
|
Python
|
agpl-3.0
| 583
|
# coding=UTF-8
from time import time
from bellum.stats.models import Report
def makeReport(repdata, title):
return Report.create(repdata, title)
def sendTo(report, account, solicited):
    '''`solicited` tells whether the report was requested by the recipient or not; it affects how the report is presented in the chat system'''
from bellum.chat.models import LCDMessage, LCDMT_UR, LCDMT_SR
sm = LCDMessage(recipient=account,
author=None,
msgtype={True:LCDMT_SR, False:LCDMT_UR}[solicited],
message_ref=report.id)
sm.save()
report.link()
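# Hypothetical usage (account object and report data assumed, not part of this module):
#   report = makeReport({'outcome': 'victory'}, 'Battle report')
#   sendTo(report, account, solicited=False)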
|
piotrmaslanka/bellum
|
stats/reports/__init__.py
|
Python
|
agpl-3.0
| 583
|
# -*- coding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2016 Dominic Krimmer #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from openerp import models, fields, api
class IndustrialClassification(models.Model):
_name = "ciiu" # res.co.ciiu
_description = "ISIC List"
name = fields.Char(
string="Code and Description",
store=True,
compute="_compute_concat_name"
)
code = fields.Char('Code', required=True)
description = fields.Char('Description', required=True)
type = fields.Char(
'Type',
store=True,
compute="_compute_set_type"
)
hasParent = fields.Boolean('Has Parent?')
parent = fields.Many2one('ciiu', 'Parent')
hasDivision = fields.Boolean('Has Division?')
division = fields.Many2one('ciiu', 'Division')
hasSection = fields.Boolean('Has Section?')
section = fields.Many2one('ciiu', 'Section')
hierarchy = fields.Selection(
[
(1, 'Has Parent?'),
(2, 'Has Division?'),
(3, 'Has Section?')
],
'Hierarchy'
)
@api.depends('code', 'description')
def _compute_concat_name(self):
"""
        This function concatenates the two fields so a CIIU entry can be
        searched either by its numeric code or by its description
@return: void
"""
if self.code is False or self.description is False:
self.name = ''
else:
self.name = str(self.code.encode('utf-8').strip()) + \
' - ' + str(self.description.encode('utf-8').strip())
@api.depends('hasParent')
def _compute_set_type(self):
"""
Section, Division and Parent should be visually separated in the tree
view. Therefore we tag them accordingly as 'view' or 'other'
@return: void
"""
# Child
if self.hasParent is True:
            if self.division:
                self.type = 'view'
            elif self.section:
self.type = 'view'
else:
self.type = 'other'
# Division
else:
self.type = 'view'
|
dkrimmer84/l10n_co_res_partner
|
models/ciiu.py
|
Python
|
agpl-3.0
| 3,395
|
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import deform
import colander
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.default_behavior import Cancel
from pontus.form import FormView
from pontus.schema import Schema
from pontus.widget import RadioChoiceWidget
from novaideo.content.processes.idea_management.behaviors import (
MakeOpinion)
from novaideo.content.idea import Idea, OPINIONS
from novaideo.widget import LimitedTextAreaWidget
from novaideo import _
@colander.deferred
def opinion_choice(node, kw):
values = OPINIONS.items()
return RadioChoiceWidget(values=values)
class OpinionSchema(Schema):
opinion = colander.SchemaNode(
colander.String(),
widget=opinion_choice,
title=_('Opinion'),
default='to_study'
)
explanation = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=600),
widget=LimitedTextAreaWidget(rows=5,
cols=30,
limit=600),
title=_("Explanation")
)
@view_config(
name='makeopinionformidea',
context=Idea,
renderer='pontus:templates/views_templates/grid.pt',
)
class MakeOpinionFormView(FormView):
title = _('Give your opinion')
schema = OpinionSchema()
behaviors = [MakeOpinion, Cancel]
formid = 'formmakeopinionidea'
name = 'makeopinionformidea'
def default_data(self):
return getattr(self.context, 'opinion', {})
def before_update(self):
self.action = self.request.resource_url(
self.context, 'novaideoapi',
query={'op': 'update_action_view',
'node_id': MakeOpinion.node_definition.id})
self.schema.widget = deform.widget.FormWidget(
css_class='deform novaideo-ajax-form')
DEFAULTMAPPING_ACTIONS_VIEWS.update({MakeOpinion: MakeOpinionFormView})
|
ecreall/nova-ideo
|
novaideo/views/idea_management/make_opinion.py
|
Python
|
agpl-3.0
| 2,080
|
from . import update, read
|
uclouvain/osis_louvain
|
base/views/education_groups/group_element_year/__init__.py
|
Python
|
agpl-3.0
| 27
|
# This file is part of rhizi, a collaborative knowledge graph editor.
# Copyright (C) 2014-2015 Rhizi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
from .neo4j_cypher import Query_Struct_Type, DB_Query
from .neo4j_util import aifnode__ns_label, rzdoc__ns_label, rzdoc__meta_ns_label
from .neo4j_cypher_parser import p_path, p_node
from .db_op import DBO_aifnode__clone, DBO_rzdoc__clone, DB_op
class Query_Transformation(object):
"""
DB_op transformation
"""
def __call__(self, value):
"""
Apply transformation to either a DB_op or a DB_Query
"""
assert(isinstance(value, DB_op))
return self.apply_to_db_op(value)
def apply_to_db_op(self, op):
for dbq in op: # apply to sub queries
            assert(isinstance(dbq, DB_Query))
self.apply_to_single_query(dbq)
return op
def apply_to_single_query(self, dbq): pass # subclass hook
class QT_RZDOC_NS_Filter__common(Query_Transformation):
"""
Add RZDoc name-space filter:
- inject NS labels into node patterns
- [!] ignore nodes which are part of path patterns to avoid overriding bound references
"""
def __init__(self, ns_label):
self.ns_label = ns_label
def deco__process_q_ret__n_label_set(self, label_set):
ret = [lbl for lbl in label_set if lbl != self.ns_label]
return ret
def apply_to_db_op(self, op):
ret = Query_Transformation.apply_to_db_op(self, op)
# override DBO_rzdoc__clone.process_q_ret__n_label_set hook
if op.__class__ == DBO_rzdoc__clone:
op.process_q_ret__n_label_set = self.deco__process_q_ret__n_label_set
return ret
def apply_to_single_query(self, dbq):
q_type = dbq.query_struct_type
clause_set = []
if Query_Struct_Type.w == q_type:
clause_set += dbq.pt_root.clause_set_by_kw('create')
if Query_Struct_Type.r == q_type:
clause_set += dbq.pt_root.clause_set_by_kw('match')
if Query_Struct_Type.rw == q_type:
clause_set += dbq.pt_root.clause_set_by_kw('create')
clause_set += dbq.pt_root.clause_set_by_kw('match')
for c in clause_set:
n_exp_set = c.sub_exp_set_by_type(p_node, recurse=True)
for n_exp in n_exp_set:
if n_exp.parent.__class__ == p_path:
                    continue
lbl_set = n_exp.label_set
if not lbl_set: # add label set if necessary
lbl_set = n_exp.spawn_label_set()
lbl_set.add_label(self.ns_label)
# log.debug('db_q trans: in clause: %s, out clause: %s' % (cur_clause, new_clause))
class QT_AIFNODE_NS_Filter__common(Query_Transformation):
"""
Add AIFNode name-space filter:
- inject NS labels into node patterns
- [!] ignore nodes which are part of path patterns to avoid overriding bound references
"""
def __init__(self, ns_label):
self.ns_label = ns_label
def deco__process_q_ret__n_label_set(self, label_set):
ret = [lbl for lbl in label_set if lbl != self.ns_label]
return ret
def apply_to_db_op(self, op):
ret = Query_Transformation.apply_to_db_op(self, op)
# override DBO_rzdoc__clone.process_q_ret__n_label_set hook
if op.__class__ == DBO_aifnode__clone:
op.process_q_ret__n_label_set = self.deco__process_q_ret__n_label_set
return ret
def apply_to_single_query(self, dbq):
q_type = dbq.query_struct_type
clause_set = []
if Query_Struct_Type.w == q_type:
clause_set += dbq.pt_root.clause_set_by_kw('create')
if Query_Struct_Type.r == q_type:
clause_set += dbq.pt_root.clause_set_by_kw('match')
if Query_Struct_Type.rw == q_type:
clause_set += dbq.pt_root.clause_set_by_kw('create')
clause_set += dbq.pt_root.clause_set_by_kw('match')
for c in clause_set:
n_exp_set = c.sub_exp_set_by_type(p_node, recurse=True)
for n_exp in n_exp_set:
if n_exp.parent.__class__ == p_path:
                    continue
lbl_set = n_exp.label_set
if not lbl_set: # add label set if necessary
lbl_set = n_exp.spawn_label_set()
lbl_set.add_label(self.ns_label)
# log.debug('db_q trans: in clause: %s, out clause: %s' % (cur_clause, new_clause))
class QT_AIFNODE_NS_Filter(QT_AIFNODE_NS_Filter__common):
def __init__(self, aifnode):
ns_label = aifnode__ns_label(aifnode)
super(QT_AIFNODE_NS_Filter, self).__init__(ns_label)
class QT_RZDOC_NS_Filter(QT_RZDOC_NS_Filter__common):
def __init__(self, rzdoc):
ns_label = rzdoc__ns_label(rzdoc)
super(QT_RZDOC_NS_Filter, self).__init__(ns_label)
class QT_RZDOC_Meta_NS_Filter(QT_RZDOC_NS_Filter__common):
def __init__(self, rzdoc):
ns_label = rzdoc__meta_ns_label(rzdoc)
super(QT_RZDOC_Meta_NS_Filter, self).__init__(ns_label)
class QT_AIFNODE_Meta_NS_Filter(QT_AIFNODE_NS_Filter__common):
def __init__(self, rzdoc):
ns_label = rzdoc__meta_ns_label(rzdoc)
        super(QT_AIFNODE_Meta_NS_Filter, self).__init__(ns_label)
class QT_Node_Filter__meta_label_set(Query_Transformation):
# TODO: impl
# 'where 0 = length(filter(_lbl in labels(n) where _lbl =~ \'^__.*$\'))', # filter nodes with meta labels
pass
|
ozFri/rhizi
|
rhizi/neo4j_qt.py
|
Python
|
agpl-3.0
| 6,163
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# Skarphed is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skarphed.
# If not, see http://www.gnu.org/licenses/.
###########################################################
from skarphedadmin.data.Generic import GenericSkarphedObject
from skarphedadmin.data.skarphed.Skarphed import rpc
from Role import Role
class Roles(GenericSkarphedObject):
def __init__(self,parent):
GenericSkarphedObject.__init__(self)
self.par = parent
self.updated()
self.refresh()
def refreshCallback(self,data):
roleIds = [r.getId() for r in self.children]
for role in data:
if role['id'] not in roleIds:
self.addChild(Role(self,role))
else:
self.getRoleById(role['id']).refresh(role)
@rpc(refreshCallback)
def getRoles(self):
pass
def refresh(self):
self.getRoles()
def getRoleById(self,id):
for role in self.children:
if role.getId() == id:
return role
return None
def getName(self):
return "Roles"
def createRoleCallback(self,json):
self.refresh()
@rpc(createRoleCallback)
def createRole(self,roledata):
pass
def createNewRole(self,name):
self.createRole({'name':name})
def getPar(self):
return self.par
def getSkarphed(self):
return self.getPar()
def getServer(self):
return self.getPar().getServer()
|
skarphed/skarphed
|
admin/src/skarphedadmin/data/skarphed/Roles.py
|
Python
|
agpl-3.0
| 2,253
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-01-18 11:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('moderate', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='mozillianprofile',
name='avatar_url',
field=models.URLField(blank=True, default=b'', max_length=400),
),
]
|
johngian/mozmoderator
|
moderator/moderate/migrations/0002_auto_20170118_1120.py
|
Python
|
agpl-3.0
| 480
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron. Copyright Yannick Buron
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Base recursive model',
'version': '1.0',
'category': 'base',
'author': 'Yannick Buron',
'license': 'AGPL-3',
'website': 'https://github.com/OCA/server-tools',
'depends': ['base'],
'data': ['security/ir.model.access.csv'],
'installable': True,
}
|
YannickB/server-tools
|
base_recursive_model/__openerp__.py
|
Python
|
agpl-3.0
| 1,216
|
# Copyright 2020 Camptocamp SA
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl)
from odoo.tests import SavepointCase
class TestCommonMoveDest(SavepointCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.env = cls.env(context=dict(cls.env.context, tracking_disable=True))
cls.partner_delta = cls.env.ref("base.res_partner_4")
cls.warehouse = cls.env.ref("stock.warehouse0")
cls.warehouse.write({"delivery_steps": "pick_pack_ship"})
cls.customers_location = cls.env.ref("stock.stock_location_customers")
cls.output_location = cls.env.ref("stock.stock_location_output")
cls.packing_location = cls.env.ref("stock.location_pack_zone")
cls.stock_shelf_location = cls.env.ref("stock.stock_location_components")
cls.stock_shelf_2_location = cls.env.ref("stock.stock_location_14")
cls.out_type = cls.warehouse.out_type_id
cls.pack_type = cls.warehouse.pack_type_id
cls.pick_type = cls.warehouse.pick_type_id
cls.product_1 = cls.env["product.product"].create(
{"name": "Product 1", "type": "product"}
)
cls.product_2 = cls.env["product.product"].create(
{"name": "Product 2", "type": "product"}
)
cls.procurement_group_1 = cls.env["procurement.group"].create(
{"name": "Test 1"}
)
def _init_inventory(self):
# Product 1 on shelf 1
# Product 2 on shelf 2
inventory = self.env["stock.inventory"].create({"name": "Test init"})
inventory.action_start()
product_location_list = [
(self.product_1, self.stock_shelf_location),
(self.product_2, self.stock_shelf_2_location),
]
lines_vals = list()
for product, location in product_location_list:
lines_vals.append(
(
0,
0,
{
"product_id": product.id,
"product_uom_id": product.uom_id.id,
"product_qty": 10.0,
"location_id": location.id,
},
)
)
inventory.write({"line_ids": lines_vals})
inventory.action_validate()
def _create_pickings(self):
# Create delivery order
ship_order = self.env["stock.picking"].create(
{
"partner_id": self.partner_delta.id,
"location_id": self.output_location.id,
"location_dest_id": self.customers_location.id,
"picking_type_id": self.out_type.id,
}
)
pack_order = self.env["stock.picking"].create(
{
"partner_id": self.partner_delta.id,
"location_id": self.packing_location.id,
"location_dest_id": self.output_location.id,
"picking_type_id": self.pack_type.id,
}
)
pick_order = self.env["stock.picking"].create(
{
"partner_id": self.partner_delta.id,
"location_id": self.stock_shelf_location.id,
"location_dest_id": self.packing_location.id,
"picking_type_id": self.pick_type.id,
}
)
pick_order_2 = self.env["stock.picking"].create(
{
"partner_id": self.partner_delta.id,
"location_id": self.stock_shelf_2_location.id,
"location_dest_id": self.packing_location.id,
"picking_type_id": self.pick_type.id,
}
)
return ship_order, pack_order, pick_order, pick_order_2
def _create_move(
self,
picking,
product,
state="waiting",
procure_method="make_to_order",
move_dest=None,
):
move_vals = {
"name": product.name,
"product_id": product.id,
"product_uom_qty": 2.0,
"product_uom": product.uom_id.id,
"picking_id": picking.id,
"location_id": picking.location_id.id,
"location_dest_id": picking.location_dest_id.id,
"state": state,
"procure_method": procure_method,
"group_id": self.procurement_group_1.id,
}
if move_dest:
move_vals["move_dest_ids"] = [(4, move_dest.id, False)]
return self.env["stock.move"].create(move_vals)
def test_packing_sub_location(self):
self._init_inventory()
(
ship_order_1,
pack_order_1,
pick_order_1a,
pick_order_1b,
) = self._create_pickings()
ship_move_1a = self._create_move(ship_order_1, self.product_1)
pack_move_1a = self._create_move(
pack_order_1, self.product_1, move_dest=ship_move_1a
)
pick_move_1a = self._create_move(
pick_order_1a,
self.product_1,
state="confirmed",
procure_method="make_to_stock",
move_dest=pack_move_1a,
)
ship_move_1b = self._create_move(ship_order_1, self.product_2)
pack_move_1b = self._create_move(
pack_order_1, self.product_2, move_dest=ship_move_1b
)
pick_move_1b = self._create_move(
pick_order_1b,
self.product_2,
state="confirmed",
procure_method="make_to_stock",
move_dest=pack_move_1b,
)
self.assertEqual(pick_move_1a.common_dest_move_ids, pick_move_1b)
self.assertEqual(pick_move_1b.common_dest_move_ids, pick_move_1a)
self.assertEqual(pack_move_1a.common_dest_move_ids, pack_move_1b)
self.assertEqual(pack_move_1b.common_dest_move_ids, pack_move_1a)
self.assertFalse(ship_move_1a.common_dest_move_ids)
self.assertFalse(ship_move_1b.common_dest_move_ids)
self.assertEqual(
self.env["stock.move"].search(
[("common_dest_move_ids", "=", pick_move_1b.id)]
),
pick_move_1a,
)
self.assertEqual(
self.env["stock.move"].search(
[("common_dest_move_ids", "=", pick_move_1a.id)]
),
pick_move_1b,
)
self.assertEqual(
self.env["stock.move"].search(
[("common_dest_move_ids", "in", (pick_move_1a | pick_move_1b).ids)]
),
pick_move_1a | pick_move_1b,
)
|
OCA/stock-logistics-warehouse
|
stock_move_common_dest/tests/test_move_common_dest.py
|
Python
|
agpl-3.0
| 6,550
|
# Generated by Django 4.0 on 2022-02-28 08:55
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
("observations", "0008_use_explicit_base_manager_name"),
]
operations = [
migrations.AlterField(
model_name="observation",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_%(app_label)s.%(class)s_set+",
to="contenttypes.contenttype",
),
),
]
|
City-of-Helsinki/smbackend
|
observations/migrations/0009_alter_observation_polymorphic_ctype.py
|
Python
|
agpl-3.0
| 750
|
from telecommand import Telecommand
class PingTelecommand(Telecommand):
def __init__(self):
Telecommand.__init__(self)
def apid(self):
return 0x50
def payload(self):
return []
|
PW-Sat2/PWSat2OBC
|
integration_tests/telecommand/ping.py
|
Python
|
agpl-3.0
| 224
|
# Copyright (C) 2021 Sebastian Pipping <sebastian@pipping.org>
# Licensed under GNU Affero GPL v3 or later
from django.http import FileResponse
from django.test import TestCase
from django.urls import reverse
from parameterized import parameterized
from ..favicon import FAVICON_FILES
class FaviconTest(TestCase):
@parameterized.expand(FAVICON_FILES)
def test_file_served_properly(self, path):
url = reverse('favicon', kwargs={'path': path})
response = self.client.get(url)
self.assertIsInstance(response, FileResponse)
|
hartwork/wnpp.debian.net
|
wnpp_debian_net/views/tests/test_favicon.py
|
Python
|
agpl-3.0
| 557
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import webnotes
class TestItem(unittest.TestCase):
def test_duplicate_item(self):
from stock.doctype.item_price.item_price import ItemPriceDuplicateItem
bean = webnotes.bean(copy=test_records[0])
self.assertRaises(ItemPriceDuplicateItem, bean.insert)
test_records = [
[
{
"doctype": "Item Price",
"price_list": "_Test Price List",
"item_code": "_Test Item",
"ref_rate": 100
}
]
]
|
gangadhar-kadam/sapphire_app
|
stock/doctype/item_price/test_item_price.py
|
Python
|
agpl-3.0
| 576
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import json
import six
from django.core.paginator import EmptyPage, Paginator
from django.db.models import Manager, Q, QuerySet
from django.http.response import JsonResponse
from django.template.defaultfilters import yesno
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language
from shuup.admin.utils.urls import get_model_url, NoModelUrl
from shuup.utils.dates import try_parse_date
from shuup.utils.objects import compact
from shuup.utils.serialization import ExtendedJSONEncoder
def maybe_callable(thing, context=None):
"""
If `thing` is callable, return it.
If `thing` names a callable attribute of `context`, return it.
"""
if callable(thing):
return thing
if isinstance(thing, six.string_types):
thing = getattr(context, thing, None)
if callable(thing):
return thing
return None
def maybe_call(thing, context, args=None, kwargs=None):
"""
If `thing` is callable, call it with args and kwargs and return the value.
If `thing` names a callable attribute of `context`, call it with args and kwargs and return the value.
Otherwise return `thing`.
"""
func = maybe_callable(context=context, thing=thing)
if func:
thing = func(*(args or ()), **(kwargs or {}))
return thing
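# Illustrative behaviour (context object assumed for illustration): maybe_call("get_object_url",
# context=view, args=(obj,)) calls view.get_object_url(obj) if that attribute exists and is
# callable, while maybe_call(42, context=view) simply returns 42 unchanged.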
class Filter(object):
type = None
def to_json(self, context):
return None
def filter_queryset(self, queryset, column, value):
return queryset # pragma: no cover
class ChoicesFilter(Filter):
type = "choices"
def __init__(self, choices=None, filter_field=None, default=None):
self.filter_field = filter_field
self.choices = choices
self.default = default
def _flatten_choices(self, context):
if not self.choices:
return None
choices = maybe_call(self.choices, context=context)
if isinstance(choices, QuerySet):
choices = [(c.pk, c) for c in choices]
return [("_all", "---------")] + [
(force_text(value, strings_only=True), force_text(display))
for (value, display)
in choices
]
def to_json(self, context):
return {
"choices": self._flatten_choices(context),
"defaultChoice": self.default
}
def filter_queryset(self, queryset, column, value):
if value == "_all":
return queryset
return queryset.filter(**{(self.filter_field or column.id): value})
class Select2Filter(ChoicesFilter):
type = "select2"
def to_json(self, context):
json_dict = super(Select2Filter, self).to_json(context)
json_dict["select2"] = True
return json_dict
class MPTTFilter(Select2Filter):
type = "mptt"
def filter_queryset(self, queryset, column, value):
qs = super(MPTTFilter, self).filter_queryset(queryset, column, value)
return qs.get_descendants(include_self=True)
class RangeFilter(Filter):
type = "range"
def __init__(self, min=None, max=None, step=None, field_type=None, filter_field=None):
"""
:param filter_field: Filter field (Django query expression). If None, column ID is used.
:type filter_field: str|None
:param min: Minimum value.
:param max: Maximum value.
:param step: Step value. See the HTML5 documentation for semantics.
:param field_type: Field type string. See the HTML5 documentation for semantics.
:type field_type: str|None
"""
self.filter_field = filter_field
self.min = min
self.max = max
self.step = step
self.field_type = field_type
def to_json(self, context):
return {
"range": compact({
"min": maybe_call(self.min, context=context),
"max": maybe_call(self.max, context=context),
"step": maybe_call(self.step, context=context),
"type": self.field_type,
})
}
def filter_queryset(self, queryset, column, value):
if value:
min = value.get("min")
max = value.get("max")
q = {}
filter_field = (self.filter_field or column.id)
if min is not None:
q["%s__gte" % filter_field] = min
if max is not None:
q["%s__lte" % filter_field] = max
if q:
queryset = queryset.filter(**q)
return queryset
class DateRangeFilter(RangeFilter):
def __init__(self, *args, **kwargs):
super(DateRangeFilter, self).__init__(*args, **kwargs)
if not self.field_type:
self.field_type = "date"
def filter_queryset(self, queryset, column, value):
if value:
value = {
"min": try_parse_date(value.get("min")),
"max": try_parse_date(value.get("max")),
}
return super(DateRangeFilter, self).filter_queryset(queryset, column, value)
class TextFilter(Filter):
type = "text"
def __init__(self, field_type=None, placeholder=None, operator="icontains", filter_field=None):
"""
:param filter_field: Filter field (Django query expression). If None, column ID is used.
:type filter_field: str|None
:param field_type: Field type string. See the HTML5 documentation for semantics.
:type field_type: str|None
:param placeholder: Field placeholder string.
:type placeholder: str|None
:param operator: Django operator for the queryset.
:type operator: str
"""
self.filter_field = filter_field
self.field_type = field_type
self.placeholder = placeholder
self.operator = operator
def to_json(self, context):
return {
"text": compact({
"type": self.field_type,
"placeholder": force_text(self.placeholder) if self.placeholder else None,
})
}
def filter_queryset(self, queryset, column, value):
if value:
value = force_text(value).strip()
if value:
return queryset.filter(**{"%s__%s" % ((self.filter_field or column.id), self.operator): value})
return queryset
class MultiFieldTextFilter(TextFilter):
def __init__(self, filter_fields, **kwargs):
"""
        :param filter_fields: List of filter fields (Django query expressions).
        :type filter_fields: list[str]
:param kwargs: Kwargs for `TextFilter`.
"""
super(MultiFieldTextFilter, self).__init__(**kwargs)
self.filter_fields = tuple(filter_fields)
def filter_queryset(self, queryset, column, value):
if value:
q = Q()
for filter_field in self.filter_fields:
q |= Q(**{"%s__%s" % (filter_field, self.operator): value})
return queryset.filter(q)
return queryset
true_or_false_filter = ChoicesFilter([
(False, _("no")),
(True, _("yes"))
])
class Column(object):
def __init__(self, id, title, **kwargs):
self.id = id
self.title = title
self.sort_field = kwargs.pop("sort_field", id)
self.display = kwargs.pop("display", id)
self.class_name = kwargs.pop("class_name", None)
self.filter_config = kwargs.pop("filter_config", None)
self.sortable = bool(kwargs.pop("sortable", True))
self.linked = bool(kwargs.pop("linked", True))
self.raw = bool(kwargs.pop("raw", False))
if kwargs and type(self) is Column: # If we're not derived, validate that client code doesn't fail
raise NameError("Unexpected kwarg(s): %s" % kwargs.keys())
def to_json(self, context=None):
out = {
"id": force_text(self.id),
"title": force_text(self.title),
"className": force_text(self.class_name) if self.class_name else None,
"filter": self.filter_config.to_json(context=context) if self.filter_config else None,
"sortable": bool(self.sortable),
"linked": bool(self.linked),
"raw": bool(self.raw),
}
return dict((key, value) for (key, value) in six.iteritems(out) if value is not None)
def sort_queryset(self, queryset, desc=False):
order_by = ("-" if desc else "") + self.sort_field
if "translations__" in self.sort_field:
queryset = queryset.translated(get_language())
return queryset.order_by(order_by)
def filter_queryset(self, queryset, value):
if self.filter_config:
queryset = self.filter_config.filter_queryset(queryset, self, value)
return queryset
def get_display_value(self, context, object):
display_callable = maybe_callable(self.display, context=context)
if display_callable:
return display_callable(object)
value = object
for bit in self.display.split("__"):
value = getattr(value, bit, None)
if isinstance(value, bool):
value = yesno(value)
if isinstance(value, Manager):
value = ", ".join("%s" % x for x in value.all())
if not value:
value = ""
return force_text(value)
def __repr__(self):
return "<Column: %s> %s" % (self.title, self.id)
class Picotable(object):
def __init__(self, request, columns, queryset, context):
self.request = request
self.columns = columns
self.queryset = queryset
self.context = context
self.columns_by_id = dict((c.id, c) for c in self.columns)
self.get_object_url = maybe_callable("get_object_url", context=self.context)
self.get_object_abstract = maybe_callable("get_object_abstract", context=self.context)
self.default_filters = self._get_default_filters()
def _get_default_filter(self, column):
filter_config = getattr(column, "filter_config")
if(filter_config and hasattr(filter_config, "default") and filter_config.default is not None):
field = filter_config.filter_field or column.id
return (field, filter_config.default)
else:
return None
def _get_default_filters(self):
filters = {}
for column in self.columns:
default_filter = self._get_default_filter(column)
if default_filter:
filters[default_filter[0]] = default_filter[1]
return filters
def process_queryset(self, query):
queryset = self.queryset
filters = (query.get("filters") or self._get_default_filters())
for column, value in six.iteritems(filters):
column = self.columns_by_id.get(column)
if column:
queryset = column.filter_queryset(queryset, value)
sort = query.get("sort")
if sort:
desc = (sort[0] == "-")
column = self.columns_by_id.get(sort[1:])
if not (column and column.sortable):
raise ValueError("Can't sort by column %r" % sort[1:])
queryset = column.sort_queryset(queryset, desc=desc)
return queryset
def get_data(self, query):
paginator = Paginator(self.process_queryset(query), query["perPage"])
try:
page = paginator.page(int(query["page"]))
except EmptyPage:
page = paginator.page(paginator.num_pages)
out = {
"columns": [c.to_json(context=self.context) for c in self.columns],
"pagination": {
"perPage": paginator.per_page,
"nPages": paginator.num_pages,
"nItems": paginator.count,
"pageNum": page.number,
},
"items": [self.process_item(item) for item in page],
"itemInfo": _("Showing %(per_page)s of %(n_items)s %(verbose_name_plural)s") % {
"per_page": min(paginator.per_page, paginator.count),
"n_items": paginator.count,
"verbose_name_plural": self.get_verbose_name_plural(),
}
}
return out
def process_item(self, object):
object_url = self.get_object_url(object) if callable(self.get_object_url) else None
out = {
"_id": object.id,
"_url": object_url,
"_linked_in_mobile": True if object_url else False
}
for column in self.columns:
out[column.id] = column.get_display_value(context=self.context, object=object)
out["_abstract"] = (self.get_object_abstract(object, item=out) if callable(self.get_object_abstract) else None)
return out
def get_verbose_name_plural(self):
try:
return self.queryset.model._meta.verbose_name_plural
except AttributeError:
return _("objects")
class PicotableViewMixin(object):
url_identifier = None
default_columns = []
columns = []
picotable_class = Picotable
template_name = "shuup/admin/base_picotable.jinja"
def process_picotable(self, query_json):
pico = self.picotable_class(
request=self.request,
columns=self.columns,
queryset=self.get_queryset(),
context=self
)
return JsonResponse(pico.get_data(json.loads(query_json)), encoder=ExtendedJSONEncoder)
def get(self, request, *args, **kwargs):
query = request.GET.get("jq")
if query:
return self.process_picotable(query)
return super(PicotableViewMixin, self).get(request, *args, **kwargs)
def get_object_url(self, instance):
try:
return get_model_url(instance, user=self.request.user)
except NoModelUrl:
pass
return None
def get_object_abstract(self, instance, item):
"""
Get the object abstract lines (used for mobile layouts) for this object.
Supported keys in abstract line dicts are:
* text (required)
* title
* class (CSS class name -- `header` for instance)
* raw (boolean; whether or not the `text` is raw HTML)
:param instance: The instance
:param item: The item dict so far. Useful for reusing precalculated values.
:return: Iterable of dicts to pass through to the picotable javascript
:rtype: Iterable[dict]
"""
return None
def get_filter(self):
filter_string = self.request.GET.get("filter")
return json.loads(filter_string) if filter_string else {}
|
shawnadelic/shuup
|
shuup/admin/utils/picotable.py
|
Python
|
agpl-3.0
| 14,951
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.db import models, connection, transaction
from kamu.votes.models import Member, Party, Session, Vote
from django.contrib.auth.models import User
from django.conf import settings
class QuestionSource(models.Model):
name = models.CharField(max_length=255)
year = models.IntegerField()
url_name = models.SlugField(unique=True)
class Meta:
unique_together = (('name', 'year'), )
def __unicode__(self):
return self.url_name
class QuestionManager(models.Manager):
def get_by_url_name(self, url_name):
        if '/' not in url_name:
raise Question.DoesNotExist()
src, order = url_name.split('/')
try:
order = int(order)
except ValueError:
raise Question.DoesNotExist()
return self.get(order=order, source__url_name=src)
class Question(models.Model):
text = models.TextField()
source = models.ForeignKey(QuestionSource, on_delete=models.CASCADE)
order = models.IntegerField()
objects = QuestionManager()
def save(self, *args, **kwargs):
if self.order is None:
q = Question.objects.filter(source=self.source)
max_order = q.aggregate(models.Max('order'))['order__max']
if max_order is None:
max_order = 0
self.order = int(max_order) + 1
super(Question, self).save(*args, **kwargs)
def answers(self):
options = self.option_set.all()
return Answer.objects.filter(option__in=options)
class Meta:
ordering = ('-source__year', 'source__name', 'order', )
unique_together = (('order', 'source'), )
@models.permalink
def get_absolute_url(self):
args = {'source': self.source.url_name, 'question': self.order}
return ('opinions.views.show_question', (), args)
def __unicode__(self):
return "%s/%d" % (self.source, self.order)
class Option(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
order = models.IntegerField()
class Meta:
unique_together = (('question', 'order'), )
ordering = ('question__order', 'order')
def save(self, *args, **kwargs):
if self.order is None:
q = Option.objects.filter(question=self.question)
max_order = q.aggregate(models.Max('order'))['order__max']
if max_order is None:
max_order = 0
self.order = int(max_order) + 1
super(Option, self).save(*args, **kwargs)
def party_shares(self):
# Ah, SQL is so nice and terse
query = \
"""
SELECT votes_party.*,
ROUND(COALESCE(partyvotes/partytotal, 0)*100) AS share
FROM
(SELECT party_id, count(*) as partytotal
FROM opinions_answer, votes_member
WHERE opinions_answer.question_id=%s
AND votes_member.id=opinions_answer.member_id
GROUP BY votes_member.party_id) as totals,
(SELECT votes_member.party_id, count(*) AS partyvotes
FROM opinions_answer, votes_member
WHERE opinions_answer.option_id=%s
AND opinions_answer.member_id=votes_member.id
GROUP BY votes_member.party_id) as stats
RIGHT JOIN votes_party ON votes_party.name=stats.party_id
WHERE votes_party.name = totals.party_id
"""
return Party.objects.raw(query, [self.question_id, self.id])
def __unicode__(self):
return '%s: %s' % (self.question, self.name)
class Answer(models.Model):
member = models.ForeignKey(Member, on_delete=models.CASCADE)
option = models.ForeignKey(Option, on_delete=models.CASCADE, null=True)
question = models.ForeignKey(Question, on_delete=models.CASCADE)
explanation = models.TextField(null=True)
class Meta:
unique_together = (('member', 'option'), )
def __unicode__(self):
return '%s %s' % (self.member, self.option)
class VoteOptionCongruenceManager(models.Manager):
def user_has_congruences(self, user):
if not user.is_authenticated():
return False
return self.filter(user=user).count() > 0
def get_congruence(self, option, session, vote='Y'):
congruence = VoteOptionCongruence.objects.filter(
option=option, session=session,
vote=vote)
if congruence.count() == 0:
return None
congruence = congruence.aggregate(models.Avg('congruence'))
return congruence['congruence__avg']
def __get_average_congruence(self, grouping_object, id_field,
for_user=None, for_question=None):
args = []
extra_where = ""
cong_user = self.get_congruence_user(for_user)
if cong_user is not None:
args.append(cong_user.id)
extra_where += "AND c.user_id=%s\n"
        if for_question is not None:
args.append(for_question.id)
extra_where += "AND o.question_id=%s\n"
session_freq_query = \
"""
SELECT session_id, COUNT(session_id) AS freq
FROM opinions_voteoptioncongruence
GROUP BY session_id
"""
query = \
"""
SELECT
SUM(congruence/f.freq)/SUM(ABS(congruence/f.freq)) AS congruence_avg
FROM
opinions_voteoptioncongruence AS c,
opinions_answer AS a,
opinions_option AS o,
votes_vote AS v,
(%s) AS f
WHERE v.session_id=c.session_id
AND f.session_id=c.session_id
AND a.option_id=c.option_id
AND a.member_id=v.member_id
AND v.vote=c.vote
AND o.id=a.option_id
%s
AND %s=%%s
""" \
% (session_freq_query, extra_where, id_field)
args.append(grouping_object.pk)
cursor = connection.cursor()
count = cursor.execute(query, args)
if count < 1:
return None
return cursor.fetchone()[0]
def get_member_congruence(self, member, **kargs):
return self.__get_average_congruence(member, 'v.member_id', **kargs)
def get_party_congruence(self, party, **kargs):
return self.__get_average_congruence(party, 'v.party', **kargs)
def get_question_congruence(self, question, **kargs):
return self.__get_average_congruence(question, 'a.question_id', **kargs)
def get_congruence_user(self, for_user):
        if for_user is None:
            return None
        if not VoteOptionCongruence.objects.user_has_congruences(for_user):
magic_user = VoteOptionCongruence.magic_username
for_user = User.objects.get(username=magic_user)
return for_user
def __get_average_congruences(self, grouping_class, id_field,
descending=True, limit=False, for_user=None,
for_question=None, for_member=None,
for_session=None,
allow_null_congruences=False,
raw_average=False):
session_freq_query = \
"""
SELECT session_id, COUNT(session_id) AS freq
FROM opinions_voteoptioncongruence
GROUP BY session_id
"""
avg = "SUM(congruence/f.freq)/SUM(ABS(congruence/f.freq)) AS congruence_avg"
        if raw_average:
avg = "AVG(congruence) AS congruence_avg"
query = \
"""
SELECT %s.*,
%s
FROM
opinions_voteoptioncongruence as c,
opinions_answer as a,
opinions_option as o,
votes_vote AS v,
(%s) AS f,
%s
WHERE v.session_id=c.session_id
AND f.session_id=c.session_id
AND a.option_id=c.option_id
AND a.member_id=v.member_id
AND v.vote=c.vote
AND o.id=a.option_id
AND %s.%s=%s
%%s
GROUP BY %s
HAVING congruence_avg IS NOT NULL
ORDER BY congruence_avg %s
%s
""" \
% (grouping_class._meta.db_table,
avg,
session_freq_query,
grouping_class._meta.db_table,
grouping_class._meta.db_table,
grouping_class._meta.pk.name,
id_field,
id_field,
('ASC', 'DESC')[descending],
('', 'LIMIT %i' % (int(limit), ))[bool(limit)])
extra_where = ''
query_args = []
cong_user = self.get_congruence_user(for_user)
if cong_user is not None:
query_args.append(cong_user.id)
extra_where += "AND c.user_id=%s\n"
if for_question is not None:
query_args.append(for_question.id)
extra_where += "AND o.question_id=%s\n"
if for_member is not None:
query_args.append(for_member.id)
extra_where += "AND a.member_id=%s\n"
if for_session is not None:
query_args.append(for_session.id)
extra_where += "AND v.session_id=%s\n"
query = query % extra_where
        if allow_null_congruences:
nullquery = \
"""
SELECT *, NULL as congruence_avg
FROM %s
""" % (grouping_class._meta.db_table,)
query = """
%s UNION (%s)
ORDER BY
ISNULL(congruence_avg),
congruence_avg %s
"""%(nullquery, query, ('ASC', 'DESC')[descending])
return grouping_class.objects.raw(query, query_args)
def get_party_congruences(self, **kwargs):
return self.__get_average_congruences(Party, 'v.party', **kwargs)
def get_member_congruences(self, **kwargs):
return self.__get_average_congruences(Member, 'v.member_id', **kwargs)
def get_question_congruences(self, **kwargs):
return self.__get_average_congruences(Question, 'a.question_id', **kwargs)
def get_session_congruences(self, **kwargs):
return self.__get_average_congruences(Session, 'v.session_id', **kwargs)
def get_vote_congruences(self, for_member=None, for_party=None,
for_user=None):
# This could maybe be done without SQL, but my brain
# doesn't work enough for that at the moment
extra_where = ''
args = []
if for_member is not None:
extra_where += "AND v.member_id=%s\n"
args.append(for_member.pk)
if for_party is not None:
extra_where += "AND v.party=%s"
args.append(for_party.pk)
cong_user = self.get_congruence_user(for_user)
if cong_user is not None:
args.append(cong_user.id)
extra_where += "AND c.user_id=%s\n"
session_freq_query = \
"""
SELECT session_id, COUNT(session_id) AS freq
FROM opinions_voteoptioncongruence
GROUP BY session_id
"""
query = \
"""
SELECT
c.congruence/f.freq, c.*
FROM
opinions_voteoptioncongruence AS c,
votes_vote AS v,
opinions_answer AS a,
(%(session_freq)s) AS f
WHERE
c.session_id=v.session_id AND
f.session_id=c.session_id AND
c.vote=v.vote AND
v.member_id=a.member_id AND
a.option_id=c.option_id AND
c.congruence <> 0
%(extra_where)s
ORDER BY
a.question_id, c.option_id
""" % {'session_freq': session_freq_query, 'extra_where': extra_where}
return VoteOptionCongruence.objects.raw(query, args)
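# Usage sketch (member and keyword values hypothetical): the averages computed
# above are weighted by 1/freq so that a session mapped to many options does
# not dominate the result.
#
#   score = VoteOptionCongruence.objects.get_member_congruence(member)
#   top_parties = VoteOptionCongruence.objects.get_party_congruences(limit=5)
#   per_vote = VoteOptionCongruence.objects.get_vote_congruences(for_member=member)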
class VoteOptionCongruence(models.Model):
option = models.ForeignKey(Option, on_delete=models.CASCADE)
session = models.ForeignKey(Session, on_delete=models.CASCADE)
vote = models.CharField(max_length=1, choices=Vote.VOTE_CHOICES,
db_index=True)
congruence = models.FloatField()
user = models.ForeignKey(User, on_delete=models.CASCADE)
objects = VoteOptionCongruenceManager()
magic_username = settings.KAMU_OPINIONS_MAGIC_USER
def save(self, update_if_exists=False, **kwargs):
if update_if_exists:
congruence = self.congruence
self.congruence = None
matches = VoteOptionCongruence.objects.filter(user=self.user,
option=self.option, session=self.session, vote=self.vote)
if matches.count() > 0:
self = matches[0]
self.congruence = congruence
return models.Model.save(self, **kwargs)
def __unicode__(self):
args = (self.option.question.source, self.option.question.order,
self.option.order, self.vote, self.user, self.congruence)
return "%s/%d/%d-%s (%s): %0.02f" % args
class Meta:
unique_together = (('user', 'option', 'session', 'vote'), )
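# Usage sketch for the upsert-style save() above (values hypothetical):
# update_if_exists=True overwrites the congruence of an existing
# (user, option, session, vote) row instead of hitting the unique constraint.
#
#   voc = VoteOptionCongruence(user=user, option=opt, session=sess,
#                              vote='Y', congruence=0.5)
#   voc.save(update_if_exists=True)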
class QuestionSessionRelevance(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
session = models.ForeignKey(Session, on_delete=models.CASCADE)
relevance = models.FloatField()
user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
@classmethod
def get_relevant_sessions(cls, question):
query = \
"""
SELECT votes_session.id, SQRT(AVG(relevance)) AS question_relevance
FROM
(SELECT session_id, relevance
FROM opinions_questionsessionrelevance
WHERE question_id=%s
UNION ALL
SELECT session_id, ABS(congruence) AS relevance
FROM opinions_voteoptioncongruence, opinions_option
WHERE opinions_option.id=option_id AND question_id=%s)
as merged, votes_session
WHERE votes_session.id = session_id
GROUP BY session_id ORDER BY question_relevance DESC
"""
return Session.objects.raw(query, [question.id] * 2)
class Meta:
unique_together = (('question', 'session', 'user'), )
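# Usage sketch: relevance mixes manually stored relevances with the absolute
# values of the congruences given to the question's options, highest first.
#
#   for session in QuestionSessionRelevance.get_relevant_sessions(question):
#       print session.id, session.question_relevance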
| kansanmuisti/kamu | opinions/models.py | Python | agpl-3.0 | 14,959 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Pexego All Rights Reserved
# $Jesús Ventosinos Mayor <jesus@pexego.es>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock
import wizard
import procurement
| jgmanzanas/CMNT_004_15 | project-addons/location_moves/__init__.py | Python | agpl-3.0 | 1,014 |
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long
"""This module is used for parsing the received lines into a machine readable structure."""
from __future__ import annotations
import re
import typing
import eml_parser.decode
import eml_parser.regex
def noparenthesis(line: str) -> str:
"""Remove nested parenthesis, until none are present.
@FIXME rewrite this function.
Args:
line (str): Input text to search in for parenthesis.
Returns:
str: Return a string with all parenthesis removed.
"""
# check empty string
if not line:
return line
line_ = line
while True:
lline = line_
line_ = eml_parser.regex.noparenthesis_regex.sub('', line_)
if lline == line_:
break
return line_
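# Example (illustrative; exact whitespace in the result depends on
# eml_parser.regex.noparenthesis_regex): repeated substitution strips the
# innermost "(...)" groups until none remain.
#
#   noparenthesis('by mail.example.com (Postfix (Debian)) with ESMTP')
#   # -> roughly 'by mail.example.com  with ESMTP'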
def cleanline(line: str) -> str:
"""Remove space and ; from start/end of line.
Args:
line (str): Line to clean.
Returns:
str: Cleaned string.
"""
if line == '':
return line
return eml_parser.regex.cleanline_regex.sub('', line)
def get_domain_ip(line: str) -> typing.List[str]:
"""Method returns all domains, IPv4 and IPv6 addresses found in a given string.
Args:
line (str): String to search in.
Returns:
list: Unique list of strings with matches
"""
m = eml_parser.regex.dom_regex.findall(' ' + line) + eml_parser.regex.ipv4_regex.findall(line) + eml_parser.regex.ipv6_regex.findall(line)
return list(set(m))
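# Example (illustrative; actual matches depend on the regexes in
# eml_parser.regex, and set() makes the ordering unspecified):
#
#   get_domain_ip('from mail.example.org [192.0.2.1] (helo 2001:db8::1)')
#   # -> something like ['mail.example.org', '192.0.2.1', '2001:db8::1']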
def parserouting(line: str) -> typing.Dict[str, typing.Any]:
"""This method tries to parsed a e-mail header received line\
and extract machine readable information.
Note that there are a large number of formats for these lines
and a lot of weird ones which are not commonly used.
We try our best to match a large number of formats.
Args:
line (str): Received line to be parsed.
Returns:
dict: Returns a dict with the extracted information.
"""
# if re.findall(reg_date, line):
# return 'date\n'
# Preprocess the line to simplify from/by/with/for border detection.
out = {} # type: typing.Dict[str, typing.Any] # Result
out['src'] = line
line = line.lower() # Convert everything to lowercase
    npline = line.replace(')', ' ) ')  # re-space ")" (sequences like ")by " occur often)
    npline = npline.replace('(', ' ( ')  # re-space "("
    npline = npline.replace(';', ' ; ')  # re-space ";"
npline = noparenthesis(npline) # Remove any "()"
npline = ' '.join(npline.split()) # normalise space
npline = npline.strip('\n') # Remove any new-line
raw_find_data = eml_parser.regex.date_regex.findall(npline) # extract date on end line.
# Detect "sticked lines"
if ' received: ' in npline:
out['warning'] = ['Merged Received headers']
return out
if raw_find_data:
npdate = raw_find_data[0] # Remove spaces and starting ;
npdate = npdate.lstrip(';') # Remove Spaces and stating ; from date
npdate = npdate.strip()
else:
npdate = ''
npline = npline.replace(npdate, '') # Remove date from input line
npline = npline.strip(' ') # Remove any border WhiteSpace
borders = ['from ', 'by ', 'with ', 'for ']
result: typing.List[typing.Dict[str, typing.Any]] = []
    # Scan the line to determine the order and presence of the "from/by/with/for" words
for word in borders:
candidate = list(borders)
candidate.remove(word)
for endword in candidate:
if word in npline:
loc = npline.find(word)
end = npline.find(endword)
if end < loc or end == -1:
                    end = 0xfffffff  # sentinel: no matching end word found after this one
result.append({'name_in': word, 'pos': loc, 'name_out': endword, 'weight': end + loc})
# print({'name_in': word, 'pos': loc, 'name_out': endword, 'weight': end+loc})
    # Build the ordered "from/by/with/for" word list by sorting on position.
if not result:
out['warning'] = ['Nothing Parsable']
return out
tout = []
for word in borders:
result_max = 0xffffffff
line_max: typing.Dict[str, typing.Any] = {}
for eline in result:
if eline['name_in'] == word and eline['weight'] <= result_max:
result_max = eline['weight']
line_max = eline
if line_max:
tout.append([line_max.get('pos'), line_max.get('name_in')])
# structure is list[list[int, str]]
# we sort based on the first element of the sub list, i.e. int
tout = sorted(tout, key=lambda x: x[0])
# build regex.
reg = ''
for item in tout:
reg += item[1] + '(?P<' + item[1].strip() + '>.*)' # type: ignore
if npdate:
# escape special regex chars
reg += eml_parser.regex.escape_special_regex_chars.sub(r'''\\\1''', npdate)
reparse = re.compile(reg)
reparseg = reparse.search(line)
# Fill the data
if reparseg is not None:
for item in borders: # type: ignore
try:
out[item.strip()] = cleanline(reparseg.group(item.strip())) # type: ignore
except (LookupError, ValueError, AttributeError):
pass
if npdate:
out['date'] = eml_parser.decode.robust_string2date(npdate)
# Fixup for "From" in "for" field
# ie google, do that...
if out.get('for'):
# include spaces in test, otherwise there will be an exception with domains containing "from" in itself
if ' from ' in out.get('for', ''):
temp = re.split(' from ', out['for'])
out['for'] = temp[0]
out['from'] = '{} {}'.format(out['from'], ' '.join(temp[1:]))
m = eml_parser.regex.email_regex.findall(out['for'])
if m:
out['for'] = list(set(m))
else:
del out['for']
# Now.. find IP and Host in from
if out.get('from'):
out['from'] = get_domain_ip(out['from'])
if not out.get('from', []): # if array is empty remove
del out['from']
# Now.. find IP and Host in from
if out.get('by'):
out['by'] = get_domain_ip(out['by'])
if not out.get('by', []): # If array is empty remove
del out['by']
return out
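# Usage sketch (header text invented for illustration): a typical Received
# line yields the from/by hosts, any address found after "for", and a date.
#
#   line = ('from mail.example.org (mail.example.org [192.0.2.1]) '
#           'by mx.example.net with esmtp for <user@example.com>; '
#           'Mon, 1 Jan 2018 10:00:00 +0000')
#   parsed = parserouting(line)
#   # parsed['from'], parsed['by'], parsed['for'], parsed['date'], parsed['src']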
| sim0nx/eml_parser | eml_parser/routing.py | Python | agpl-3.0 | 6,393 |
from django.contrib import admin
from django.db import models as dmodels
from SIP import models
# get the models from SIP.models
mods = [x for x in models.__dict__.values() if issubclass(type(x), dmodels.base.ModelBase)]
admins = []
# for each model in our models module, prepare an admin class
# that will edit our model: ("<model_name>Admin", model)
for c in mods:
admins.append(("%sAdmin"%c.__name__, c))
# create the admin class and register it
for (ac, c) in admins:
    try:  # pass gracefully on duplicate registration errors
        admin.site.register(c, type(ac, (admin.ModelAdmin,), dict()))
    except admin.sites.AlreadyRegistered:
pass
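# Equivalent manual registration for a single model, for comparison
# ("Student" is a hypothetical model name from SIP.models):
#
#   class StudentAdmin(admin.ModelAdmin):
#       pass
#   admin.site.register(models.Student, StudentAdmin)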
| openiitbombayx/blendedmoocs-mis | IITBOMBAYX_PARTNERS/SIP/admin.py | Python | agpl-3.0 | 637 |