| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
# pyDome: A geodesic dome calculator
# Copyright (C) 2013 Daniel Williams
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# load useful modules
#
import numpy as np
import getopt
import sys
#
# load pyDome modules
#
from Polyhedral import *
from SymmetryTriangle import *
from GeodesicSphere import *
from Output import *
from Truncation import *
from BillOfMaterials import *
def display_help():
print
print 'pyDome: A geodesic dome calculator. Copyright 2013 by Daniel Williams'
print
print 'Required Command-Line Input:'
print
print '\t-o, --output=\tPath to output file(s). Extensions will be added. Generates DXF and WRL files by default, but only WRL file when "-F" option is active. Example: \"-o output/test\" produces files output/test.wrl and output/test.dxf.'
print
print 'Options:'
print
print '\t-r, --radius\tRadius of generated dome. Must be floating point. Default 1.0.'
print
print '\t-f, --frequency\tFrequency of generated dome. Must be an integer. Default 4.'
print
print '\t-v, --vthreshold\tMaximum distance at which two vertices are considered equal. Default 0.0000001. Must be floating point.'
print
print '\t-t, --truncation\tDistance (ratio) from the bottom to truncate. Default 0.499999. I advise using only the default or 0.333333. Must be floating point.'
print
print '\t-b, --bom-rounding\tThe number of decimal places to round chord length output in the generated Bill of Materials. Default 5. Must be an integer.'
print
print '\t-p, --polyhedron\tEither \"octahedron\" or \"icosahedron\". Default icosahedron.'
print
print '\t-F, --face\tFlag specifying whether to generate face output in WRL file. Cancels DXF file output and cannot be used with truncation.'
print
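#
# example invocations (paths and values below are illustrative):
#
#   python pyDome.py -o output/test
#   python pyDome.py -r 5.0 -f 3 -p octahedron -o output/dome
#   python pyDome.py -F -o output/faces
#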
def main():
#
# default values
#
radius = np.float64(1.)
frequency = 4
polyhedral = Icosahedron()
vertex_equal_threshold = 0.0000001
truncation_amount = 0.499999
run_truncate = False
bom_rounding_precision = 5
face_output = False
output_path = None
#
# no input arguments
#
if len(sys.argv[1:]) == 0:
display_help()
sys.exit(-1)
#
# parse command line
#
try:
opts, args = getopt.getopt(sys.argv[1:], 'r:f:v:t:b:p:Fo:', ['truncation=', 'vthreshold=', 'radius=', 'frequency=', 'help', 'bom-rounding=', 'polyhedron=', 'face', 'output='])
except getopt.error, msg:
print "for help use --help"
sys.exit(-1)
for o, a in opts:
if o in ('-o', '--output'):
output_path = a
if o in ('-p', '--polyhedron'):
if a == 'octahedron':
polyhedral = Octahedron()
if o in ('-b', '--bom-rounding'):
try:
bom_rounding_precision = int(a)
except:
print '-b or --bom-rounding argument must be an integer. Exiting.'
sys.exit(-1)
if o in ('-h', '--help'):
display_help()
sys.exit(0)
if o in ('-F', '--face'):
face_output = True
if o in ('-r', '--radius'):
try:
a = float(a)
radius = np.float64(a)
except:
print '-r or --radius argument must be a floating point number. Exiting.'
sys.exit(-1)
if o in ('-f', '--frequency'):
try:
frequency = int(a)
except:
print '-f or --frequency argument must be an integer. Exiting.'
sys.exit(-1)
if o in ('-v', '--vthreshold'):
try:
a = float(a)
vertex_equal_threshold = np.float64(a)
except:
print '-v or --vthreshold argument must be a floating point number. Exiting.'
sys.exit(-1)
if o in ('-t', '--truncation'):
try:
a = float(a)
truncation_amount = np.float64(a)
run_truncate = True
except:
print '-t or --truncation argument must be a floating point number. Exiting.'
sys.exit(-1)
#
# check for required options
#
if output_path == None:
print 'An output path and filename are required. Use the -o argument. Exiting.'
sys.exit(-1)
#
# check for mutually exclusive options
#
if face_output and run_truncate:
print 'Truncation does not work with face output at this time. Use either -t or -F but not both.'
exit(-1)
#
# generate geodesic sphere
#
symmetry_triangle = ClassOneMethodOneSymmetryTriangle(frequency, polyhedral)
sphere = GeodesicSphere(polyhedral, symmetry_triangle, vertex_equal_threshold, radius)
C_sphere = sphere.non_duplicate_chords
F_sphere = sphere.non_duplicate_face_nodes
V_sphere = sphere.sphere_vertices
#
# truncate
#
V = V_sphere
C = C_sphere
if run_truncate:
V, C = truncate(V_sphere, C_sphere, truncation_amount)
#
# write model output
#
if face_output:
OutputFaceVRML(V, F_sphere, output_path + '.wrl')
else:
OutputWireframeVRML(V, C, output_path + '.wrl')
OutputDXF(V, C, output_path + '.dxf')
#
# bill of materials
#
get_bill_of_materials(V, C, bom_rounding_precision)
#
# run the main function
#
if __name__ == "__main__":
main()
| badassdatascience/pyDome | pyDome.py | Python | gpl-3.0 | 5,599 |
from .message_media_downloadable import DownloadableMediaMessageProtocolEntity
from yowsup.layers.protocol_messages.protocolentities.attributes.attributes_image import ImageAttributes
from yowsup.layers.protocol_messages.protocolentities.attributes.attributes_message_meta import MessageMetaAttributes
from yowsup.layers.protocol_messages.protocolentities.attributes.attributes_message import MessageAttributes
class ImageDownloadableMediaMessageProtocolEntity(DownloadableMediaMessageProtocolEntity):
def __init__(self, image_attrs, message_meta_attrs):
# type: (ImageAttributes, MessageMetaAttributes) -> None
super(ImageDownloadableMediaMessageProtocolEntity, self).__init__(
"image", MessageAttributes(image=image_attrs), message_meta_attrs
)
@property
def media_specific_attributes(self):
return self.message_attributes.image
@property
def downloadablemedia_specific_attributes(self):
return self.message_attributes.image.downloadablemedia_attributes
@property
def width(self):
return self.media_specific_attributes.width
@width.setter
def width(self, value):
self.media_specific_attributes.width = value
@property
def height(self):
return self.media_specific_attributes.height
@height.setter
def height(self, value):
self.media_specific_attributes.height = value
@property
def jpeg_thumbnail(self):
return self.media_specific_attributes.jpeg_thumbnail
@jpeg_thumbnail.setter
def jpeg_thumbnail(self, value):
self.media_specific_attributes.jpeg_thumbnail = value if value is not None else b""
@property
def caption(self):
return self.media_specific_attributes.caption
@caption.setter
def caption(self, value):
self.media_specific_attributes.caption = value if value is not None else ""
| tgalal/yowsup | yowsup/layers/protocol_media/protocolentities/message_media_downloadable_image.py | Python | gpl-3.0 | 1,903 |
import uuid
from path_helpers import path
import numpy as np
try:
from base_node_rpc.proxy import ConfigMixinBase, StateMixinBase
import arduino_helpers.hardware.teensy as teensy
from .node import (Proxy as _Proxy, I2cProxy as _I2cProxy,
SerialProxy as _SerialProxy)
class ProxyMixin(object):
'''
Mixin class to add convenience wrappers around methods of the generated
`node.Proxy` class.
For example, expose config and state getters/setters as attributes.
'''
host_package_name = str(path(__file__).parent.name.replace('_', '-'))
def __init__(self, *args, **kwargs):
super(ProxyMixin, self).__init__(*args, **kwargs)
class Proxy(ProxyMixin, _Proxy):
pass
class I2cProxy(ProxyMixin, _I2cProxy):
pass
class SerialProxy(ProxyMixin, _SerialProxy):
pass
except (ImportError, TypeError):
Proxy = None
I2cProxy = None
SerialProxy = None
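#
# Illustrative usage (a sketch only: assumes the generated node API and
# attached hardware are present; the serial port name is a placeholder):
#
#   from multispeq1.proxy import SerialProxy
#   proxy = SerialProxy(port='/dev/ttyACM0')
#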
| ryanfobel/multispeq1.py | multispeq1/proxy.py | Python | gpl-3.0 | 996 |
from django.apps import AppConfig
class ExcelUploadConfig(AppConfig):
name = 'excel_upload'
| Bielicki/lcda | excel_upload/apps.py | Python | gpl-3.0 | 98 |
# Natural Language Toolkit: Classifiers
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes and interfaces for labeling tokens with category labels (or
"class labels"). Typically, labels are represented with strings
(such as ``'health'`` or ``'sports'``). Classifiers can be used to
perform a wide range of classification tasks. For example,
classifiers can be used...
- to classify documents by topic
- to classify ambiguous words by which word sense is intended
- to classify acoustic signals by which phoneme they represent
- to classify sentences by their author
Features
========
In order to decide which category label is appropriate for a given
token, classifiers examine one or more 'features' of the token. These
"features" are typically chosen by hand, and indicate which aspects
of the token are relevant to the classification decision. For
example, a document classifier might use a separate feature for each
word, recording how often that word occurred in the document.
Featuresets
===========
The features describing a token are encoded using a "featureset",
which is a dictionary that maps from "feature names" to "feature
values". Feature names are unique strings that indicate what aspect
of the token is encoded by the feature. Examples include
``'prevword'``, for a feature whose value is the previous word; and
``'contains-word(library)'`` for a feature that is true when a document
contains the word ``'library'``. Feature values are typically
booleans, numbers, or strings, depending on which feature they
describe.
Featuresets are typically constructed using a "feature detector"
(also known as a "feature extractor"). A feature detector is a
function that takes a token (and sometimes information about its
context) as its input, and returns a featureset describing that token.
For example, the following feature detector converts a document
(stored as a list of words) to a featureset describing the set of
words included in the document:
>>> # Define a feature detector function.
>>> def document_features(document):
... return dict([('contains-word(%s)' % w, True) for w in document])
Feature detectors are typically applied to each token before it is fed
to the classifier:
>>> # Classify each Gutenberg document.
>>> from nltk.corpus import gutenberg
>>> for fileid in gutenberg.fileids(): # doctest: +SKIP
... doc = gutenberg.words(fileid) # doctest: +SKIP
... print fileid, classifier.classify(document_features(doc)) # doctest: +SKIP
The parameters that a feature detector expects will vary, depending on
the task and the needs of the feature detector. For example, a
feature detector for word sense disambiguation (WSD) might take as its
input a sentence, and the index of a word that should be classified,
and return a featureset for that word. The following feature detector
for WSD includes features describing the left and right contexts of
the target word:
>>> def wsd_features(sentence, index):
... featureset = {}
... for i in range(max(0, index-3), index):
... featureset['left-context(%s)' % sentence[i]] = True
... for i in range(index, min(index+3, len(sentence))):
... featureset['right-context(%s)' % sentence[i]] = True
... return featureset
Training Classifiers
====================
Most classifiers are built by training them on a list of hand-labeled
examples, known as the "training set". Training sets are represented
as lists of ``(featuredict, label)`` tuples.
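
For example, a training set for a document classifier might pair
featuresets with topic labels (the data shown here is purely
illustrative):

>>> train = [({'contains-word(python)': True}, 'tech'),
...          ({'contains-word(ball)': True}, 'sports')]
>>> classifier = NaiveBayesClassifier.train(train) # doctest: +SKIP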
"""
from nltk.classify.api import ClassifierI, MultiClassifierI
from nltk.classify.megam import config_megam, call_megam
from nltk.classify.weka import WekaClassifier, config_weka
from nltk.classify.naivebayes import NaiveBayesClassifier
from nltk.classify.positivenaivebayes import PositiveNaiveBayesClassifier
from nltk.classify.decisiontree import DecisionTreeClassifier
from nltk.classify.rte_classify import rte_classifier, rte_features, RTEFeatureExtractor
from nltk.classify.util import accuracy, apply_features, log_likelihood
from nltk.classify.scikitlearn import SklearnClassifier
from nltk.classify.maxent import (MaxentClassifier, BinaryMaxentFeatureEncoding,
TypedMaxentFeatureEncoding,
ConditionalExponentialClassifier)
from nltk.classify.senna import Senna
from nltk.classify.textcat import TextCat
| adazey/Muzez | libs/nltk/classify/__init__.py | Python | gpl-3.0 | 4,636 |
#!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2017 DESY, Jan Kotanski <jkotan@mail.desy.de>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
#
""" Types converters """
import numpy
import sys
if sys.version_info > (3,):
long = int
def nptype(dtype):
""" converts to numpy types
:param dtype: h5 writer type type
:type dtype: :obj:`str`
:returns: numpy type
:rtype: :obj:`str`
"""
if str(dtype) in ['string', b'string']:
return 'str'
return dtype
class Converters(object):
""" set of converters
"""
@classmethod
def toBool(cls, value):
""" converts to bool
:param value: variable to convert
:type value: any
:returns: result in bool type
:rtype: :obj:`bool`
"""
if type(value).__name__ == 'str' or type(value).__name__ == 'unicode':
lvalue = value.strip().lower()
if lvalue == 'false' or lvalue == '0':
return False
else:
return True
elif value:
return True
return False
class NTP(object):
""" type converter
"""
#: (:obj:`dict` <:obj:`str` ,:obj:`str` >) map of Python:Tango types
pTt = {"long": "DevLong64", "str": "DevString",
"unicode": "DevString", "bool": "DevBoolean",
"int": "DevLong64", "int64": "DevLong64", "int32": "DevLong",
"int16": "DevShort", "int8": "DevUChar", "uint": "DevULong64",
"uint64": "DevULong64", "uint32": "DevULong",
"uint16": "DevUShort",
"uint8": "DevUChar", "float": "DevDouble", "float64": "DevDouble",
"float32": "DevFloat", "float16": "DevFloat",
"string": "DevString", "str": "DevString"}
#: (:obj:`dict` <:obj:`str` , :obj:`str` >) map of NEXUS : numpy types
nTnp = {"NX_FLOAT32": "float32", "NX_FLOAT64": "float64",
"NX_FLOAT": "float64", "NX_NUMBER": "float64",
"NX_INT": "int64", "NX_INT64": "int64",
"NX_INT32": "int32", "NX_INT16": "int16", "NX_INT8": "int8",
"NX_UINT64": "uint64", "NX_UINT32": "uint32",
"NX_UINT16": "uint16",
"NX_UINT8": "uint8", "NX_UINT": "uint64", "NX_POSINT": "uint64",
"NX_DATE_TIME": "string", "ISO8601": "string", "NX_CHAR": "string",
"NX_BOOLEAN": "bool"}
#: (:obj:`dict` <:obj:`str` , :obj:`type` or :obj:`types.MethodType` >) \
#: map of type : converting function
convert = {"float16": float, "float32": float, "float64": float,
"float": float, "int64": long, "int32": int,
"int16": int, "int8": int, "int": int, "uint64": long,
"uint32": long, "uint16": int,
"uint8": int, "uint": int, "string": str, "str": str,
"bool": Converters.toBool}
#: (:obj:`dict` <:obj:`str` , :obj:`str` >) map of tag attribute types
aTn = {"signal": "NX_INT", "axis": "NX_INT", "primary": "NX_INT32",
"offset": "NX_INT", "stride": "NX_INT", "file_time": "NX_DATE_TIME",
"file_update_time": "NX_DATE_TIME", "restricts": "NX_INT",
"ignoreExtraGroups": "NX_BOOLEAN",
"ignoreExtraFields": "NX_BOOLEAN",
"ignoreExtraAttributes": "NX_BOOLEAN",
"minOccus": "NX_INT", "maxOccus": "NX_INT"}
#: (:obj:`dict` <:obj:`str` , :obj:`str` >) \
#: map of vector tag attribute types
aTnv = {"vector": "NX_FLOAT"}
#: (:obj:`dict` <:obj:`int` , :obj:`str` >) map of rank : data format
rTf = {0: "SCALAR", 1: "SPECTRUM", 2: "IMAGE", 3: "VERTEX"}
def arrayRank(self, array):
""" array rank
:brief: It calculates the rank of the array
:param array: given array
:type array: any
:returns: rank
:rtype: :obj:`int`
"""
rank = 0
if hasattr(array, "__iter__") and not \
isinstance(array, (str, bytes)):
try:
rank = 1 + self.arrayRank(array[0])
except IndexError:
if hasattr(array, "shape") and len(array.shape) == 0:
rank = 0
else:
rank = 1
return rank
def arrayRankRShape(self, array):
""" array rank, inverse shape and type
:brief: It calculates the rank, inverse shape and type of
the first element of the list array
:param array: given array
:type array: any
:returns: (rank, inverse shape, type)
:rtype: (:obj:`int` , :obj:`list` <:obj:`int` > , :obj:`str` )
"""
rank = 0
shape = []
pythonDType = None
if hasattr(array, "__iter__") and not \
isinstance(array, (str, bytes)):
try:
rank, shape, pythonDType = self.arrayRankRShape(array[0])
rank += 1
shape.append(len(array))
except IndexError:
if hasattr(array, "shape") and len(array.shape) == 0:
rank = 0
if type(array) in [numpy.string_, numpy.str_]:
pythonDType = "str"
elif hasattr(array, "dtype"):
pythonDType = str(array.dtype)
else:
pythonDType = type(array.tolist()).__name__
else:
rank = 1
shape.append(len(array))
else:
if type(array) in [numpy.string_, numpy.str_]:
pythonDType = "str"
elif hasattr(array, "dtype"):
pythonDType = str(array.dtype)
elif hasattr(array, "tolist"):
pythonDType = type(array.tolist()).__name__
else:
pythonDType = type(array).__name__
return (rank, shape, pythonDType)
def arrayRankShape(self, array):
""" array rank, shape and type
:brief: It calculates the rank, shape and type of
the first element of the list array
:param array: given array
:type array: any
:returns: (rank, shape, type)
:rtype: (:obj:`int` , :obj:`list` <:obj:`int` > , :obj:`str` )
"""
rank, shape, pythonDType = self.arrayRankRShape(array)
if shape:
shape.reverse()
return (rank, shape, pythonDType)
def createArray(self, value, fun=None):
""" creates python array from the given array with applied
the given function to it elements
:param value: given array
:type value: any
:param fun: applied function
:type fun: :obj:`type` or :obj:`types.MethodType`
:returns: created array
:rtype: :obj:`list` <any>
"""
if not hasattr(value, "__iter__") or \
isinstance(value, (str, bytes)):
return fun(value) if fun else value
else:
return [self.createArray(v, fun) for v in value]
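# A minimal illustrative self-check (not part of the original module); it
# exercises the converters above on assumed sample data.
if __name__ == "__main__":
    ntp = NTP()
    print(nptype("string"))                                   # -> 'str'
    print(Converters.toBool("False"))                         # -> False
    print(ntp.arrayRankShape([[1, 2, 3], [4, 5, 6]]))         # -> (2, [2, 3], 'int')
    print(ntp.createArray(["1", "2"], NTP.convert["int64"]))  # -> [1, 2] on Python 3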
| nexdatas/writer | nxswriter/Types.py | Python | gpl-3.0 | 7,676 |
'''
Solution to the Eight Queens Problem using a genetic algorithm
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Copyright (C) 2012 Enes Ates
Authors: Enes Ates - enes@enesates.com
'''
import random
from random import randint
class Chromosome():
def __init__(self):
self.queens = []
self.fitness = 0
class GA_8Queens:
def __init__(self):
self.population = 10
self.chromosomes = []
self.prob_crossover = 0.9 # crossover possibility
self.prob_mutation = 0.1 # mutation possibility
self.fitness = 28 # target fitness: all 28 queen pairs non-attacking
self.generation = 0
self.max_generation = 50
self.optimum = Chromosome()
self.crossover_method = "two-point"
def run(self):
self.create_initial_chromosomes()
while not self.success():
self.next_gen()
self.generation_info(self.chromosomes)
print "Result", self.optimum.queens, self.optimum.fitness
def success(self):
return self.generation >= self.max_generation or \
self.fitness == self.optimum.fitness
def next_gen(self):
next_generation = []
self.generation += 1
success = False
next_generation.append(self.optimum)
while len(next_generation) < self.population and success == False:
success = self.crossover(self.chromosomes, next_generation)
self.chromosomes = next_generation
def generation_info(self, chromosomes):
i=0
print "\n\n"
print self.generation, ". generation"
print "-----------------------------\n"
for chrom in chromosomes:
i += 1
print i, ". chromosome: ", chrom.queens, ", fitness: ", chrom.fitness
if (chrom.fitness > self.optimum.fitness):
self.optimum = chrom
print "Optimum:", self.optimum.queens, self.optimum.fitness
def create_initial_chromosomes(self):
for i in range(0, self.population):
chromosome = Chromosome()
chromosome.queens = range(0, 8)
random.shuffle(chromosome.queens)
chromosome.fitness = self.calc_fitness(chromosome.queens)
self.chromosomes.append(chromosome)
self.generation += 1
self.generation_info(self.chromosomes)
def calc_fitness(self, queens):
fitness = self.fitness
for i in range(0, 8):
for j in range(i+1, 8):
if((j-i) == abs(queens[i] - queens[j])):
fitness -= 1
return fitness
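# Worked example (illustrative): for queens = [0, 1, 2, ..., 7] every queen
# lies on the main diagonal, so all 28 pairs satisfy (j - i) == |q[i] - q[j]|
# and the fitness is 28 - 28 = 0; a conflict-free placement keeps all 28
# pairs safe and scores the full 28.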
def crossover(self, chromosomes, next_generation):
first_chrom = self.choose_chromosome(chromosomes)
chromosomes.remove(first_chrom)
second_chrom = self.choose_chromosome(chromosomes)
chromosomes.append(first_chrom)
if random.random() < self.prob_crossover:
child_1 = Chromosome()
child_2 = Chromosome()
if self.crossover_method == "one-point":
child_1.queens = first_chrom.queens[0:5] + second_chrom.queens[5:8]
child_2.queens = second_chrom.queens[0:5] + first_chrom.queens[5:8]
elif self.crossover_method == "two-point":
child_1.queens = first_chrom.queens[0:3] + second_chrom.queens[3:6] + first_chrom.queens[6:8]
child_2.queens = second_chrom.queens[0:3] + first_chrom.queens[3:6] + second_chrom.queens[6:8]
elif self.crossover_method == "random-point":
for i in range(0,8):
first, second = random.sample([first_chrom.queens[i], second_chrom.queens[i]], 2)
child_1.queens.append(first), child_2.queens.append(second)
child_1.fitness = self.calc_fitness(child_1.queens)
child_2.fitness = self.calc_fitness(child_2.queens)
if child_1.fitness == self.fitness or child_2.fitness == self.fitness:
success = True
print "Crossover result:", first_chrom.queens, "with", second_chrom.queens, "-->", child_1.queens, "fitness:", child_1.fitness
success = self.mutation(child_1, next_generation)
print "Crossover result:", first_chrom.queens, "with", second_chrom.queens, "-->", child_2.queens, "fitness:", child_2.fitness
success = self.mutation(child_2, next_generation)
else:
success = self.mutation(first_chrom, next_generation)
success = self.mutation(second_chrom, next_generation)
return success
def mutation(self, chromosome, next_generation):
for i in range(0,8):
if random.random() < self.prob_mutation:
chromosome.queens[i] = random.randint(0, 7)
chromosome.fitness = self.calc_fitness(chromosome.queens)
print "Mutation result:", chromosome.queens, "fitness:", chromosome.fitness
next_generation.append(chromosome)
if chromosome.fitness == self.fitness:
return True
else:
return False
def choose_chromosome(self, chromosomes):
'''roulette-wheel selection: picks a chromosome from the given list
with probability proportional to its fitness'''
total_fitness = 0
for chrom in chromosomes:
total_fitness += chrom.fitness
rand = randint(1, total_fitness)
roulette = 0
for chrom in chromosomes:
roulette += chrom.fitness
if rand <= roulette:
return chrom
if __name__ == "__main__":
ga8 = GA_8Queens()
ga8.run()
| enesates/GA_8Queens | GA_8Queens.py | Python | gpl-3.0 | 6,582 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: letsencrypt
author: "Michael Gruener (@mgruener)"
version_added: "2.2"
short_description: Create SSL certificates with Let's Encrypt
description:
- "Create and renew SSL certificates with Let's Encrypt. Let’s Encrypt is a
free, automated, and open certificate authority (CA), run for the
public’s benefit. For details see U(https://letsencrypt.org). The current
implementation supports the http-01, tls-sni-02 and dns-01 challenges."
- "To use this module, it has to be executed at least twice. Either as two
different tasks in the same run or during multiple runs."
- "Between these two tasks you have to fulfill the required steps for the
chosen challenge by whatever means necessary. For http-01 that means
creating the necessary challenge file on the destination webserver. For
dns-01 the necessary dns record has to be created. tls-sni-02 requires
you to create a SSL certificate with the appropriate subjectAlternativeNames.
It is I(not) the responsibility of this module to perform these steps."
- "For details on how to fulfill these challenges, you might have to read through
U(https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7)"
- "Although the defaults are chosen so that the module can be used with
the Let's Encrypt CA, the module can be used with any service using the ACME
protocol."
requirements:
- "python >= 2.6"
- openssl
options:
account_key:
description:
- "File containing the the Let's Encrypt account RSA key."
- "Can be created with C(openssl rsa ...)."
required: true
account_email:
description:
- "The email address associated with this account."
- "It will be used for certificate expiration warnings."
required: false
default: null
acme_directory:
description:
- "The ACME directory to use. This is the entry point URL to access
CA server API."
- "For safety reasons the default is set to the Let's Encrypt staging server.
This will create technically correct, but untrusted certificates."
required: false
default: https://acme-staging.api.letsencrypt.org/directory
agreement:
description:
- "URI to a terms of service document you agree to when using the
ACME service at C(acme_directory)."
required: false
default: 'https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf'
challenge:
description: The challenge to be performed.
required: false
choices: [ 'http-01', 'dns-01', 'tls-sni-02']
default: 'http-01'
csr:
description:
- "File containing the CSR for the new certificate."
- "Can be created with C(openssl csr ...)."
- "The CSR may contain multiple Subject Alternate Names, but each one
will lead to an individual challenge that must be fulfilled for the
CSR to be signed."
required: true
aliases: ['src']
data:
description:
- "The data to validate ongoing challenges."
- "The value that must be used here will be provided by a previous use
of this module."
required: false
default: null
dest:
description: The destination file for the certificate.
required: true
aliases: ['cert']
remaining_days:
description:
- "The number of days the certificate must have left being valid.
If C(cert_days < remaining_days), then it will be renewed.
If the certificate is not renewed, module return values will not
include C(challenge_data)."
required: false
default: 10
'''
EXAMPLES = '''
- letsencrypt:
account_key: /etc/pki/cert/private/account.key
csr: /etc/pki/cert/csr/sample.com.csr
dest: /etc/httpd/ssl/sample.com.crt
register: sample_com_challenge
# perform the necessary steps to fulfill the challenge
# for example:
#
# - copy:
# dest: /var/www/html/{{ sample_com_challenge['challenge_data']['sample.com']['http-01']['resource'] }}
# content: "{{ sample_com_challenge['challenge_data']['sample.com']['http-01']['resource_value'] }}"
# when: sample_com_challenge|changed
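#
# or, for the dns-01 challenge, create the matching TXT record instead
# (the route53 module and zone below are illustrative, not required):
#
# - route53:
#     command: create
#     zone: sample.com
#     record: "{{ sample_com_challenge['challenge_data']['sample.com']['dns-01']['resource'] }}.sample.com"
#     type: TXT
#     value: "\"{{ sample_com_challenge['challenge_data']['sample.com']['dns-01']['resource_value'] }}\""
#     when: sample_com_challenge|changed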
- letsencrypt:
account_key: /etc/pki/cert/private/account.key
csr: /etc/pki/cert/csr/sample.com.csr
dest: /etc/httpd/ssl/sample.com.crt
data: "{{ sample_com_challenge }}"
'''
RETURN = '''
cert_days:
description: the number of days the certificate remains valid.
returned: success
type: int
challenge_data:
description: per domain / challenge type challenge data
returned: changed
type: complex
contains:
resource:
description: the challenge resource that must be created for validation
returned: changed
type: string
sample: .well-known/acme-challenge/evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA
resource_value:
description: the value the resource has to produce for the validation
returned: changed
type: string
sample: IlirfxKKXA...17Dt3juxGJ-PCt92wr-oA
authorizations:
description: ACME authorization data.
returned: changed
type: complex
contains:
authorization:
description: ACME authorization object. See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.1.2
returned: success
type: dict
'''
import binascii
import copy
import locale
import textwrap
import datetime
def nopad_b64(data):
return base64.urlsafe_b64encode(data).decode('utf8').replace("=", "")
def simple_get(module, url):
resp, info = fetch_url(module, url, method='GET')
result = None
try:
content = resp.read()
except AttributeError:
if info['body']:
content = info['body']
if content:
if info['content-type'].startswith('application/json'):
try:
result = module.from_json(content.decode('utf8'))
except ValueError:
module.fail_json(msg="Failed to parse the ACME response: {0} {1}".format(url, content))
else:
result = content
if info['status'] >= 400:
module.fail_json(msg="ACME request failed: CODE: {0} RESULT: {1}".format(info['status'], result))
return result
def get_cert_days(module, cert_file):
'''
Return the days the certificate in cert_file remains valid and -1
if the file was not found.
'''
if not os.path.exists(cert_file):
return -1
openssl_bin = module.get_bin_path('openssl', True)
openssl_cert_cmd = [openssl_bin, "x509", "-in", cert_file, "-noout", "-text"]
_, out, _ = module.run_command(openssl_cert_cmd, check_rc=True)
try:
not_after_str = re.search(r"\s+Not After\s*:\s+(.*)", out.decode('utf8')).group(1)
not_after = datetime.datetime.fromtimestamp(time.mktime(time.strptime(not_after_str, '%b %d %H:%M:%S %Y %Z')))
except AttributeError:
module.fail_json(msg="No 'Not after' date found in {0}".format(cert_file))
except ValueError:
module.fail_json(msg="Failed to parse 'Not after' date of {0}".format(cert_file))
now = datetime.datetime.utcnow()
return (not_after - now).days
# function source: network/basics/uri.py
def write_file(module, dest, content):
'''
Write content to destination file dest, only if the content
has changed.
'''
changed = False
# write the content to a tempfile first
_, tmpsrc = tempfile.mkstemp()
f = open(tmpsrc, 'wb')
try:
f.write(content)
except Exception as err:
os.remove(tmpsrc)
module.fail_json(msg="failed to create temporary content file: %s" % str(err))
f.close()
checksum_src = None
checksum_dest = None
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
os.remove(tmpsrc)
module.fail_json(msg="Source %s does not exist" % (tmpsrc))
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
module.fail_json(msg="Source %s not readable" % (tmpsrc))
checksum_src = module.sha1(tmpsrc)
# check if there is no dest file
if os.path.exists(dest):
# raise an error if copy has no permission on dest
if not os.access(dest, os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s not writable" % (dest))
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s not readable" % (dest))
checksum_dest = module.sha1(dest)
else:
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination dir %s not writable" % (os.path.dirname(dest)))
if checksum_src != checksum_dest:
try:
shutil.copyfile(tmpsrc, dest)
changed = True
except Exception as err:
os.remove(tmpsrc)
module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err)))
os.remove(tmpsrc)
return changed
class ACMEDirectory(object):
'''
The ACME server directory. Gives access to the available resources
and the Replay-Nonce for a given URI. This only works for
URIs that permit GET requests (so normally not the ones that
require authentication).
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.2
'''
def __init__(self, module):
self.module = module
self.directory_root = module.params['acme_directory']
self.directory = simple_get(self.module, self.directory_root)
def __getitem__(self, key):
return self.directory[key]
def get_nonce(self, resource=None):
url = self.directory_root
if resource is not None:
url = resource
_, info = fetch_url(self.module, url, method='HEAD')
if info['status'] != 200:
self.module.fail_json(msg="Failed to get replay-nonce, got status {0}".format(info['status']))
return info['replay-nonce']
class ACMEAccount(object):
'''
ACME account object. Handles the authorized communication with the
ACME server. Provides access to account bound information like
the currently active authorizations and valid certificates
'''
def __init__(self, module):
self.module = module
self.agreement = module.params['agreement']
self.key = module.params['account_key']
self.email = module.params['account_email']
self.data = module.params['data']
self.directory = ACMEDirectory(module)
self.uri = None
self.changed = False
self._authz_list_uri = None
self._certs_list_uri = None
if not os.path.exists(self.key):
module.fail_json(msg="Account key %s not found" % (self.key))
self._openssl_bin = module.get_bin_path('openssl', True)
pub_hex, pub_exp = self._parse_account_key(self.key)
self.jws_header = {
"alg": "RS256",
"jwk": {
"e": nopad_b64(binascii.unhexlify(pub_exp.encode("utf-8"))),
"kty": "RSA",
"n": nopad_b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))),
},
}
self.init_account()
def get_keyauthorization(self, token):
'''
Returns the key authorization for the given token
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.1
'''
accountkey_json = json.dumps(self.jws_header['jwk'], sort_keys=True, separators=(',', ':'))
thumbprint = nopad_b64(hashlib.sha256(accountkey_json.encode('utf8')).digest())
return "{0}.{1}".format(token, thumbprint)
def _parse_account_key(self, key):
'''
Parses an RSA key file in PEM format and returns the modulus
and public exponent of the key
'''
openssl_keydump_cmd = [self._openssl_bin, "rsa", "-in", key, "-noout", "-text"]
_, out, _ = self.module.run_command(openssl_keydump_cmd, check_rc=True)
pub_hex, pub_exp = re.search(
r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
out.decode('utf8'), re.MULTILINE | re.DOTALL).groups()
pub_exp = "{0:x}".format(int(pub_exp))
if len(pub_exp) % 2:
pub_exp = "0{0}".format(pub_exp)
return pub_hex, pub_exp
def send_signed_request(self, url, payload):
'''
Sends a JWS signed HTTP POST request to the ACME server and returns
the response as dictionary
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.2
'''
protected = copy.deepcopy(self.jws_header)
protected["nonce"] = self.directory.get_nonce()
try:
payload64 = nopad_b64(self.module.jsonify(payload).encode('utf8'))
protected64 = nopad_b64(self.module.jsonify(protected).encode('utf8'))
except Exception as e:
self.module.fail_json(msg="Failed to encode payload / headers as JSON: {0}".format(e))
openssl_sign_cmd = [self._openssl_bin, "dgst", "-sha256", "-sign", self.key]
sign_payload = "{0}.{1}".format(protected64, payload64).encode('utf8')
_, out, _ = self.module.run_command(openssl_sign_cmd, data=sign_payload, check_rc=True, binary_data=True)
data = self.module.jsonify({
"header": self.jws_header,
"protected": protected64,
"payload": payload64,
"signature": nopad_b64(out),
})
resp, info = fetch_url(self.module, url, data=data, method='POST')
result = None
try:
content = resp.read()
except AttributeError:
if info['body']:
content = info['body']
if content:
if info['content-type'].startswith('application/json'):
try:
result = self.module.from_json(content.decode('utf8'))
except ValueError:
self.module.fail_json(msg="Failed to parse the ACME response: {0} {1}".format(url, content))
else:
result = content
return result, info
def _new_reg(self, contact=[]):
'''
Registers a new ACME account. Returns True if the account was
created and False if it already existed (i.e. it was not newly
created)
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.3
'''
if self.uri is not None:
return True
new_reg = {
'resource': 'new-reg',
'agreement': self.agreement,
'contact': contact
}
result, info = self.send_signed_request(self.directory['new-reg'], new_reg)
if 'location' in info:
self.uri = info['location']
if info['status'] in [200, 201]:
# Account did not exist
self.changed = True
return True
elif info['status'] == 409:
# Account did exist
return False
else:
self.module.fail_json(msg="Error registering: {0} {1}".format(info['status'], result))
def init_account(self):
'''
Create or update an account on the ACME server. As the only way
(without knowing an account URI) to test if an account exists
is to try and create one with the provided account key, this
method will always result in an account being present (except
in error situations). If the account already exists, it will
update the contact information.
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.3
'''
contact = []
if self.email:
contact.append('mailto:' + self.email)
# if this is not a new registration (e.g. existing account)
if not self._new_reg(contact):
# pre-existing account, get account data...
result, _ = self.send_signed_request(self.uri, {'resource': 'reg'})
# XXX: letsencrypt/boulder#1435
if 'authorizations' in result:
self._authz_list_uri = result['authorizations']
if 'certificates' in result:
self._certs_list_uri = result['certificates']
# ...and check if update is necessary
do_update = False
if 'contact' in result:
if contact != result['contact']:
do_update = True
elif len(contact) > 0:
do_update = True
if do_update:
upd_reg = result
upd_reg['contact'] = contact
result, _ = self.send_signed_request(self.uri, upd_reg)
self.changed = True
def get_authorizations(self):
'''
Return a list of currently active authorizations
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4
'''
authz_list = {'authorizations': []}
if self._authz_list_uri is None:
# XXX: letsencrypt/boulder#1435
# Workaround, retrieve the known authorization urls
# from the data attribute
# It is also a way to limit the queried authorizations, which
# might become relevant at some point
if (self.data is not None) and ('authorizations' in self.data):
for auth in self.data['authorizations']:
authz_list['authorizations'].append(auth['uri'])
else:
return []
else:
# TODO: need to handle pagination
authz_list = simple_get(self.module, self._authz_list_uri)
authz = []
for auth_uri in authz_list['authorizations']:
auth = simple_get(self.module, auth_uri)
auth['uri'] = auth_uri
authz.append(auth)
return authz
class ACMEClient(object):
'''
ACME client class. Uses an ACME account object and a CSR to
start and validate ACME challenges and download the respective
certificates.
'''
def __init__(self, module):
self.module = module
self.challenge = module.params['challenge']
self.csr = module.params['csr']
self.dest = module.params['dest']
self.account = ACMEAccount(module)
self.directory = self.account.directory
self.authorizations = self.account.get_authorizations()
self.cert_days = -1
self.changed = self.account.changed
if not os.path.exists(self.csr):
module.fail_json(msg="CSR %s not found" % (self.csr))
self._openssl_bin = module.get_bin_path('openssl', True)
self.domains = self._get_csr_domains()
def _get_csr_domains(self):
'''
Parse the CSR and return the list of requested domains
'''
openssl_csr_cmd = [self._openssl_bin, "req", "-in", self.csr, "-noout", "-text"]
_, out, _ = self.module.run_command(openssl_csr_cmd, check_rc=True)
domains = set([])
common_name = re.search(r"Subject:.*? CN=([^\s,;/]+)", out.decode('utf8'))
if common_name is not None:
domains.add(common_name.group(1))
subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE | re.DOTALL)
if subject_alt_names is not None:
for san in subject_alt_names.group(1).split(", "):
if san.startswith("DNS:"):
domains.add(san[4:])
return domains
def _get_domain_auth(self, domain):
'''
Return the first authorization object for the given domain.
Return None if no active authorization for the given domain was found.
'''
if self.authorizations is None:
return None
for auth in self.authorizations:
if (auth['identifier']['type'] == 'dns') and (auth['identifier']['value'] == domain):
return auth
return None
def _add_or_update_auth(self, auth):
'''
Add or update the given authorization in the global authorizations list.
Return True if the auth was updated/added and False if no change was
necessary.
'''
for index, cur_auth in enumerate(self.authorizations):
if (cur_auth['uri'] == auth['uri']):
# does the auth parameter contain updated data?
if cur_auth != auth:
# yes, update our current authorization list
self.authorizations[index] = auth
return True
else:
return False
# this is a new authorization, add it to the list of current
# authorizations
self.authorizations.append(auth)
return True
def _new_authz(self, domain):
'''
Create a new authorization for the given domain.
Return the authorization object of the new authorization
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4
'''
if self.account.uri is None:
return
new_authz = {
"resource": "new-authz",
"identifier": {"type": "dns", "value": domain},
}
result, info = self.account.send_signed_request(self.directory['new-authz'], new_authz)
if info['status'] not in [200, 201]:
self.module.fail_json(msg="Error requesting challenges: CODE: {0} RESULT: {1}".format(info['status'], result))
else:
result['uri'] = info['location']
return result
def _get_challenge_data(self, auth):
'''
Returns a dict with the data for all proposed (and supported) challenges
of the given authorization.
'''
data = {}
# no need to choose a specific challenge here as this module
# is not responsible for fulfilling the challenges. Calculate
# and return the required information for each challenge.
for challenge in auth['challenges']:
type = challenge['type']
token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
keyauthorization = self.account.get_keyauthorization(token)
# NOTE: tls-sni-01 is not supported by choice
# too complex to be useful and tls-sni-02 is an alternative
# as soon as it is implemented server side
if type == 'http-01':
# https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.2
resource = '.well-known/acme-challenge/' + token
value = keyauthorization
elif type == 'tls-sni-02':
# https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.3
token_digest = hashlib.sha256(token.encode('utf8')).hexdigest()
ka_digest = hashlib.sha256(keyauthorization.encode('utf8')).hexdigest()
len_token_digest = len(token_digest)
len_ka_digest = len(ka_digest)
resource = 'subjectAlternativeNames'
value = [
"{0}.{1}.token.acme.invalid".format(token_digest[:len_token_digest / 2], token_digest[len_token_digest / 2:]),
"{0}.{1}.ka.acme.invalid".format(ka_digest[:len_ka_digest / 2], ka_digest[len_ka_digest / 2:]),
]
elif type == 'dns-01':
# https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.4
resource = '_acme-challenge'
value = nopad_b64(hashlib.sha256(keyauthorization).digest()).encode('utf8')
else:
continue
data[type] = {'resource': resource, 'resource_value': value}
return data
def _validate_challenges(self, auth):
'''
Validate the authorization provided in the auth dict. Returns True
when the validation was successful and False when it was not.
'''
for challenge in auth['challenges']:
if self.challenge != challenge['type']:
continue
uri = challenge['uri']
token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
keyauthorization = self.account.get_keyauthorization(token)
challenge_response = {
"resource": "challenge",
"keyAuthorization": keyauthorization,
}
result, info = self.account.send_signed_request(uri, challenge_response)
if info['status'] not in [200, 202]:
self.module.fail_json(msg="Error validating challenge: CODE: {0} RESULT: {1}".format(info['status'], result))
status = ''
while status not in ['valid', 'invalid', 'revoked']:
result = simple_get(self.module, auth['uri'])
result['uri'] = auth['uri']
if self._add_or_update_auth(result):
self.changed = True
# draft-ietf-acme-acme-02
# "status (required, string): ...
# If this field is missing, then the default value is "pending"."
if 'status' not in result:
status = 'pending'
else:
status = result['status']
time.sleep(2)
if status == 'invalid':
error_details = ''
# multiple challenges could have failed at this point, gather error
# details for all of them before failing
for challenge in result['challenges']:
if challenge['status'] == 'invalid':
error_details += ' CHALLENGE: {0}'.format(challenge['type'])
if 'error' in challenge:
error_details += ' DETAILS: {0};'.format(challenge['error']['detail'])
else:
error_details += ';'
self.module.fail_json(msg="Authorization for {0} returned invalid: {1}".format(result['identifier']['value'], error_details))
return status == 'valid'
def _new_cert(self):
'''
Create a new certificate based on the csr.
Return the certificate object as dict
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.5
'''
openssl_csr_cmd = [self._openssl_bin, "req", "-in", self.csr, "-outform", "DER"]
_, out, _ = self.module.run_command(openssl_csr_cmd, check_rc=True)
new_cert = {
"resource": "new-cert",
"csr": nopad_b64(out),
}
result, info = self.account.send_signed_request(self.directory['new-cert'], new_cert)
if info['status'] not in [200, 201]:
self.module.fail_json(msg="Error new cert: CODE: {0} RESULT: {1}".format(info['status'], result))
else:
return {'cert': result, 'uri': info['location']}
def _der_to_pem(self, der_cert):
'''
Convert the DER format certificate in der_cert to a PEM format
certificate and return it.
'''
return """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
"\n".join(textwrap.wrap(base64.b64encode(der_cert).decode('utf8'), 64)))
def do_challenges(self):
'''
Create new authorizations for all domains of the CSR and return
the challenge details for the chosen challenge type.
'''
data = {}
for domain in self.domains:
auth = self._get_domain_auth(domain)
if auth is None:
new_auth = self._new_authz(domain)
self._add_or_update_auth(new_auth)
data[domain] = self._get_challenge_data(new_auth)
self.changed = True
elif (auth['status'] == 'pending') or ('status' not in auth):
# draft-ietf-acme-acme-02
# "status (required, string): ...
# If this field is missing, then the default value is "pending"."
self._validate_challenges(auth)
# _validate_challenges updates the global authorization dict,
# so get the current version of the authorization we are working
# on to retrieve the challenge data
data[domain] = self._get_challenge_data(self._get_domain_auth(domain))
return data
def get_certificate(self):
'''
Request a new certificate and write it to the destination file.
Only do this if a destination file was provided and if all authorizations
for the domains of the csr are valid. No Return value.
'''
if self.dest is None:
return
for domain in self.domains:
auth = self._get_domain_auth(domain)
if auth is None or ('status' not in auth) or (auth['status'] != 'valid'):
return
cert = self._new_cert()
if cert['cert'] is not None:
pem_cert = self._der_to_pem(cert['cert'])
if write_file(self.module, self.dest, pem_cert):
self.cert_days = get_cert_days(self.module, self.dest)
self.changed = True
def main():
module = AnsibleModule(
argument_spec=dict(
account_key=dict(required=True, type='path'),
account_email=dict(required=False, default=None, type='str'),
acme_directory=dict(required=False, default='https://acme-staging.api.letsencrypt.org/directory', type='str'),
agreement=dict(required=False, default='https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf', type='str'),
challenge=dict(required=False, default='http-01', choices=['http-01', 'dns-01', 'tls-sni-02'], type='str'),
csr=dict(required=True, aliases=['src'], type='path'),
data=dict(required=False, no_log=True, default=None, type='dict'),
dest=dict(required=True, aliases=['cert'], type='path'),
remaining_days=dict(required=False, default=10, type='int'),
),
supports_check_mode=True,
)
# AnsibleModule() changes the locale, so change it back to C because we rely on time.strptime() when parsing certificate dates.
locale.setlocale(locale.LC_ALL, "C")
cert_days = get_cert_days(module, module.params['dest'])
if cert_days < module.params['remaining_days']:
# If checkmode is active, base the changed state solely on the status
# of the certificate file as all other actions (accessing an account, checking
# the authorization status...) would lead to potential changes of the current
# state
if module.check_mode:
module.exit_json(changed=True, authorizations={}, challenge_data={}, cert_days=cert_days)
else:
client = ACMEClient(module)
client.cert_days = cert_days
data = client.do_challenges()
client.get_certificate()
module.exit_json(changed=client.changed, authorizations=client.authorizations, challenge_data=data, cert_days=client.cert_days)
else:
module.exit_json(changed=False, cert_days=cert_days)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| erwilan/ansible | lib/ansible/modules/web_infrastructure/letsencrypt.py | Python | gpl-3.0 | 32,185 |
from player import Player
import pandas as pd
import time
import asyncio
from midiutil import MIDIFile
PATTERNS = [[0, 0, 0, 0, 0, 0, 0, 0], #0
[1, 0, 0, 0, 0, 0, 0, 0], #1
[1, 0, 0, 0, 1, 0, 0, 0], #2
[1, 0, 1, 0, 1, 0, 0, 0], #3
[1, 0, 1, 0, 1, 0, 1, 0], #4
[1, 1, 1, 0, 1, 0, 1, 0], #5
[1, 1, 1, 0, 1, 1, 1, 0], #6
[1, 1, 1, 1, 1, 1, 1, 1]] #7
MAX_MB = [10,10,1,1] # max mb, these values directly impact the intensity of the patterns generated
def pattern_from_mb_interval(mb, max_mb):
''' maps mb count to one of the patterns depending on the value'''
pattern_index = int(mb*7/max_mb)
try: # if mb > max_mb value, use the last pattern
pattern = PATTERNS[pattern_index]
except IndexError:
pattern = PATTERNS[7]
return pattern
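# Worked example (illustrative): with mb = 5 and max_mb = 10,
# int(5 * 7 / 10) == 3, so PATTERNS[3] = [1, 0, 1, 0, 1, 0, 0, 0] is used;
# mb values large enough to push the index past 7 hit the except branch
# and fall back to the densest pattern, PATTERNS[7].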
def index_to_note(index):
'''Patterns are converted into a 16 x N matrix where each column holds
the pattern for one of the 16 notes. Given the index of a column, this
returns the MIDI code of the corresponding note.'''
dt = {0:36, 1:38, 2:41, 3:43,4:48,5:50,6:53,7:55,8:60,9:62,10:65,11:67,12:72,13:74,14:77,15:79}
return dt[index]
'''DATA PROCESSING'''
def fix_empty_regions(arr):
''' fills in missing rows so there are always 4 rows for each second'''
new_list = []
counter = 1
index = 0
while True:
try:
i = arr[index]
except:
break
if counter == int(i[3]):
new_list.append(i)
else:
new_list.append((i[0],i[1],i[2],counter, 0, 0, 0,0,0,0))
index-=1
if counter > 3:
counter = 1
else:
counter+=1
index+=1
return new_list
def group_to_regions(fixed_data):
'''groups the data into lists of 4 items'''
pats = []
for a, b, c, d in zip(*[iter(fixed_data)]*4):
pat = []
for i in [a,b,c,d]:
pat.append(i)
pats.append(pat)
return pats
def get_patterns(grouped_data):
'''convert data into patterns'''
all_patterns = []
for second in grouped_data:
for row in second:
all_patterns.append(pattern_from_mb_interval(row[6], MAX_MB[0]))
all_patterns.append(pattern_from_mb_interval(row[7], MAX_MB[1]))
all_patterns.append(pattern_from_mb_interval(row[8], MAX_MB[2]))
all_patterns.append(pattern_from_mb_interval(row[9], MAX_MB[3]))
return all_patterns
def group_patterns(all_patterns):
'''group the patterns into a 16 x N matrix'''
counter = 0
all_pat = []
new_pat = []
for pattern in all_patterns:
new_pat.append(pattern)
counter+=1
if counter > 15:
counter = 0
all_pat.append(new_pat)
new_pat = []
return all_pat
'''UTILS TO PLAY SOUNDS'''
'''
async def play_pattern(pattern, tempo, note_index):
delay = 60/tempo
pitch = index_to_note(note_index)
for note in pattern:
if note == 1:
Player.play_sound(pitch, 127, 2000)
await asyncio.sleep(delay)
else:
await asyncio.sleep(delay)
def sequencer(patterns, tempo):
'in > list of lists of patterns, plays notes using pygame for testing'
for sequences in patterns:
index = 0
ioloop = asyncio.get_event_loop()
tasks = []
for sequence in sequences:
#pitch = index_to_note(index)
tasks.append(ioloop.create_task(play_pattern(sequence, tempo, index)))
index +=1
ioloop.run_until_complete(asyncio.wait(tasks))
ioloop.close()
'''
'''MIDI UTILS'''
def pattern_to_midi(pattern, file, note, start_time, duration = 1, volume=127):
'''writes one pattern to midi'''
track = 0
channel = 0
for ping in pattern:
if ping != 0:
file.addNote(track, channel, note, start_time, duration,volume)
start_time+=1
def patterns_to_midi(patterns, file, start_time):
'''writes 16 patterns to midi, each pattern is for a different note'''
note_index = 0
for pattern in patterns:
note = index_to_note(note_index)
pattern_to_midi(pattern, file, note, start_time)
note_index +=1
def grouped_patterns_to_midi(grouped_patterns, file):
'''takes grouped patterns and writes them to midi'''
start_time = 0
counter = 0
length = len(grouped_patterns)
for patterns in grouped_patterns:
print('processed ' + str(counter) + ' patterns out of ' + str(length))
counter+=1
patterns_to_midi(patterns, file, start_time)
start_time += 8
data = pd.read_csv('aggregated_data_snippet.csv') # this takes data aggregated into regions from agg_csv_to_csv.py
fixed_data = fix_empty_regions(data.to_records())
grouped_data = group_to_regions(fixed_data)
patterns = get_patterns(grouped_data)
grouped_patterns = group_patterns(patterns)
midi = MIDIFile(1)
midi.addTempo(0, 0, 120)
grouped_patterns_to_midi(grouped_patterns[1000:3000], midi) # python hangs if trying to write the whole file
print('Done processing, writing to file')
output_file = open("test.mid", "wb")
midi.writeFile(output_file)
output_file.close()
| Technariumas/Marimba | data_analysis/algorithm_to_midi_test.py | Python | gpl-3.0 | 4,817 |
# pylint: disable=missing-module-docstring, missing-class-docstring
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('rdrhc_calendar', '0013_auto_20171016_1915'),
]
operations = [
migrations.RenameField(
model_name='shift',
old_name='user',
new_name='sb_user',
),
migrations.RenameField(
model_name='shiftcode',
old_name='user',
new_name='sb_user',
),
migrations.AlterUniqueTogether(
name='shiftcode',
unique_together=set([('code', 'sb_user', 'role')]),
),
]
| studybuffalo/studybuffalo | study_buffalo/rdrhc_calendar/migrations/0014_auto_20171016_1922.py | Python | gpl-3.0 | 813 |
import difflib
import os
import pytest
from fusesoc.core import Core
def compare_fileset(fileset, name, files):
assert name == fileset.name
for i in range(len(files)):
assert files[i] == fileset.file[i].name
def test_core_info():
tests_dir = os.path.dirname(__file__)
cores_root = os.path.join(tests_dir, 'cores')
for core_name in ['sockit', 'mor1kx-generic']:
core = Core(os.path.join(cores_root, core_name, core_name+'.core'))
gen_info = '\n'.join([x for x in core.info().split('\n') if 'Core root' not in x])
with open(os.path.join(tests_dir, __name__, core_name+".info")) as f:
assert f.read() == gen_info, core_name
def test_core_parsing():
from fusesoc.vlnv import Vlnv
cores_root = os.path.join(os.path.dirname(__file__), 'cores', 'misc')
core = Core(os.path.join(cores_root, 'nomain.core'))
assert core.name == Vlnv("::nomain:0")
import sys
if sys.version_info[0] > 2:
with pytest.raises(SyntaxError) as e:
core = Core(os.path.join(cores_root, "duplicateoptions.core"))
assert "option 'file_type' in section 'fileset dummy' already exists" in str(e.value)
def test_capi1_get_parameters():
tests_dir = os.path.join(os.path.dirname(__file__),
__name__)
with pytest.raises(SyntaxError) as e:
core = Core(os.path.join(tests_dir, 'parameters_nodatatype.core'))
assert "Invalid datatype '' for parameter" in str(e.value)
with pytest.raises(SyntaxError) as e:
core = Core(os.path.join(tests_dir, 'parameters_invaliddatatype.core'))
assert "Invalid datatype 'badtype' for parameter" in str(e.value)
with pytest.raises(SyntaxError) as e:
core = Core(os.path.join(tests_dir, 'parameters_noparamtype.core'))
assert "Invalid paramtype '' for parameter" in str(e.value)
with pytest.raises(SyntaxError) as e:
core = Core(os.path.join(tests_dir, 'parameters_invalidparamtype.core'))
assert "Invalid paramtype 'badtype' for parameter" in str(e.value)
def test_get_scripts():
flag_combos = [{'target' : 'sim' , 'is_toplevel' : False},
{'target' : 'sim' , 'is_toplevel' : True},
{'target' : 'synth', 'is_toplevel' : False},
{'target' : 'synth', 'is_toplevel' : True},
]
filename = os.path.join(os.path.dirname(__file__), 'cores', 'misc', 'scriptscore.core')
core = Core(filename, '', 'dummy_build_root')
for flags in flag_combos:
env = {
'BUILD_ROOT' : 'dummy_build_root',
'FILES_ROOT' : 'dummyroot'
}
result = core.get_scripts("dummyroot", flags)
expected = {}
if flags['target'] == 'sim':
sections = ['post_run', 'pre_build', 'pre_run']
else:
if flags['is_toplevel']:
env['SYSTEM_ROOT'] = core.files_root
sections = ['pre_build', 'post_build']
else:
sections = []
for section in sections:
_name = flags['target']+section+'_scripts{}'
expected[section] = [{'cmd' : ['sh', os.path.join('dummyroot', _name.format(i))],
'name' : _name.format(i),
'env' : env} for i in range(2)]
assert expected == result
def test_get_tool():
cores_root = os.path.join(os.path.dirname(__file__), 'cores')
core = Core(os.path.join(cores_root, 'atlys', 'atlys.core'))
assert None == core.get_tool({'target' : 'sim', 'tool' : None})
assert 'icarus' == core.get_tool({'target' : 'sim', 'tool' : 'icarus'})
assert 'ise' == core.get_tool({'target' : 'synth', 'tool' : None})
assert 'vivado' == core.get_tool({'target' : 'synth', 'tool' : 'vivado'})
core = Core(os.path.join(cores_root, 'sockit', 'sockit.core'))
assert 'icarus' == core.get_tool({'target' : 'sim', 'tool' : None})
assert 'icarus' == core.get_tool({'target' : 'sim', 'tool' : 'icarus'})
del core.main.backend
assert None == core.get_tool({'target' : 'synth', 'tool' : None})
assert 'vivado' == core.get_tool({'target' : 'synth', 'tool' : 'vivado'})
core.main.backend = 'quartus'
def test_get_tool_options():
cores_root = os.path.join(os.path.dirname(__file__), 'cores')
core = Core(os.path.join(cores_root, 'mor1kx-generic', 'mor1kx-generic.core'))
assert {'iverilog_options' : ['-DSIM']} == core.get_tool_options({'is_toplevel' : True, 'tool' : 'icarus'})
assert {} == core.get_tool_options({'is_toplevel' : True, 'tool' : 'modelsim'})
assert {'fuse_options' : ['some','isim','options']} == core.get_tool_options({'is_toplevel' : True, 'tool' : 'isim'})
expected = {'xelab_options' : ['--timescale 1ps/1ps', '--debug typical',
'dummy', 'options', 'for', 'xelab']}
assert expected == core.get_tool_options({'is_toplevel' : True, 'tool' : 'xsim'})
assert {} == core.get_tool_options({'is_toplevel' : False, 'tool' : 'icarus'})
core = Core(os.path.join(cores_root, 'elf-loader', 'elf-loader.core'))
assert {'libs' : ['-lelf']} == core.get_tool_options({'is_toplevel' : False, 'tool' : 'verilator'})
assert {} == core.get_tool_options({'is_toplevel' : True, 'tool' : 'invalid'})
def test_get_toplevel():
filename = os.path.join(os.path.dirname(__file__),
__name__,
"atlys.core")
core = Core(filename)
assert 'orpsoc_tb' == core.get_toplevel({'tool' : 'icarus'})
assert 'orpsoc_tb' == core.get_toplevel({'tool' : 'icarus', 'testbench' : None})
assert 'tb' == core.get_toplevel({'tool' : 'icarus', 'testbench' : 'tb'})
assert 'orpsoc_top' == core.get_toplevel({'tool' : 'vivado'})
filename = os.path.join(os.path.dirname(__file__),
__name__,
"sockit.core")
core = Core(filename)
assert 'dummy_tb' == core.get_toplevel({'tool' : 'icarus'})
assert 'dummy_tb' == core.get_toplevel({'tool' : 'icarus', 'testbench' : None})
assert 'tb' == core.get_toplevel({'tool' : 'icarus', 'testbench' : 'tb'})
assert 'orpsoc_top' == core.get_toplevel({'tool' : 'vivado'})
def test_icestorm():
filename = os.path.join(os.path.dirname(__file__),
__name__,
"c3demo.core")
core = Core(filename)
assert len(core.file_sets) == 3
compare_fileset(core.file_sets[0], 'rtl_files', ['c3demo.v', 'ledpanel.v','picorv32.v'])
compare_fileset(core.file_sets[1], 'tb_files' , ['firmware.hex', '$YOSYS_DAT_DIR/ice40/cells_sim.v', 'testbench.v'])
#Check that backend files are converted to fileset properly
compare_fileset(core.file_sets[2], 'backend_files', ['c3demo.pcf'])
assert core.file_sets[2].file[0].file_type == 'PCF'
assert core.icestorm.export_files == []
assert core.icestorm.arachne_pnr_options == ['-s', '1', '-d', '8k']
assert core.icestorm.top_module == 'c3demo'
assert core.icestorm.warnings == []
def test_ise():
filename = os.path.join(os.path.dirname(__file__),
__name__,
"atlys.core")
core = Core(filename)
#Check filesets
assert len(core.file_sets) == 4
assert core.file_sets[0].name == 'verilog_src_files'
assert core.file_sets[1].name == 'verilog_tb_src_files'
assert core.file_sets[2].name == 'verilog_tb_private_src_files'
#Check that backend files are converted to fileset properly
compare_fileset(core.file_sets[3], 'backend_files', ['data/atlys.ucf'])
assert core.file_sets[3].file[0].file_type == 'UCF'
#Check backend section
assert core.ise.export_files == []
assert core.ise.family == 'spartan6'
assert core.ise.device == 'xc6slx45'
assert core.ise.package == 'csg324'
assert core.ise.speed == '-2'
assert core.ise.top_module == 'orpsoc_top'
assert core.ise.warnings == []
def test_quartus():
filename = os.path.join(os.path.dirname(__file__),
__name__,
"sockit.core")
core = Core(filename)
#Check filesets
assert len(core.file_sets) == 4
assert core.file_sets[0].name == 'verilog_src_files'
assert core.file_sets[1].name == 'verilog_tb_src_files'
assert core.file_sets[2].name == 'verilog_tb_private_src_files'
#Check that backend files are converted to fileset properly
assert len(core.file_sets[3].file) == 3
compare_fileset(core.file_sets[3], 'backend_files', ['data/sockit.qsys', 'data/sockit.sdc', 'data/pinmap.tcl'])
assert core.file_sets[3].file[0].file_type == 'QSYS'
assert core.file_sets[3].file[1].file_type == 'SDC'
assert core.file_sets[3].file[2].file_type == 'tclSource'
#Check backend section
assert core.quartus.quartus_options == '--64bit'
assert core.quartus.family == '"Cyclone V"'
assert core.quartus.device == '5CSXFC6D6F31C8ES'
assert core.quartus.top_module == 'orpsoc_top'
assert core.quartus.warnings == []
def test_simulator():
#Explicit toplevel
filename = os.path.join(os.path.dirname(__file__),
__name__,
"c3demo.core")
core = Core(filename)
assert core.simulator['toplevel'] == 'testbench'
#Implicit toplevel
filename = os.path.join(os.path.dirname(__file__),
__name__,
"atlys.core")
core = Core(filename)
assert core.simulator['toplevel'] == 'orpsoc_tb'
def test_verilator():
cores_root = os.path.join(os.path.dirname(__file__), __name__)
core = Core(os.path.join(cores_root, "verilator_managed_systemc.core"))
expected = {'cli_parser' : 'managed', 'libs' : [], 'mode' : 'sc'}
assert expected == core.get_tool_options({'is_toplevel' : True, 'tool' : 'verilator'})
assert len(core.file_sets) == 2
compare_fileset(core.file_sets[0], 'verilator_src_files', ['file1.sc', 'file2.sc'])
assert core.file_sets[0].file[0].file_type == 'systemCSource'
assert core.file_sets[0].file[1].file_type == 'systemCSource'
compare_fileset(core.file_sets[1], 'verilator_tb_toplevel', [])
| imphil/fusesoc | tests/test_capi1.py | Python | gpl-3.0 | 10,310 |
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.cross_validation import train_test_split
from sklearn.metrics import mean_squared_error
from .auto_segment_FEGT import BasicSegmenter_FEGT
def demo(X = None, y = None, test_size = 0.1):
if X is None:
boston = load_boston()
X = pd.DataFrame(boston.data)
y = pd.DataFrame(boston.target)
base_estimator = DecisionTreeRegressor(max_depth = 5)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
print X_train.shape
clf = BasicSegmenter_FEGT(ngen=30, init_sample_percentage = 1, n_votes=10, n = 10, base_estimator = base_estimator)
clf.fit(X_train, y_train)
print clf.score(X_test,y_test)
y = clf.predict(X_test)
print mean_squared_error(y, y_test)
print y.shape
print type(y)
return clf
| bhanu-mnit/EvoML | evoml/subsampling/test_auto_segmentEG_FEGT.py | Python | gpl-3.0 | 1,004 |
import random
from math import sqrt
import numpy
def validate_cost(result, boot_size, delta=500):
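# Accept a result when the bootstrapped mean of its "cost" metric stays within
# budget + delta; results that expose no cost metric are accepted by default.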
budget = result["budget"]
for metric_name, _, data_process in result['analysis']:
if metric_name == "cost":
cost_data = list(x() for x in data_process)
data_analysis = yield_analysis(cost_data, boot_size)
cost_val = data_analysis["btstrpd"]["metrics"]
return cost_val <= budget + delta
return True
def find_acceptable_result_for_budget(results, boot_size):
delta = 500
prev_budget = results[-1]['budget']
for result in reversed(results):
budget = result['budget']
delta += prev_budget - budget
if validate_cost(result,boot_size, delta):
return result
prev_budget = budget
return None
def average(xs):
if len(xs) == 0:
return -float("inf")
return sum(xs) * 1.0 / len(xs)
def sample_wr(population, k):
"""Chooses k random elements (with replacement) from a population"""
n = len(population) - 1
return [population[int(random.randint(0, n))] for i in range(k)]
def bootstrap(population, f, n, k, alpha):
btstrp = sorted(f(sample_wr(population, k)) for i in range(n))
return {
"confidence": 100.0 * (1 - 2 * alpha),
"from": btstrp[int(1.0 * n * alpha)],
"to": btstrp[int(1.0 * n * (1 - alpha))],
"metrics": f(population)
}
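# Illustrative use (hypothetical data): a 95% confidence interval for the mean,
# from n=1000 resamples of size k=4 with alpha=0.025:
#   ci = bootstrap([1.0, 2.0, 3.0, 4.0], average, 1000, 4, 0.025)
#   ci["confidence"]                     # 95.0, since 100 * (1 - 2 * 0.025)
#   ci["from"], ci["metrics"], ci["to"]  # lower bound, point estimate, upper bound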
def yield_analysis(data_process, boot_size):
q1 = numpy.percentile(data_process, 25)
q3 = numpy.percentile(data_process, 75)
iq = q3 - q1
low_inn_fence = q1 - 1.5*iq
upp_inn_fence = q3 + 1.5*iq
low_out_fence = q1 - 3*iq
upp_out_fence = q3 + 3*iq
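# Tukey fences: e.g. with q1=10 and q3=20, iq=10, so the inner fences are
# [-5, 35] (mild outliers fall outside them) and the outer fences are
# [-20, 50] (extreme outliers fall outside them).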
# noinspection PyRedeclaration
extr_outliers = len([x
for x in data_process
if (x < low_out_fence or upp_out_fence < x)])
# noinspection PyRedeclaration
mild_outliers = len([x for x in data_process if (x < low_inn_fence or upp_inn_fence < x)]) - extr_outliers
extr_outliers = extr_outliers > 0 and "{0:6.2f}%".format(extr_outliers * 100.0 / len(data_process)) or "--"
mild_outliers = mild_outliers > 0 and "{0:6.2f}%".format(mild_outliers * 100.0 / len(data_process)) or "--"
metrics_nooutliers = average([x for x in data_process if low_inn_fence <= x <= upp_inn_fence])
try:
mean_nooutliers = float(average([x for x in data_process if low_inn_fence <= x <= upp_inn_fence]))
variance_nooutliers = [(x - mean_nooutliers) ** 2 for x in data_process if low_inn_fence <= x <= upp_inn_fence]
stdev_nooutliers = sqrt(average(variance_nooutliers))
except ValueError:
stdev_nooutliers = -float("inf")
mean_nooutliers = float("inf")
btstrpd = bootstrap(data_process, average, boot_size, int(len(data_process) * 0.66), 0.025)
goodbench = "✓"
try:
mean = float(average(data_process))
variance = [(x - mean) ** 2 for x in data_process]
stdev = sqrt(average(variance))
lower = mean - 3 * stdev
upper = mean + 3 * stdev
if len([x for x in data_process if lower <= x <= upper]) < 0.95 * len(data_process):
goodbench = "╳╳╳╳╳"
except ValueError:
stdev = lower = upper = mean = float("inf")
goodbench = "?"
try:
mean_nooutliers_diff = 100.0 * (mean_nooutliers - mean) / mean
except ZeroDivisionError:
mean_nooutliers_diff = float("inf")
try:
stdev_nooutliers_diff = 100.0 * (stdev_nooutliers - stdev) / stdev
except ZeroDivisionError:
stdev_nooutliers_diff = float("inf")
dispersion_warn = ""
try:
pr_dispersion = 100.0 * (float(btstrpd["to"]) - float(btstrpd["from"])) / btstrpd["metrics"]
if abs(pr_dispersion) > 30.:
dispersion_warn = " HIGH"
except ZeroDivisionError:
pr_dispersion = float("+Infinity")
return {
"low_inn_fence": low_inn_fence,
"upp_inn_fence": upp_inn_fence,
"low_out_fence": low_out_fence,
"upp_out_fence": upp_out_fence,
"stdev": stdev,
"mean": mean,
"lower": lower,
"upper": upper,
"goodbench": goodbench,
"btstrpd": btstrpd,
"mild_outliers": mild_outliers,
"extr_outliers": extr_outliers,
"metrics_nooutliers": metrics_nooutliers,
"mean_nooutliers_diff": mean_nooutliers_diff,
"stdev_nooutliers": stdev_nooutliers,
"stdev_nooutliers_diff": stdev_nooutliers_diff,
"pr_dispersion": pr_dispersion,
"dispersion_warn": dispersion_warn
}
# return low_inn_fence, upp_inn_fence, low_out_fence, upp_out_fence, stdev, mean, lower, upper, goodbench, btstrpd,
# stdev, mild_outliers, extr_outliers, metrics_nooutliers, mean_nooutliers_diff, stdev_nooutliers,
# stdev_nooutliers_diff, pr_dispersion, dispersion_warn | kgadek/evogil | statistic/stats_bootstrap.py | Python | gpl-3.0 | 4,906 |
import matplotlib.pyplot as plt
from bob.io.base import load
from bob.io.base.test_utils import datafile
from bob.io.image import imshow
from bob.ip.facedetect.tinyface import TinyFacesDetector
from matplotlib.patches import Rectangle
# load colored test image
color_image = load(datafile("test_image_multi_face.png", "bob.ip.facedetect"))
is_mxnet_available = True
try:
import mxnet
except Exception:
is_mxnet_available = False
if not is_mxnet_available:
imshow(color_image)
else:
# detect all faces
detector = TinyFacesDetector()
detections = detector.detect(color_image)
imshow(color_image)
plt.axis("off")
for annotations in detections:
topleft = annotations["topleft"]
bottomright = annotations["bottomright"]
size = bottomright[0] - topleft[0], bottomright[1] - topleft[1]
# draw bounding boxes
plt.gca().add_patch(
Rectangle(
topleft[::-1],
size[1],
size[0],
edgecolor="b",
facecolor="none",
linewidth=2,
)
) | bioidiap/bob.ip.facedetect | doc/plot/detect_faces_tinyface.py | Python | gpl-3.0 | 1,124 |
#!/usr/bin/python
# Copyright 2013 Sumana Harihareswara
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A silly game in Python. See the README.
"""
import sys
import random
import textwrap
def anykey():
x = raw_input("Press Return to continue. ") + "a"
class City(object):
"""Each City has a name, a set of destinations one step away, and a clue."""
def __init__(self, n, c):
self.dests = []
self.name = n
self.clue = c
class Villain(object):
def __init__(self):
self.name = random.choice(["Carmen", "Waldo", "Edward Snowden", "Lyra"])
self.location = random.choice([cbl, chmr, ftl])
class Hero(object):
def __init__(self):
self.location = ind
self.name = raw_input("Detective at keyboard, identify yourself: ")
class Game():
def __init__(self):
self.player = Hero()
self.nemesis = Villain()
def __repr__(self):
return """A session of the game, with a hero and villain set up with names and locations."""
def wincondition(self):
"""The player wins when s/he is in the same City as the nemesis."""
return self.player.location == self.nemesis.location
def playturn(self):
print "%s, you are now in %s and you can head to:" % (self.player.name, self.player.location.name)
self.where2go()
print "You ask around about %s and learn that %s" % (self.nemesis.name, self.nemesis.location.clue)
choice = raw_input('OK, now which way will you go? Choose a number. ')
self.choose(choice)
self.wincondition()
def wingame(self):
print "You found her in %s so you win!" % currentsession.nemesis.location.name
playagain=raw_input('Would you like to play again? Y/N: ')
if (playagain == "N") or (playagain == "n"):
sys.exit()
else:
self.player.location = ind
self.nemesis = Villain()
print "Get ready for a new game!"
anykey()
def where2go(self):
for i,x in enumerate(self.player.location.dests):
print "%d. %s" % (i+1, x.name)
def choose(self, path):
try:
path = int(path)
except ValueError:
print "That doesn't make sense, %s, because it's not the number for one of your possible destinations." % self.player.name
print "So you stay in %s." % self.player.location.name
return
if path < 1 or path > (len(self.player.location.dests)):
return "That doesn't make sense, %s, so you stay in %s." % (self.player.name, self.player.location.name)
else:
self.player.location = self.player.location.dests[path-1]
if self.wincondition(): self.wingame()
self.nemesis.location = random.choice(self.nemesis.location.dests)
return "You follow %s to %s." % (self.nemesis.name, self.player.location.name)
ind = City("Independence", "she thought she'd stock up for a journey -- bullets, yokes of oxen, and whatnot.")
sjo = City("Saint Joseph", "she had a headache and needed to find some baby aspirin.")
cbl = City("Council Bluffs", "she knew that you can't beat City Hall, but thought another municipal body might back down more easily.")
fkn = City("Fort Kearney", "she wanted to visit the easternmost point of the Platte River Valley's natural roadway.")
chmr = City("Chimney Rock", "the tow-headed woman was tired of spelunking and wanted to try climbing.")
ftl = City("Fort Laramie", "she had a lot of questions about the American Fur Company.")
vc = City("Virginia City", "she wanted to see the birthplace of Calamity Jane.")
sp = City("South Pass", "she said she was fixin' to cross the Continental Divide!")
slc = City("Salt Lake City", "she said she was planning on having coffee with the Prophet... they didn't have the heart to tell her.")
fh = City("Fort Hall", "she asked about the Snake River country.")
pdx = City("Portland", "she said she longed to see the future home of Open Source Bridge, the yearly conference by the Stumptown Syndicate.")
# Clue wit by Leonard. Thank you @leonardr.
ind.dests = [fkn]
sjo.dests = [fkn]
cbl.dests = [fkn]
fkn.dests = [cbl, ind, ftl, sjo, vc, chmr]
chmr.dests = [fkn]
ftl.dests = [vc, sp, fkn]
vc.dests = [ftl, fkn]
sp.dests = [fh, ftl, slc]
slc.dests = [sp, fh]
fh.dests = [sp, pdx, slc]
pdx.dests = [fh]
def test_bidirectionalpaths():
for city in [ind, sjo, cbl, fkn, chmr, ftl, vc, sp, slc, fh, pdx]:
for dest in city.dests:
try:
assert city in dest.dests
except AssertionError:
print "bidirectional fail! City" , city.name , "was not in" , dest.name , "destinations."
dest.dests.append(city)
print "fixed it!" , city.name , "now in destinations for" , dest.name , "in this list:", map(lambda x: x.name,dest.dests)
test_bidirectionalpaths()
def test_pathfinding():
# try to get ind-fkn-ftl-sp-slc-fh-pdx
for x, y in [[ind, fkn], [fkn, ftl], [ftl, sp], [sp, slc], [slc, fh], [fh, pdx]]:
try:
assert y in x.dests
except AssertionError:
print "whoops!", y.name, "not in the destination list for", x.name
# would be good to do pathfinding
gpl = """You are now playing:
Where On The Oregon Trail is Carmen Sandiego?
Copyright (C) 2013 Sumana Harihareswara and licensed under the GNU Public License.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it under certain conditions; see https://www.gnu.org/licenses/gpl.txt for details."""
if __name__=="__main__":
print gpl
currentsession = Game()
currentrank = "Okay, %s, your current rank is: Carpenter. Welcome to %s." % (currentsession.player.name, currentsession.player.location.name)
print textwrap.fill(currentrank,70,replace_whitespace=False)
assert currentsession.nemesis != currentsession.player
anykey()
print "%s has stolen a wagon tongue and Interpol has assigned you to catch her! Get ready for a chase!" % currentsession.nemesis.name
while True:
currentsession.playturn()
| brainwane/carmen | carmen.py | Python | gpl-3.0 | 6,748 |
import nengo
model = nengo.Network()
with model:
a = nengo.Ensemble(n_neurons=100, dimensions=1)
stim_a = nengo.Node([0])
nengo.Connection(stim_a, a)
b = nengo.Ensemble(n_neurons=100, dimensions=1)
stim_b = nengo.Node([0])
nengo.Connection(stim_b, b)
c = nengo.Ensemble(n_neurons=100, dimensions=1)
nengo.Connection(a, c)
nengo.Connection(b, c)
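# A sketch of how one might run this model outside nengo_gui (standard nengo
# API, not part of the original script): add a probe inside the model context
# and simulate, e.g.
#   p = nengo.Probe(c, synapse=0.01)
# then, at module level:
#   with nengo.Simulator(model) as sim:
#       sim.run(1.0)
#   # sim.data[p] then holds the decoded estimate of a + b over time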
| tcstewar/nengo_assignments | groningen_2018/addition.py | Python | gpl-3.0 | 422 |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
import zipfile
from pyload.plugin.Extractor import Extractor, ArchiveError, CRCError, PasswordError
from pyload.utils import fs_encode
class UnZip(Extractor):
__name = "UnZip"
__type = "extractor"
__version = "1.12"
__description = """Zip extractor plugin"""
__license = "GPLv3"
__authors = [("Walter Purcaro", "vuolter@gmail.com")]
EXTENSIONS = [".zip", ".zip64"]
NAME = __name__.rsplit('.', 1)[1]
VERSION = "(python %s.%s.%s)" % (sys.version_info[0], sys.version_info[1], sys.version_info[2])
@classmethod
def isUsable(cls):
return sys.version_info[:2] >= (2, 6)
def list(self, password=None):
with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
z.setpassword(password)
return z.namelist()
def check(self, password):
pass
def verify(self):
with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
badfile = z.testzip()
if badfile:
raise CRCError(badfile)
else:
raise PasswordError
def extract(self, password=None):
try:
with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
z.setpassword(password)
badfile = z.testzip()
if badfile:
raise CRCError(badfile)
else:
z.extractall(self.out)
except (zipfile.BadZipfile, zipfile.LargeZipFile), e:
raise ArchiveError(e)
except RuntimeError, e:
if "encrypted" in e:
raise PasswordError
else:
raise ArchiveError(e)
else:
self.files = z.namelist()
| ardi69/pyload-0.4.10 | pyload/plugin/extractor/UnZip.py | Python | gpl-3.0 | 1,891 |
import os
import sys
import signal
import argparse
import requests
from urllib.parse import urljoin
from socket import gethostname
from io import BytesIO
from zipfile import ZipFile
from shutil import move, rmtree
from uuid import uuid4
from time import time, sleep
from subprocess import Popen, check_output, STDOUT, CalledProcessError
from fame.core import fame_init
from fame.core.module import ModuleInfo
from fame.core.internals import Internals
from fame.common.config import fame_config
from fame.common.constants import MODULES_ROOT
from fame.common.pip import pip_install
UNIX_INSTALL_SCRIPTS = {
"install.sh": ["sh", "{}"],
"install.py": ["python", "{}"]
}
WIN_INSTALL_SCRIPTS = {
"install.cmd": ["{}"],
"install.py": ["python", "{}"]
}
class Worker:
def __init__(self, queues, celery_args, refresh_interval):
self.queues = list(set(queues))
self.celery_args = [arg for arg in celery_args.split(' ') if arg]
self.refresh_interval = refresh_interval
def update_modules(self):
# Module updates are only needed for remote workers
if fame_config.remote:
# First, backup current code
backup_path = os.path.join(fame_config.temp_path, 'modules_backup_{}'.format(uuid4()))
move(MODULES_ROOT, backup_path)
# Replace current code with code fetched from web server
url = urljoin(fame_config.remote, '/modules/download')
try:
response = requests.get(url, stream=True, headers={'X-API-KEY': fame_config.api_key})
response.raise_for_status()
os.makedirs(MODULES_ROOT)
with ZipFile(BytesIO(response.content), 'r') as zipf:
zipf.extractall(MODULES_ROOT)
rmtree(backup_path)
print("Updated modules.")
except Exception as e:
print(("Could not update modules: '{}'".format(e)))
print("Restoring previous version")
move(backup_path, MODULES_ROOT)
self.update_module_requirements()
def update_module_requirements(self):
for module in ModuleInfo.get_collection().find():
module = ModuleInfo(module)
if 'error' in module:
del(module['error'])
if module['type'] == "Processing":
should_update = (module['queue'] in self.queues)
elif module['type'] in ["Threat Intelligence", "Reporting", "Filetype"]:
should_update = True
else:
should_update = (not fame_config.remote)
if should_update:
self.update_python_requirements(module)
self.launch_install_scripts(module)
module.save()
def update_python_requirements(self, module):
requirements = self._module_requirements(module)
if requirements:
print(("Installing requirements for '{}' ({})".format(module['name'], requirements)))
rcode, output = pip_install('-r', requirements)
# In case pip failed
if rcode:
self._module_installation_error(requirements, module, output)
def launch_install_scripts(self, module):
scripts = self._module_install_scripts(module)
for script in scripts:
try:
print(("Launching installation script '{}'".format(' '.join(script))))
check_output(script, stderr=STDOUT)
except CalledProcessError as e:
self._module_installation_error(' '.join(script), module, e.output.decode('utf-8', errors='replace'))
except Exception as e:
self._module_installation_error(' '.join(script), module, e)
def _module_installation_error(self, cmd, module, errors):
errors = "{}: error on '{}':\n\n{}".format(cmd, gethostname(), errors)
module['enabled'] = False
module['error'] = errors
print(errors)
def _module_requirements(self, module):
return module.get_file('requirements.txt')
def _module_install_scripts(self, module):
results = []
if sys.platform == "win32":
INSTALL_SCRIPTS = WIN_INSTALL_SCRIPTS
else:
INSTALL_SCRIPTS = UNIX_INSTALL_SCRIPTS
for filename in INSTALL_SCRIPTS:
filepath = module.get_file(filename)
if filepath:
cmdline = []
for arg in INSTALL_SCRIPTS[filename]:
cmdline.append(arg.format(filepath))
results.append(cmdline)
return results
# Delete files older than 7 days and empty directories
def clean_temp_dir(self):
current_time = time()
for root, dirs, files in os.walk(fame_config.temp_path, topdown=False):
for f in files:
filepath = os.path.join(root, f)
file_mtime = os.path.getmtime(filepath)
if (current_time - file_mtime) > (7 * 24 * 3600):
try:
os.remove(filepath)
except:
pass
for d in dirs:
dirpath = os.path.join(root, d)
try:
os.rmdir(dirpath)
except:
pass
def start(self):
try:
self.last_run = time()
self.clean_temp_dir()
self.update_modules()
self.process = self._new_celery_worker()
while True:
updates = Internals.get(name='updates')
if updates['last_update'] > self.last_run:
# Stop running worker
os.kill(self.process.pid, signal.SIGTERM)
self.process.wait()
# Update modules if needed
self.update_modules()
# Restart worker
self.process = self._new_celery_worker()
self.last_run = time()
sleep(self.refresh_interval)
except KeyboardInterrupt:
not_finished = True
while not_finished:
try:
self.process.wait()
not_finished = False
except KeyboardInterrupt:
pass
def _new_celery_worker(self):
return Popen(['celery', '-A', 'fame.core.celeryctl', 'worker', '-Q', ','.join(self.queues)] + self.celery_args)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Launches a FAME worker.')
parser.add_argument('queues', metavar='queue', type=str, nargs='*',
help='The task queues that this worker will handle.')
parser.add_argument('-c', '--celery_args', type=str, default='',
help='Additional arguments for the celery worker.')
parser.add_argument('-r', '--refresh_interval', type=int, default=30,
help='Frequency at which the worker will check for updates.')
args = parser.parse_args()
queues = args.queues
# Default queue is 'unix'
if len(queues) == 0:
if sys.platform == 'win32':
queues = ['windows']
else:
queues = ['unix']
# A local worker should also take care of updates
if not fame_config.remote:
queues.append('updates')
fame_init()
Worker(queues, args.celery_args, args.refresh_interval).start()
| certsocietegenerale/fame | worker.py | Python | gpl-3.0 | 7,534 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^post/(?P<pk>[0-9]+)/$', views.detail, name='detail'),
url(r'^archive/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/$', views.archives, name='archive'),
url(r'^category/(?P<pk>[0-9]+)/$', views.categories, name='category'),
url(r'^tag/(?P<pk>[0-9]+)/$', views.get_posts_by_tag, name='tag'),
]
| francislpx/myblog | blog/urls.py | Python | gpl-3.0 | 423 |
from gi.repository import Gtk, Gdk, GdkPixbuf
(TARGET_ENTRY_TEXT, TARGET_ENTRY_PIXBUF) = range(2)
(COLUMN_TEXT, COLUMN_PIXBUF) = range(2)
DRAG_ACTION = Gdk.DragAction.COPY
class DragDropWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Drag and Drop")
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.add(vbox)
hbox = Gtk.Box(spacing=12)
vbox.pack_start(hbox, True, True, 0)
self.iconview = DragSourceIconView()
self.drop_area = DropArea()
hbox.pack_start(self.iconview, True, True, 0)
hbox.pack_start(self.drop_area, True, True, 0)
button_box = Gtk.Box(spacing=6)
vbox.pack_start(button_box, True, False, 0)
image_button = Gtk.RadioButton.new_with_label_from_widget(None,
"Images")
image_button.connect("toggled", self.add_image_targets)
button_box.pack_start(image_button, True, False, 0)
text_button = Gtk.RadioButton.new_with_label_from_widget(image_button,
"Text")
text_button.connect("toggled", self.add_text_targets)
button_box.pack_start(text_button, True, False, 0)
self.add_image_targets()
def add_image_targets(self, button=None):
targets = Gtk.TargetList.new([])
targets.add_image_targets(TARGET_ENTRY_PIXBUF, True)
self.drop_area.drag_dest_set_target_list(targets)
self.iconview.drag_source_set_target_list(targets)
def add_text_targets(self, button=None):
self.drop_area.drag_dest_set_target_list(None)
self.iconview.drag_source_set_target_list(None)
self.drop_area.drag_dest_add_text_targets()
self.iconview.drag_source_add_text_targets()
class DragSourceIconView(Gtk.IconView):
def __init__(self):
Gtk.IconView.__init__(self)
self.set_text_column(COLUMN_TEXT)
self.set_pixbuf_column(COLUMN_PIXBUF)
model = Gtk.ListStore(str, GdkPixbuf.Pixbuf)
self.set_model(model)
self.add_item("Item 1", "image-missing")
self.add_item("Item 2", "help-about")
self.add_item("Item 3", "edit-copy")
self.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK, [],
DRAG_ACTION)
self.connect("drag-data-get", self.on_drag_data_get)
def on_drag_data_get(self, widget, drag_context, data, info, time):
selected_path = self.get_selected_items()[0]
selected_iter = self.get_model().get_iter(selected_path)
if info == TARGET_ENTRY_TEXT:
text = self.get_model().get_value(selected_iter, COLUMN_TEXT)
data.set_text(text, -1)
elif info == TARGET_ENTRY_PIXBUF:
pixbuf = self.get_model().get_value(selected_iter, COLUMN_PIXBUF)
data.set_pixbuf(pixbuf)
def add_item(self, text, icon_name):
pixbuf = Gtk.IconTheme.get_default().load_icon(icon_name, 16, 0)
self.get_model().append([text, pixbuf])
class DropArea(Gtk.Label):
def __init__(self):
Gtk.Label.__init__(self, "Drop something on me!")
self.drag_dest_set(Gtk.DestDefaults.ALL, [], DRAG_ACTION)
self.connect("drag-data-received", self.on_drag_data_received)
def on_drag_data_received(self, widget, drag_context, x, y, data, info, time):
if info == TARGET_ENTRY_TEXT:
text = data.get_text()
print("Received text: %s" % text)
elif info == TARGET_ENTRY_PIXBUF:
pixbuf = data.get_pixbuf()
width = pixbuf.get_width()
height = pixbuf.get_height()
print("Received pixbuf with width %spx and height %spx" % (width,
height))
win = DragDropWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
| Dehyrf/python_gates | window.py | Python | gpl-3.0 | 3,800 |
# coding=utf-8
import unittest
"""3. Longest Substring Without Repeating Characters
https://leetcode.com/problems/longest-substring-without-repeating-characters/description/
Given a string, find the length of the **longest substring** without repeating
characters.
**Examples:**
Given `"abcabcbb"`, the answer is `"abc"`, which the length is 3.
Given `"bbbbb"`, the answer is `"b"`, with the length of 1.
Given `"pwwkew"`, the answer is `"wke"`, with the length of 3. Note that the
answer must be a **substring** , `"pwke"` is a _subsequence_ and not a
substring.
Similar Questions:
Longest Substring with At Most Two Distinct Characters (longest-substring-with-at-most-two-distinct-characters)
"""
class Solution(unittest.TestCase):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
cache = {}
val, pos = 0, 0
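# 'cache' maps each character of the current window to its index; on a
# repeat, record the window length, restart just past the previous
# occurrence with a fresh cache. Correct, though O(n^2) worst case
# compared with the classic sliding-window variant.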
while pos < len(s):
if s[pos] in cache:
pos = cache[s[pos]] + 1
val = max(val, len(cache))
cache.clear()
else:
cache[s[pos]] = pos
pos += 1
val = max(val, len(cache))
return val
def test(self):
self.assertEqual(self.lengthOfLongestSubstring("abcabcbb"), 3)
self.assertEqual(self.lengthOfLongestSubstring("bbbbb"), 1)
self.assertEqual(self.lengthOfLongestSubstring("pwwkew"), 3)
self.assertEqual(self.lengthOfLongestSubstring("c"), 1)
if __name__ == "__main__":
unittest.main()
| openqt/algorithms | leetcode/python/ac/lc003-longest-substring-without-repeating-characters.py | Python | gpl-3.0 | 1,545 |
#(c) 2013-2014 by Authors
#This file is a part of Ragout program.
#Released under the BSD license (see LICENSE file)
"""
This module provides some basic FASTA I/O
"""
import logging
from string import maketrans
logger = logging.getLogger()
class FastaError(Exception):
pass
def read_fasta_dict(filename):
"""
Reads fasta file into dictionary. Also preforms some validation
"""
logger.info("Reading contigs file")
header = None
seq = []
fasta_dict = {}
try:
with open(filename, "r") as f:
for lineno, line in enumerate(f):
line = line.strip()
if line.startswith(">"):
if header:
fasta_dict[header] = "".join(seq)
seq = []
header = line[1:].split(" ")[0]
else:
if not _validate_seq(line):
raise FastaError("Invalid char in \"{0}\" at line {1}"
.format(filename, lineno))
seq.append(line)
if header and len(seq):
fasta_dict[header] = "".join(seq)
except IOError as e:
raise FastaError(e)
return fasta_dict
def write_fasta_dict(fasta_dict, filename):
"""
Writes dictionary with fasta to file
"""
with open(filename, "w") as f:
for header in sorted(fasta_dict):
f.write(">{0}\n".format(header))
for i in range(0, len(fasta_dict[header]), 60):
f.write(fasta_dict[header][i:i + 60] + "\n")
COMPL = maketrans("ATGCURYKMSWBVDHNXatgcurykmswbvdhnx",
"TACGAYRMKSWVBHDNXtacgayrmkswvbhdnx")
def reverse_complement(string):
return string[::-1].translate(COMPL)
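# For instance, reverse_complement("ATGC") == "GCAT": the sequence is
# reversed and each base is mapped to its complement via the COMPL table.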
def _validate_seq(sequence):
VALID_CHARS = "ACGTURYKMSWBDHVNXatgcurykmswbvdhnx"
if len(sequence.translate(None, VALID_CHARS)):
return False
return True
| ptdtan/Ragout | ragout/parsers/fasta_parser.py | Python | gpl-3.0 | 1,963 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
java = [
{'name':'common',
'mainpackage':True,
'shortdesc':'Installs the latest version of Java',
'description':'',
'packages-trusty':['openjdk-7-jre-lib'],
'packages-xenial':[],
'packages-bionic':[],
'packages-focal':[],
'packages-groovy':[],
'side-by-side':['jre-headless', 'jre', 'jdk'],
},
{'name':'jre-headless',
'shortdesc':'Installs the latest version of the Java Runtime Environment',
'description':'',
'depends':['common'],
'packages-trusty':['openjdk-7-jre-headless', 'openjdk-8-jre-headless'],
'packages-xenial':['openjdk-8-jre-headless'],
'packages-bionic':['openjdk-8-jre-headless'],
'packages-focal':['openjdk-11-jre-headless'],
'packages-groovy':['openjdk-11-jre-headless'],
},
{'name':'jre',
'shortdesc':'Installs the latest version of the Java Runtime Environment',
'description':'',
'depends':['jre-headless'],
'packages-trusty':['openjdk-7-jre', 'openjdk-8-jre'],
'packages-xenial':['openjdk-8-jre'],
'packages-bionic':['openjdk-8-jre'],
'packages-focal':['openjdk-11-jre'],
'packages-groovy':['openjdk-11-jre'],
},
{'name':'jdk',
'shortdesc':'Installs the latest version of the Java Development Kit',
'description':'',
'depends':['jre'],
'packages-trusty':['openjdk-7-jdk', 'openjdk-8-jdk'],
'packages-xenial':['openjdk-8-jdk'],
'packages-bionic':['openjdk-8-jdk'],
'packages-focal':['openjdk-11-jdk'],
'packages-groovy':['openjdk-11-jdk'],
},
{'name':'jdk-headless',
'shortdesc':'Installs the latest version of the Java Development Kit',
'description':'',
'depends':['jre-headless'],
'packages-trusty':['openjdk-7-jdk-headless', 'openjdk-8-jdk-headless'],
'packages-xenial':['openjdk-8-jdk-headless'],
'packages-bionic':['openjdk-8-jdk-headless'],
'packages-focal':['openjdk-11-jdk-headless'],
'packages-groovy':['openjdk-11-jdk-headless'],
},
{'name':'none',
'shortdesc':'Uninstalls all versions of Java',
'description':'',
'packages':[],
'noconflicts':[]
},
]
| aroth-arsoft/arsoft-meta-packages | grp_java.py | Python | gpl-3.0 | 2,247 |
import json
from pprint import pprint
import requests
from telegram import Update, Bot
from telegram.ext import CommandHandler
from tg_bot import dispatcher
# Open API key
API_KEY = "6ae0c3a0-afdc-4532-a810-82ded0054236"
URL = "http://services.gingersoftware.com/Ginger/correct/json/GingerTheText"
def translate(bot: Bot, update: Update):
if update.effective_message.reply_to_message:
msg = update.effective_message.reply_to_message
params = dict(
lang="US",
clientVersion="2.0",
apiKey=API_KEY,
text=msg.text
)
res = requests.get(URL, params=params)
# print(res)
# print(res.text)
pprint(json.loads(res.text))
changes = json.loads(res.text).get('LightGingerTheTextResult')
curr_string = ""
prev_end = 0
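# Each change entry (as this handler assumes from the Ginger response shape)
# carries 'From'/'To' character offsets into the original text plus a
# 'Suggestions' list; the first suggestion is spliced over each flagged span.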
for change in changes:
start = change.get('From')
end = change.get('To') + 1
suggestions = change.get('Suggestions')
if suggestions:
sugg_str = suggestions[0].get('Text') # should look at this list more
curr_string += msg.text[prev_end:start] + sugg_str
prev_end = end
curr_string += msg.text[prev_end:]
print(curr_string)
update.effective_message.reply_text(curr_string)
__help__ = """
- /t: while replying to a message, will reply with a grammar-corrected version
"""
__mod_name__ = "Translator"
TRANSLATE_HANDLER = CommandHandler('t', translate)
dispatcher.add_handler(TRANSLATE_HANDLER)
| PaulSonOfLars/tgbot | tg_bot/modules/translation.py | Python | gpl-3.0 | 1,574 |
from member.models import Person, Family, Handphone, KeluargaGangguan, JenisDifable, StatusRumah
from member.models import Lantai, Dinding, Atap, JamsosDiterima, SumberAirMinum, StatusListrik
from member.models import DayaListrik, JadwalRonda, ProgramKB, Agama, StatusPerkawinan, HubunganKeluarga
from member.models import PendidikanTerakhir, Jurusan, Pekerjaan, GolonganDarah, PenghasilanBulanan, Hobi
from member.models import Keahlian, JenisUsaha, Organisasi, Media, TemaInformasi, AlatTransportasi
from member.models import Usia, StatusSosial
from rest_framework import serializers
from rest_framework.pagination import PageNumberPagination
class PersonSerializer(serializers.ModelSerializer):
class Meta:
model = Person
fields = '__all__'
class FamilySerializer(serializers.ModelSerializer):
class Meta:
model = Family
fields = '__all__'
depth = 1
class StatusPerkawinanSerializer(serializers.ModelSerializer):
class Meta:
model = StatusPerkawinan
fields = '__all__'
class GolonganDarahSerializer(serializers.ModelSerializer):
class Meta:
model = GolonganDarah
fields = '__all__'
class AgamaSerializer(serializers.ModelSerializer):
class Meta:
model = Agama
fields = '__all__'
class UsiaSerializer(serializers.ModelSerializer):
class Meta:
model = Usia
fields = '__all__'
class LargeResultsSetPagination(PageNumberPagination):
page_size = 100
page_size_query_param = 'page_size'
max_page_size = 1000
class StandardResultsSetPagination(PageNumberPagination):
page_size = 10
page_size_query_param = 'page_size'
max_page_size = 100 | lantip/sms-komunitas | medkom/member/serializers.py | Python | gpl-3.0 | 1,693 |
# -*- coding: utf-8 -*-
from django.db import models
import django.template.defaultfilters
from django.db.models import Max
from django.utils.functional import cached_property
# Create your models here.
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey,GenericRelation
#from django.contrib.contenttypes import generic
from django.dispatch import receiver
from django.db.models.signals import post_save,post_delete,pre_save,pre_delete
from django.db.models.signals import m2m_changed
import django.dispatch
position_changed = django.dispatch.Signal(providing_args=["instance"])
valid_changed = django.dispatch.Signal(providing_args=["instance"])
#from santaclara_base.models import PositionAbstract
import re
import heapq
def custom_model_list(model_list):
sections=["Language",
"Place",
"Time span",
"Person",
"Category",
"Author",
"Publisher",
"Book",
"Publication",
"Migr",
"Repository",]
ret={}
for sec in sections:
ret[sec]=[]
for model_dict in model_list:
if model_dict["model_label"] in ["repositorycachebook","repositorycacheauthor","repositoryfailedisbn",]:
ret["Repository"].append(model_dict)
continue
if model_dict["model_label"] in [ "timepoint","timespan","datemodifier" ]:
ret["Time span"].append(model_dict)
continue
if model_dict["model_label"] in [ "language","languagefamily","languagefamilyrelation",
"languagefamilyfamilyrelation","languagevarietytype","languagevariety" ]:
ret["Language"].append(model_dict)
continue
if model_dict["model_label"] in [ "placetype","place","alternateplacename","placerelation" ]:
ret["Place"].append(model_dict)
continue
if model_dict["model_label"] in [ "article","articleauthorrelation","issuetype",
"issue","publication","volumetype","volume" ]:
ret["Publication"].append(model_dict)
continue
if model_dict["model_label"] in [ "nameformat","nametype","nameformatcollection","personcache",
"person","personnamerelation" ]:
ret["Person"].append(model_dict)
continue
if model_dict["model_label"] in [ "categorytreenode","category","categoryrelation",
"categorytimespanrelation", "categoryplacerelation",
"categorypersonrelation",
"categorylanguagerelation" ]:
ret["Category"].append(model_dict)
continue
if model_dict["model_label"] in [ "author","authorrole","authorrelation" ]:
ret["Author"].append(model_dict)
continue
if model_dict["model_label"] in [ "migrauthor","migrpublisherriviste" ]:
ret["Migr"].append(model_dict)
continue
if model_dict["model_label"] in [ "publisherstate","publisheraddress","publisherisbn","publisher",
"publisheraddresspublisherrelation" ]:
ret["Publisher"].append(model_dict)
continue
ret["Book"].append(model_dict)
xret=[]
for sec in sections:
xret.append( (sec,ret[sec]))
return xret
class PositionAbstract(models.Model):
""" Classe astratta per gestire oggetti posizionabili all'interno di un elenco.
Definisce il campo *pos* (posizione) come intero positivo.
Emette il segnale :any:`santaclara_base.signals.position_changed`
quando la posizione viene modificata.
Un modello che estende la classe PositionAbstract e ridefinisce
__init__() o save() deve ricordarsi di richiamare rispettivamente
:any:`PositionAbstract.my_action_post_init
<santaclara_base.models.PositionAbstract.my_action_post_init>` e
:any:`PositionAbstract.my_action_post_save
<santaclara_base.models.PositionAbstract.my_action_post_save>`.
Un modello che estende la classe PositionAbstract con eredità
multipla e in modo che save() e __init__() siano ereditati da
un'altra classe (quindi con PositionAbstract non primo modello tra
i padri), deve ridefinirli in modo o da richiamare
PositionAbstract.save() e PositionAbstract.__init__() oppure da
utilizzare esplicitamente
:any:`PositionAbstract.my_action_post_init
<santaclara_base.models.PositionAbstract.my_action_post_init>` e
:any:`PositionAbstract.my_action_post_save
<santaclara_base.models.PositionAbstract.my_action_post_save>`.
"""
#: Position.
pos = models.PositiveIntegerField()
class Meta:
abstract = True
def __init__(self,*args,**kwargs):
super(PositionAbstract, self).__init__(*args, **kwargs)
self.my_action_post_init(*args,**kwargs)
def save(self,*args,**kwargs):
super(PositionAbstract,self).save(*args,**kwargs)
self.my_action_post_save(*args,**kwargs)
def my_action_post_save(self,*args,**kwargs):
""" Se un modello che estende PositionAbstract sovrascrive
save() e non richiama esplicitamente PositionAbstract.save(),
oppure se in caso di eredità multipla il save() del modello
non è PositionAbstract.save(), nel nuovo save() dev'essere
richiamata questa funzione, passandole gli stessi parametri di
save(). """
if self.__original_pos!=self.pos:
position_changed.send(self.__class__,instance=self)
self.__original_pos = self.pos
def my_action_post_init(self,*args,**kwargs):
""" Se un modello che estende PositionAbstract sovrascrive
__init__() e non richiama esplicitamente PositionAbstract.__init__(),
oppure se in caso di eredità multipla il __init__() del modello
non è PositionAbstract.__init__(), nel nuovo __init__() dev'essere
richiamata questa funzione, passandole gli stessi parametri di
__init__(). """
self.__original_pos = self.pos
class LabeledAbstract(models.Model):
label = models.SlugField(unique=True)
description = models.CharField(max_length=1024)
class Meta:
abstract = True
def __str__(self):
return str(self.label)
def clean(self,*args,**kwargs):
self.label = self.label.lower()
super(LabeledAbstract, self).clean(*args, **kwargs)
### time span
class DateModifier(PositionAbstract):
name = models.CharField(max_length=1024)
reverse = models.BooleanField(default=False)
class Meta:
ordering = [ 'pos' ]
def __str__(self):
if self.id==0: return ""
if not self.name: return "-"
return str(self.name)
def save(self,*args,**kwargs):
super(DateModifier, self).save(*args, **kwargs)
for obj in self.timepoint_set.all():
obj.save()
class TimePoint(models.Model):
date = models.IntegerField()
modifier = models.ForeignKey(DateModifier,blank=True,default=0,on_delete=models.PROTECT)
class Meta:
ordering = [ 'modifier','date' ]
unique_together= [ 'modifier','date' ]
def __str__(self):
U=str(abs(self.date))
if self.modifier.id!=0:
U+=" "+str(self.modifier)
return U
def save(self,*args,**kwargs):
if not self.modifier:
self.modifier=DateModifier.objects.get(id=0)
if self.modifier.reverse:
self.date=-abs(self.date)
else:
self.date=abs(self.date)
super(TimePoint, self).save(*args, **kwargs)
def begins(self):
return "; ".join([str(x) for x in self.begin_set.all()])
def ends(self):
return "; ".join([str(x) for x in self.end_set.all()])
def time_spans(self):
L=[str(x) for x in self.begin_set.all()]
L+=[str(x) for x in self.end_set.all()]
L=list(set(L))
return "; ".join(L)
class TimeSpan(models.Model):
begin = models.ForeignKey(TimePoint,related_name="begin_set",on_delete=models.PROTECT)
end = models.ForeignKey(TimePoint,related_name="end_set",on_delete=models.PROTECT)
name = models.CharField(max_length=4096,blank=True)
def __str__(self):
if self.name:
return str(self.name)
return str(self.begin)+"-"+str(self.end)
class Meta:
ordering = [ 'begin','end' ]
def categories(self):
return "; ".join([str(x.category) for x in self.categorytimespanrelation_set.all()])
### language
class Language(models.Model):
name = models.CharField(max_length=4096)
def __str__(self): return self.name
def families(self):
return "; ".join([str(x.family) for x in self.languagefamilyrelation_set.all()])
def varieties(self):
return "; ".join([str(x) for x in self.languagevariety_set.all()])
class LanguageFamily(models.Model):
name = models.CharField(max_length=4096)
def __str__(self): return self.name
def parents(self):
return "; ".join([str(x.parent) for x in self.parent_set.all()])
def children(self):
return "; ".join([str(x.child) for x in self.child_set.all()])
def languages(self):
return "; ".join([str(x.language) for x in self.languagefamilyrelation_set.all()])
class LanguageFamilyRelation(models.Model):
language = models.ForeignKey(Language,on_delete=models.PROTECT)
family = models.ForeignKey(LanguageFamily,on_delete=models.PROTECT)
def __str__(self):
return str(self.family)+"/"+str(self.language)
class LanguageFamilyFamilyRelation(models.Model):
parent = models.ForeignKey(LanguageFamily,related_name="child_set",on_delete=models.PROTECT)
child = models.ForeignKey(LanguageFamily,related_name="parent_set",on_delete=models.PROTECT)
def __str__(self):
return str(self.parent)+"/"+str(self.child)
class Meta:
ordering = ["parent","child"]
class LanguageVarietyType(models.Model):
name = models.CharField(max_length=4096)
def __str__(self): return self.name
class LanguageVariety(models.Model):
name = models.CharField(max_length=4096,blank=True)
language = models.ForeignKey(Language,on_delete=models.PROTECT)
type = models.ForeignKey(LanguageVarietyType,default=1,on_delete=models.PROTECT)
def __str__(self):
if self.type.id==1:
return str(self.language)
if not self.name:
return str(self.language)
return str(self.language)+" ("+str(self.name)+")"
### place
class PlaceType(models.Model):
name = models.CharField(max_length=4096)
def __str__(self): return self.name
class Place(models.Model):
name = models.CharField(max_length=4096,unique=True)
type = models.ForeignKey(PlaceType,on_delete=models.PROTECT)
def __str__(self):
return self.name
def alternate_names(self):
return "; ".join([str(x.name) for x in self.alternateplacename_set.all()])
def areas(self):
return "; ".join([str(x.area) for x in self.area_set.all()])
def places(self):
return "; ".join([str(x.place) for x in self.place_set.all()])
class Meta:
ordering = [ "name" ]
class AlternatePlaceName(models.Model):
place = models.ForeignKey(Place,on_delete=models.PROTECT)
name = models.CharField(max_length=4096)
note = models.CharField(max_length=65536,blank=True)
def __str__(self):
return self.name
class PlaceRelation(models.Model):
place = models.ForeignKey(Place,related_name="area_set",on_delete=models.PROTECT)
area = models.ForeignKey(Place,related_name="place_set",on_delete=models.PROTECT)
def __str__(self):
return str(self.area)+"/"+str(self.place)
class Meta:
ordering = ["area","place"]
### person
class NameFormat(LabeledAbstract):
pattern = models.CharField(max_length=1024)
class Meta:
ordering = ["label"]
def save(self, *args, **kwargs):
super(NameFormat, self).save(*args, **kwargs)
for coll in self.long_format_set.all():
coll.save()
for coll in self.short_format_set.all():
coll.save()
for coll in self.ordering_format_set.all():
coll.save()
for coll in self.list_format_set.all():
coll.save()
class NameType(LabeledAbstract): pass
RE_NAME_SEP=re.compile("('| |-)")
VONS=["von","di","da","del","della","dell","dello","dei","degli","delle","de","d","la","lo",
"dal","dalla","dall","dallo","dai","dagli","dalle","al","ibn"]
ROMANS=["I","II","III","IV","V","VI","VII","VIII","IX","X",
"XI","XII","XIII","XIV","XV","XVI","XVII","XVIII","XIX","XX",
"XXI","XXII","XXIII","XXIV","XXV","XXVI","XXVII","XXVIII","XXIX","XXX",
"XXXI","XXXII","XXXIII","XXXIV","XXXV","XXXVI","XXXVII","XXXVIII","XXXIX","XL",
"XLI","XLII","XLIII","XLIV","XLV","XLVI","XLVII","XLVIII","XLIX","L"]
class NameFormatCollectionManager(models.Manager):
def get_preferred(self,num_fields):
preferred_list=self.all().filter(preferred=True)
for format_c in preferred_list:
fields=format_c.fields
if len(fields)==num_fields:
return format_c
format_max_num=-1
format_max=None
for format_c in self.all():
fields=format_c.fields
if len(fields)==num_fields:
return format_c
if len(fields)>format_max_num:
format_max_num=len(fields)
format_max=format_c
return format_max
def get_format_for_name(self,search):
if not search:
return self.get_preferred(0),[]
if search.lower().replace(".","") in [ "av","aavv" ]:
return self.get_preferred(0),[]
t=RE_NAME_SEP.split(search)
names=[]
t_vons=""
for n in range(0,len(t)):
if not t[n]: continue
if t[n] in [ " ","'" ]:
if t_vons:
t_vons+=t[n]
continue
if t[n]=="-":
if t_vons:
t_vons+="-"
else:
names[-1]+="-"
continue
if t[n].lower() not in VONS:
if names and names[-1].endswith("-"):
names[-1]+=t[n].capitalize()
else:
names.append(t_vons+t[n].capitalize())
t_vons=""
continue
t_vons+=t[n]
return self.get_preferred(len(names)),names
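# Illustrative sketch of the tokenisation above (the name is hypothetical):
# for search = "Miguel de Cervantes", "de" matches VONS and is folded into
# the following token, so names == ["Miguel", "de Cervantes"] and the
# preferred two-field format collection is returned.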
class NameFormatCollection(LabeledAbstract):
long_format = models.ForeignKey(NameFormat,related_name='long_format_set',on_delete=models.PROTECT)
short_format = models.ForeignKey(NameFormat,related_name='short_format_set',on_delete=models.PROTECT)
list_format = models.ForeignKey(NameFormat,related_name='list_format_set',on_delete=models.PROTECT)
ordering_format = models.ForeignKey(NameFormat,related_name='ordering_format_set',on_delete=models.PROTECT)
preferred = models.BooleanField(default=False)
objects = NameFormatCollectionManager()
def save(self, *args, **kwargs):
super(NameFormatCollection, self).save(*args, **kwargs)
for person in self.person_set.all():
person.update_cache()
@cached_property
def fields(self):
L=["name","surname"]
long_name=str(self.long_format.pattern)
short_name=str(self.short_format.pattern)
list_name=str(self.list_format.pattern)
ordering_name=str(self.ordering_format.pattern)
for s in "VALURNIC":
long_name=long_name.replace("{{"+s+"|","{{")
short_name=short_name.replace("{{"+s+"|","{{")
list_name=list_name.replace("{{"+s+"|","{{")
ordering_name=ordering_name.replace("{{"+s+"|","{{")
names=[]
for f in [long_name,short_name,list_name,ordering_name]:
L=[x.replace("{{","").replace("}}","") for x in re.findall(r'{{.*?}}',f)]
for name in L:
if name in names: continue
names.append(name)
return names
### Format syntax
# {{<name_type>}}: <name_type>
# {{C|<name_type>}}: <name_type> (capitalized)
# {{V|<name_type>}}: <name_type> (capitalized, except von, de, etc.)
# {{L|<name_type>}}: <name_type> (lowercased)
# {{U|<name_type>}}: <name_type> (uppercased)
# {{A|<name_type>}}: <name_type> as an integer in arabic numerals
# {{R|<name_type>}}: <name_type> as an integer in uppercase roman numerals
# {{N|<name_type>}}: <name_type> (lowercased, with spaces => _)
# {{I|<name_type>}}: initials (Gian Uberto => G. U.)
def apply_formats(self,names):
long_name=str(self.long_format.pattern)
short_name=str(self.short_format.pattern)
list_name=str(self.list_format.pattern)
ordering_name=str(self.ordering_format.pattern)
list_upper=str(self.list_format.pattern)
list_lower=str(self.list_format.pattern)
names_list=list(names.items())
if not names_list:
return long_name,short_name,list_name,ordering_name,"-","-"
for key,rel in names_list:
val_f=rel.formatted()
long_name=long_name.replace("{{"+key+"}}",val_f["norm"])
short_name=short_name.replace("{{"+key+"}}",val_f["norm"])
list_name=list_name.replace("{{"+key+"}}",val_f["norm"])
ordering_name=ordering_name.replace("{{"+key+"}}",val_f["norm"])
list_upper=list_upper.replace("{{"+key+"}}",val_f["norm_upper"])
list_lower=list_lower.replace("{{"+key+"}}",val_f["norm_lower"])
for k in "VALURNIC":
long_name=long_name.replace("{{"+k+"|"+key+"}}",val_f[k])
short_name=short_name.replace("{{"+k+"|"+key+"}}",val_f[k])
list_name=list_name.replace("{{"+k+"|"+key+"}}",val_f[k])
ordering_name=ordering_name.replace("{{"+k+"|"+key+"}}",val_f[k])
if k in "AR":
list_upper=list_upper.replace("{{"+k+"|"+key+"}}",val_f[k])
list_lower=list_lower.replace("{{"+k+"|"+key+"}}",val_f[k])
else:
list_upper=list_upper.replace("{{"+k+"|"+key+"}}",val_f["norm_upper"])
list_lower=list_lower.replace("{{"+k+"|"+key+"}}",val_f["norm_lower"])
return long_name,short_name,list_name,ordering_name,list_upper[0],list_lower[0]
class PersonCache(models.Model):
long_name = models.CharField(max_length=4096,default="-")
short_name = models.CharField(max_length=4096,default="-")
list_name = models.CharField(max_length=4096,default="-")
ordering_name = models.CharField(max_length=4096,default="-")
upper_initial = models.CharField(max_length=4,default="-")
lower_initial = models.CharField(max_length=4,default="-")
class Meta:
ordering = ["ordering_name"]
db_table = 'bibliography_personcache'
def __str__(self): return self.list_name
class PersonManager(models.Manager):
def search_names(self,names):
qset=self.all()
if len(names)==0: return qset
#D=[]
for name in names:
if name.endswith("."):
name=name[:-1]
qset=qset.filter(personnamerelation__value__istartswith=name)
elif len(name)==1:
qset=qset.filter(personnamerelation__value__istartswith=name)
else:
qset=qset.filter(personnamerelation__value__iexact=name)
# if qset.count()>0: return qset.select_related("cache")
# if len(names)==1: return qset.select_related("cache")
# if len(names)==2:
# newnames=[ " ".join(names) ]
# return self.search_names(newnames)
# L=len(names)
# for n in range(0,L-1):
# newnames=names[0:n] + [ " ".join(names[n:n+2])] + names[n+2:L]
# qset=self.search_names(newnames)
# if qset.count()>0: return qset.select_related("cache")
return qset.select_related("cache")
def filter_by_name(self,search):
search=search.replace(" , "," ")
search=search.replace(", "," ")
search=search.replace(" ,"," ")
search=search.replace(","," ")
if search.lower() in [ "--","","- -","-","aavv","aa.vv.","aa. vv."]:
format_c=NameFormatCollection.objects.get(label="aavv")
qset=self.all().filter(format_collection=format_c)
return qset
t_name=search.lower().split(" ")
return self.search_names(t_name)
def look_for(self,name_list):
old={}
new=[]
for name in name_list:
qset=self.filter_by_name(name)
if qset.count():
old[name]=(qset.first())
else:
new.append(name)
return old,new
def create_by_names(self,format_collection,**kwargs):
obj=self.create(format_collection=format_collection)
for key,val in list(kwargs.items()):
name_type,created=NameType.objects.get_or_create(label=key)
rel,created=PersonNameRelation.objects.get_or_create(person=obj,name_type=name_type,
defaults={"value": val})
if not created:
rel.value=val
rel.save()
return obj
class Person(models.Model):
format_collection = models.ForeignKey(NameFormatCollection,on_delete=models.PROTECT)
cache = models.OneToOneField(PersonCache,editable=False,null=True,on_delete=models.PROTECT)
names = models.ManyToManyField(NameType,through='PersonNameRelation',blank=True)
objects = PersonManager()
class Meta:
ordering = ["cache"]
db_table = 'bibliography_person'
def __str__(self):
return self.list_name()
def long_name(self): return str(self.cache.long_name)
def short_name(self): return str(self.cache.short_name)
def ordering_name(self): return str(self.cache.ordering_name)
def list_name(self): return str(self.cache.list_name)
def upper_initial(self): return str(self.cache.upper_initial)
def lower_initial(self): return str(self.cache.lower_initial)
def save(self, *args, **kwargs):
if not self.cache:
self.cache = PersonCache.objects.create()
super(Person, self).save(*args, **kwargs)
self.update_cache()
def update_cache(self):
names={}
for rel in self.personnamerelation_set.all():
names[str(rel.name_type.label)]=rel
long_name,short_name,list_name,ordering_name,upper_initial,lower_initial=self.format_collection.apply_formats(names)
self.cache.long_name = long_name
self.cache.short_name = short_name
self.cache.list_name = list_name
self.cache.ordering_name = ordering_name
self.cache.upper_initial = upper_initial
self.cache.lower_initial = lower_initial
self.cache.save()
class PersonNameRelation(models.Model):
person = models.ForeignKey(Person,on_delete=models.PROTECT)
name_type = models.ForeignKey(NameType,on_delete=models.PROTECT)
value = models.CharField(max_length=4096,default="-",db_index=True)
case_rule = models.CharField(max_length=128,choices=[ ("latin","latin"),
("turkic","turkic") ],
default="latin")
def __str__(self): return str(self.value)
def save(self, *args, **kwargs):
super(PersonNameRelation, self).save(*args, **kwargs)
self.person.update_cache()
def _upper(self,x):
if self.case_rule=="latin":
return x.upper()
x=x.replace("ı","I")
x=x.replace("i","İ")
return x.upper()
def _lower(self,x):
if self.case_rule=="latin":
return x.lower()
x=x.replace("I","ı")
x=x.replace("İ","i")
return x.lower()
def _capitalize(self,x):
if self.case_rule=="latin":
return x.capitalize()
return self._upper(x[0])+self._lower(x[1:])
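    # With case_rule="turkic" the Turkish dotted/dotless i distinction is
    # preserved: e.g. _upper("istanbul") -> "İSTANBUL" and _lower("DIŞ") -> "dış",
    # whereas case_rule="latin" falls through to plain str.upper()/str.lower().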
    ### Format syntax
    # {{<name_type>}}: <name_type>
    # {{C|<name_type>}}: <name_type> (capitalized)
    # {{V|<name_type>}}: <name_type> (capitalized, except particles such as von, de, etc.)
    # {{L|<name_type>}}: <name_type> (lowercased)
    # {{U|<name_type>}}: <name_type> (uppercased)
    # {{A|<name_type>}}: <name_type> as integer in arabic numerals
    # {{R|<name_type>}}: <name_type> as integer in uppercase roman numerals
    # {{N|<name_type>}}: <name_type> (lowercased, with spaces replaced by "_")
    # {{I|<name_type>}}: initials (Gian Uberto => G. U.)
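    # Illustrative example (the pattern and names are hypothetical, and this
    # assumes RE_NAME_SEP is a capturing split that keeps the separators, as
    # the empty-string joins in formatted() suggest): a pattern
    # "{{V|last_name}}, {{I|first_name}}" applied to first_name="Gian Uberto",
    # last_name="von name" renders as "von Name, G. U.".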
def formatted(self):
val=str(self.value)
val_f={}
t=RE_NAME_SEP.split(val)
#t=map(lambda x: self._capitalize(x),RE_NAME_SEP.split(val))
vons_t=[]
norm_t=[]
for x in t:
if self._lower(x) in VONS:
vons_t.append(self._lower(x))
else:
if len(x)==1 and x.isalpha():
vons_t.append(self._upper(x)+".")
else:
vons_t.append(self._capitalize(x))
if len(x)==1 and x.isalpha():
norm_t.append(x+".")
else:
norm_t.append(x)
cap_t=[self._capitalize(x) for x in norm_t]
val_norm="".join(norm_t)
val_f["L"]=self._lower(val)
val_f["U"]=self._upper(val)
val_f["N"]=self._lower(val).replace(" ","_")
val_f["I"]=". ".join([x[0].upper() for x in list(filter(bool,val.split(" ")))])+"."
val_f["C"]="".join(cap_t)
val_f["V"]="".join(vons_t)
if val.isdigit():
val_f["R"]=ROMANS[int(val)-1]
val_f["A"]="%3.3d" % int(val)
else:
val_f["R"]=""
val_f["A"]=""
val_f["norm"]=val_norm
val_f["norm_upper"]=self._upper(val_norm)
val_f["norm_lower"]=self._lower(val_norm)
return val_f
# long_name=long_name.replace("{{"+key+"}}",val_norm)
# short_name=short_name.replace("{{"+key+"}}",val_norm)
# list_name=list_name.replace("{{"+key+"}}",val_norm)
# ordering_name=ordering_name.replace("{{"+key+"}}",val_norm)
# for k in "VALURNIC":
# long_name=long_name.replace("{{"+k+"|"+key+"}}",val_f[k])
# short_name=short_name.replace("{{"+k+"|"+key+"}}",val_f[k])
# list_name=list_name.replace("{{"+k+"|"+key+"}}",val_f[k])
# ordering_name=ordering_name.replace("{{"+k+"|"+key+"}}",val_f[k])
# return long_name,short_name,list_name,ordering_name
### category
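# The category tree below is stored as materialized paths: node_id is the chain
# of category labels joined by ":" (e.g. an illustrative "science:physics:quantum"
# is a level-2 node under the root "science"), which is why branch queries use
# node_id__istartswith with a ":" suffix throughout.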
class CategoryTreeNodeManager(models.Manager):
def roots(self):
return self.filter(level=0)
def until_level(self,level,only_category=True):
if not only_category:
return self.filter(level__lte=level)
return self.filter(level__lte=level,is_category=True)
def branch_nodes(self,base_node,level,only_category=True):
if not only_category:
return self.filter(level=level,node_id__istartswith=base_node.node_id+":")
return self.filter(level=level,node_id__istartswith=base_node.node_id+":",is_category=True)
def update_category(self,cat):
ctype = ContentType.objects.get_for_model(Category)
for cat_node in self.filter(content_type=ctype,object_id=cat.id):
level=int(cat_node.level)
old_node_id=str(cat_node.node_id)
parent_node_id=":".join(old_node_id.split(":")[:-1])
if parent_node_id:
new_node_id=parent_node_id+":"+cat.label
else:
new_node_id=cat.label
cat_node.node_id=new_node_id
cat_node.save()
if not cat_node.has_children: return
cat_children=list(self.filter(node_id__istartswith=old_node_id+":",level=level+1))
for child in cat_children:
self.reparent(new_node_id,level,child)
def remove_category(self,cat):
ctype = ContentType.objects.get_for_model(Category)
node_ids=[]
for cat_node in self.filter(content_type=ctype,object_id=cat.id):
node_ids.append(cat_node.node_id)
self.filter(node_id__istartswith=cat_node.node_id+':').delete()
cat_node.delete()
def create_category(self,cat):
newobj=self.create(content_object=cat,node_id=cat.label,has_children=False,level=0)
newobj.save()
return newobj
def reparent(self,parent_node_id,parent_level,cat_node):
ret=[]
old_node_id=str(cat_node.node_id)
old_level=int(cat_node.level)
rel_node_id=old_node_id.split(":")[-1]
if parent_node_id:
new_node_id=parent_node_id+":"+rel_node_id
else:
new_node_id=rel_node_id
if parent_level>=0:
new_level=parent_level+1
else:
new_level=0
cat_node.node_id=new_node_id
cat_node.level=new_level
cat_node.save()
ret.append(("R",cat_node))
if not cat_node.has_children: return ret
cat_children=list(self.filter(node_id__istartswith=old_node_id+":"))
for cch_node in cat_children:
new_cch_node_id=str(cch_node.node_id).replace(old_node_id+":",new_node_id+":",1)
new_cch_level=int(cch_node.level)-old_level+new_level
cch_node.node_id=new_cch_node_id
cch_node.level=new_cch_level
cch_node.save()
ret.append(("R",cch_node))
return ret
def clone(self,parent_node_id,parent_level,cat_node):
ret=[]
old_node_id=str(cat_node.node_id)
old_level=int(cat_node.level)
rel_node_id=old_node_id.split(":")[-1]
if parent_node_id:
new_node_id=parent_node_id+":"+rel_node_id
else:
new_node_id=rel_node_id
if parent_level>=0:
new_level=parent_level+1
else:
new_level=0
newobj=self.create(content_object=cat_node.content_object,
node_id=new_node_id,
has_children=cat_node.has_children,
level=new_level)
newobj.save()
ret.append(("C",newobj))
if not cat_node.has_children: return ret
cat_children=list(self.filter(node_id__istartswith=old_node_id+":"))
for cch_node in cat_children:
new_cch_node_id=str(cch_node.node_id).replace(old_node_id+":",new_node_id+":",1)
new_cch_level=int(cch_node.level)-old_level+new_level
newobj=self.create(content_object=cch_node.content_object,
node_id=new_cch_node_id,
has_children=cch_node.has_children,
level=new_cch_level)
newobj.save()
ret.append(("C",newobj))
return ret
def add_child_category(self,parent,child):
parent_nodes=list(parent.tree_nodes.all())
child_nodes=list(child.tree_nodes.all())
cn=child_nodes[0]
startind=0
new_objects=[]
if len(child_nodes)==1 and child_nodes[0].level==0:
            ## the only child is a root node
fn=parent_nodes[0]
new_objects=self.reparent(str(fn.node_id),int(fn.level),cn)
startind=1
fn.has_children=True
fn.save()
for fn in parent_nodes[startind:]:
new_objects+=self.clone(str(fn.node_id),int(fn.level),cn)
fn.has_children=True
fn.save()
return new_objects
def remove_child_category(self,parent,child):
parent_nodes=list(parent.tree_nodes.all())
child_nodes=list(child.tree_nodes.all())
del_list=[]
for fn in parent_nodes:
fn_node_id=str(fn.node_id)
for cn in child_nodes:
cn_node_id=str(cn.node_id)
cn_rel_node_id=cn_node_id.split(":")[-1]
if cn_node_id==fn_node_id+":"+cn_rel_node_id:
del_list.append((fn,cn))
break
if len(del_list)==len(child_nodes):
objs=self.clone("",-1,child_nodes[0])
for action,obj in objs:
obj.save()
for parent,node in del_list:
self.remove_branch(node)
parent.has_children=bool(self.filter(node_id__istartswith=str(parent.node_id)+":").exists())
parent.save()
def update_child_category(self,old_parent,old_child,new_parent,new_child):
if not old_parent and not old_child: return
if (old_parent==new_parent) and (old_child==new_child): return
self.remove_child_category(old_parent,old_child)
self.add_child_category(new_parent,new_child)
def remove_branch(self,basenode):
base_node_id=str(basenode.node_id)
self.filter(node_id__istartswith=base_node_id+":").delete()
self.filter(node_id=base_node_id).delete()
def add_category_relation(self,cat,child):
parent_nodes=list(cat.tree_nodes.all())
ret=[]
for fn in parent_nodes:
new_node_id=str(fn.node_id)+":"+str(child.id)
new_level=int(fn.level)+1
newobj=self.create(content_object=child,
node_id=new_node_id,
has_children=False,
level=new_level)
ret.append(("C",newobj))
fn.has_children=True
fn.save()
return ret
def remove_category_relation(self,cat,child):
parent_nodes=list(cat.tree_nodes.all())
node_ids=[]
for fn in parent_nodes:
node_ids.append(str(fn.node_id)+":"+str(child.id))
self.filter(node_id__in=node_ids).delete()
for fn in parent_nodes:
fn.has_children=bool(self.filter(node_id__istartswith=str(fn.node_id)+":").exists())
fn.save()
def update_category_relation(self,old_cat,old_child,new_cat,new_child):
if not old_cat and not old_child: return
if (old_cat==new_cat) and (old_child==new_child): return
self.remove_category_relation(old_cat,old_child)
self.add_category_relation(new_cat,new_child)
def get_num_objects(self,catnode):
if not catnode.is_category: return 1
N=self.filter(node_id__istartswith=catnode.node_id+":",is_category=False).values("content_type","object_id").distinct().count()
return N
def max_level(self,only_cat=True):
if not only_cat:
return self.all().aggregate(Max('level'))["level__max"]
return self.filter(is_category=True).aggregate(Max('level'))["level__max"]
class CategoryTreeNode(models.Model):
content_type = models.ForeignKey(ContentType,on_delete=models.PROTECT)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type','object_id')
node_id = models.CharField(max_length=4096,unique=True)
has_children = models.BooleanField()
level = models.PositiveIntegerField()
objects = CategoryTreeNodeManager()
label = models.CharField(max_length=4096,editable=False)
label_children = models.CharField(max_length=4096,editable=False)
is_category = models.BooleanField(editable=False)
num_objects = models.PositiveIntegerField(editable=False)
def branch_depth(self,only_cat=True):
if only_cat:
ret=CategoryTreeNode.objects.filter(node_id__istartswith=self.node_id+":",is_category=True).aggregate(Max('level'))["level__max"]
else:
ret=CategoryTreeNode.objects.filter(node_id__istartswith=self.node_id+":").aggregate(Max('level'))["level__max"]
if not ret: return 0
return ret
def branch_level_size(self,level,only_cat=True):
if only_cat:
return CategoryTreeNode.objects.filter(node_id__istartswith=self.node_id+":",
level=level,is_category=True).count()
return CategoryTreeNode.objects.filter(node_id__istartswith=self.node_id+":",level=level).count()
def branch(self,only_cat=True):
if only_cat:
return CategoryTreeNode.objects.filter(node_id__istartswith=self.node_id+":",is_category=True)
return CategoryTreeNode.objects.filter(node_id__istartswith=self.node_id+":")
def __str__(self):
U= "%3d %s" % (int(self.level),str(self.node_id))
return U
def direct_size(self):
if not self.is_category: return 0
return self.content_object.child_set.count()
class Meta:
ordering = [ "node_id" ]
def save(self, *args, **kwargs):
self.label_children="_"+str(self.node_id).replace(":","_")
t=str(self.node_id).split(":")
if len(t)==1:
self.label=""
else:
self.label="_"+"_".join(t[:-1])
self.is_category=( self.content_type.model_class() == Category )
self.num_objects = CategoryTreeNode.objects.get_num_objects(self)
super(CategoryTreeNode, self).save(*args, **kwargs)
class CategoryManager(models.Manager):
use_for_related_fields = True
def get_query_set(self):
class CategoryQueryset(models.query.QuerySet):
def all_in_branch(self,parent_id):
parent=Category.objects.get(id=int(parent_id))
children_ids=[parent.id]
for catnode in parent.tree_nodes.all():
L=catnode.branch()
children_ids+=[x.object_id for x in list(L)]
children_ids=list(set(children_ids))
return self.filter(id__in=children_ids)
return CategoryQueryset(Category)
def query_set_branch(self,queryset,parent_id):
parent=Category.objects.get(id=int(parent_id))
children_ids=[parent.id]
for catnode in parent.tree_nodes.all():
L=catnode.branch()
children_ids+=[x.object_id for x in list(L)]
children_ids=list(set(children_ids))
return queryset.filter(id__in=children_ids)
def all_in_branch(self,parent_id):
return self.get_query_set().all_in_branch(parent_id)
def merge(self,cat_queryset):
new_name="[merge]"
old_cats=list(cat_queryset.all())
for cat in old_cats:
new_name+=" "+cat.name
new_cat=self.create(name=new_name)
children=[]
for catrel in CategoryRelation.objects.filter(parent__in=old_cats):
if catrel.child in children:
catrel.delete()
continue
catrel.parent=new_cat
children.append(catrel.child)
catrel.save()
parents=[]
for catrel in CategoryRelation.objects.filter(child__in=old_cats):
if new_cat==catrel.parent:
catrel.delete()
continue
if catrel.parent in parents:
catrel.delete()
continue
catrel.child=new_cat
parents.append(catrel.parent)
catrel.save()
L=[]
for catrel in CategoryTimeSpanRelation.objects.filter(category__in=old_cats):
if catrel.time_span in L:
catrel.delete()
continue
catrel.category=new_cat
catrel.save()
L.append(catrel.time_span)
L=[]
for catrel in CategoryPlaceRelation.objects.filter(category__in=old_cats):
if catrel.place in L:
catrel.delete()
continue
catrel.category=new_cat
catrel.save()
L.append(catrel.place)
L=[]
for catrel in CategoryPersonRelation.objects.filter(category__in=old_cats):
if catrel.person in L:
catrel.delete()
continue
catrel.category=new_cat
catrel.save()
L.append(catrel.person)
L=[]
for catrel in CategoryLanguageRelation.objects.filter(category__in=old_cats):
if catrel.language in L:
catrel.delete()
continue
catrel.category=new_cat
catrel.save()
L.append(catrel.language)
for cat in old_cats:
for book in cat.book_set.all():
book.categories.add(new_cat)
book.categories.remove(cat)
cat.delete()
class Category(models.Model):
name = models.CharField(max_length=4096,unique=True)
label = models.SlugField(max_length=4096,editable=False,unique=True)
tree_nodes = GenericRelation(CategoryTreeNode)
objects = CategoryManager()
def __str__(self): return str(self.name)
class Meta:
ordering = ["name"]
def slugify(self):
S=str(self.name)
S=S.replace("#","sharp")
S=S.replace("++","plusplus")
return django.template.defaultfilters.slugify(S)
def save(self, *args, **kwargs):
self.label = self.slugify()
super(Category, self).save(*args, **kwargs)
def parents(self):
return "; ".join([str(x.parent) for x in self.parent_set.all()])
def children(self):
return "; ".join([str(x.child) for x in self.child_set.all()])
def time_span(self):
return "; ".join([str(x.time_span) for x in self.categorytimespanrelation_set.all()])
def place(self):
return "; ".join([str(x.place) for x in self.categoryplacerelation_set.all()])
def person(self):
return "; ".join([str(x.person) for x in self.categorypersonrelation_set.all()])
def language(self):
return "; ".join([str(x.language) for x in self.categorylanguagerelation_set.all()])
def num_books(self):
return self.book_set.count()
def min_level(self):
level=-1
for node in self.tree_nodes.all():
if level<0:
level=node.level
continue
level=min(level,node.level)
return level
def num_objects(self):
node=self.tree_nodes.all().first()
return node.num_objects
def my_branch_depth(self):
node=self.tree_nodes.all().first()
return node.branch_depth()
def my_branch_id(self):
level=-1
elected=None
for node in self.tree_nodes.all():
if level<0:
elected=node
level=node.level
continue
if level<=node.level: continue
elected=node
level=node.level
node_id=elected.node_id
big_parent_id=node_id.split(":")[0]
#big_parent_node=CategoryTreeNode.objects.get(node_id=big_parent_id)
return big_parent_id
class CategoryRelation(models.Model):
child = models.ForeignKey(Category,related_name="parent_set",on_delete=models.PROTECT)
parent = models.ForeignKey(Category,related_name="child_set",on_delete=models.PROTECT)
def __str__(self):
return str(self.parent)+"/"+str(self.child)
class Meta:
ordering = ["parent","child"]
class CategoryTimeSpanRelation(models.Model):
time_span=models.ForeignKey(TimeSpan,on_delete=models.PROTECT)
category=models.ForeignKey(Category,on_delete=models.PROTECT)
def __str__(self):
return str(self.time_span)+"/"+str(self.category)
class CategoryPlaceRelation(models.Model):
place=models.ForeignKey(Place,on_delete=models.PROTECT)
category=models.ForeignKey(Category,on_delete=models.PROTECT)
def __str__(self):
return str(self.place)+"/"+str(self.category)
class CategoryPersonRelation(models.Model):
person=models.ForeignKey(Person,on_delete=models.PROTECT)
category=models.ForeignKey(Category,on_delete=models.PROTECT)
def __str__(self):
return str(self.person)+"/"+str(self.category)
class CategoryLanguageRelation(models.Model):
language=models.ForeignKey(LanguageVariety,on_delete=models.PROTECT)
category=models.ForeignKey(Category,on_delete=models.PROTECT)
def __str__(self):
return str(self.language)+"/"+str(self.category)
class CategorizedObject(models.Model):
categories = models.ManyToManyField(Category,blank=True)
class Meta:
abstract = True
def get_categories(self):
return "; ".join([str(x) for x in self.categories.all()])
### authors
def print_result(label):
def g(func):
def f(*args):
res=func(*args)
print(label,res,*args)
return res
return f
return g
class AuthorManager(PersonManager):
def catalog(self):
class PubTuple(tuple):
def __new__ (cls, year,role,obj):
x=super(PubTuple, cls).__new__(cls, tuple( (year,role,obj) ))
return x
def __str__(self):
return "(%s,%s,%s)" % (str(self._year),str(self._role),str(self._obj))
def __init__(self,year,role,obj):
self._year=year
self._role=role
self._obj=obj
#@print_result("EQ")
def __eq__(self,other):
if self._year!=other._year: return False
if type(self._obj) is not type(other._obj): return False
return self._obj.id == other._obj.id
#@print_result("LT")
def __lt__(self,other):
if self._year < other._year: return True
if self._year > other._year: return False
if type(self._obj) is type(other._obj):
return self._obj.id < other._obj.id
if type(self._obj) is Book: return True
if type(other._obj) is Book: return False
return type(self._obj) is Issue
# if isinstance(self._obj,Book):
# if isinstance(other._obj,Book):
# if self._obj.title == other._obj.title:
# return self._obj.id < other._obj.id
# return self._obj.title < other._obj.title
# return True
# if isinstance(other._obj,Book): return False
# if isinstance(self._obj,Issue):
# s_date=self._obj.date
# else:
# s_date=self._obj.issue.date
# if isinstance(other._obj,Issue):
# o_date=other._obj.date
# else:
# o_date=other._obj.issue.date
# if s_date<o_date: return True
# if s_date>o_date: return False
# if type(self._obj) is not type(other._obj):
# return type(self._obj) is Issue
# if self._obj.title == other._obj.title:
# return self._obj.id < other._obj.id
# return self._obj.title < other._obj.title
            def __gt__(self, other): return other.__lt__(self)
            def __le__(self, other): return self.__eq__(other) or self.__lt__(other)
            def __ge__(self, other): return self.__eq__(other) or self.__gt__(other)
            def __ne__(self, other): return not self.__eq__(other)
class CatAuthor(object):
def __init__(self,db_author):
self._db_author=db_author
self.id=db_author.id
self.list_name=db_author.list_name()
self.long_name=db_author.long_name()
self.ordering_name=db_author.ordering_name()
self._publications=[]
def add(self,pub):
heapq.heappush(self._publications, pub)
@property
def publications(self):
return heapq.nsmallest(len(self._publications), self._publications)
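        # CatAuthor keeps its publications in a heap ordered by PubTuple.__lt__
        # (year first, then Book < Issue < Article, then id), so the
        # `publications` property returns them already sorted via
        # heapq.nsmallest without an explicit sort pass.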
issues=[ (rel.author,rel.author_role,rel.issue)
for rel in IssueAuthorRelation.objects.all().select_related() ]
books=[ (rel.author,rel.author_role,rel.book)
for rel in BookAuthorRelation.objects.all().select_related() ]
articles=[ (rel.author,rel.author_role,rel.article)
for rel in ArticleAuthorRelation.objects.all().select_related() ]
authors=[ CatAuthor(aut) for aut in self.all().select_related().prefetch_related("cache") ]
dict_aut={ aut.id: aut for aut in authors }
for aut,role,obj in issues:
dict_aut[aut.id].add( PubTuple(obj.year(),role,obj) )
for aut,role,obj in books:
dict_aut[aut.id].add( PubTuple(obj.year,role,obj) )
for aut,role,obj in articles:
dict_aut[aut.id].add( PubTuple(obj.year(),role,obj) )
return authors
#return self.all().select_related().prefetch_related("cache","authorrelation_set")
class Author(Person):
objects=AuthorManager()
class Meta:
proxy = True
def publications(self):
L=[]
for rel in self.authorrelation_set.all().select_related():
L.append( (rel.year,rel.author_role,rel.actual()) )
return L
def get_absolute_url(self):
return "/bibliography/author/%d" % self.pk
def save(self,*args,**kwargs):
Person.save(self,*args,**kwargs)
class AuthorRole(LabeledAbstract):
cover_name = models.BooleanField(default=False)
action = models.CharField(default="",max_length=1024,blank=True)
pos = models.IntegerField(unique=True)
class AuthorRelation(models.Model):
author = models.ForeignKey(Author,on_delete=models.PROTECT)
author_role = models.ForeignKey(AuthorRole,on_delete=models.PROTECT)
content_type = models.ForeignKey(ContentType,editable=False,null=True,on_delete=models.PROTECT)
year = models.IntegerField(editable=False,db_index=True)
#year_label = models.CharField(max_length=10,editable=False)
#title = models.CharField(max_length=4096)
class Meta:
ordering = [ "year" ]
def _year(self): return 0
def _title(self): return ""
def html(self): return ""
def update_year(self):
try:
self.year=self.actual()._year()
except:
self.year=self._year()
self.save()
def actual(self):
model = self.content_type.model
return self.__getattribute__(model)
def save(self,*args, **kwargs):
if (not self.content_type):
self.content_type = ContentType.objects.get_for_model(self.__class__)
try:
self.year=self.actual()._year()
except:
self.year=self._year()
super(AuthorRelation, self).save(*args, **kwargs)
def clean(self,*args,**kwargs):
self.year=self._year()
super(AuthorRelation, self).clean(*args, **kwargs)
class MigrAuthor(models.Model):
cod = models.CharField(max_length=1,default="-",db_index=True)
ind = models.IntegerField(db_index=True)
author = models.ForeignKey(Author,on_delete=models.PROTECT)
def __str__(self): return str(self.cod)+str(self.ind)+" "+str(self.author)
### publishers
class PublisherState(models.Model):
name = models.CharField(max_length=4096)
class Meta:
ordering = ["name"]
def __str__(self): return str(self.name)
class PublisherAddress(models.Model):
city = models.CharField(max_length=4096)
state = models.ForeignKey(PublisherState,on_delete=models.PROTECT)
def __str__(self): return str(self.city)+" - "+str(self.state)
class Meta:
ordering = ["city"]
class PublisherIsbnManager(models.Manager):
def isbn_alpha(self):
return self.all().filter(isbn__iregex=r'^[a-z].*')
def split_isbn(self,unseparated):
if not unseparated: return [],[]
isbn_list=[]
for isbn in unseparated:
for n in range(1,9):
isbn_list.append(isbn[:n])
L=[ v.isbn for v in self.filter(isbn__in=isbn_list) ]
if not L:
return [],unseparated
uns=[]
sep=[]
for isbn in unseparated:
trovato=False
for db_isbn in L:
if isbn.startswith(db_isbn):
trovato=True
isbn_book=isbn[len(db_isbn):]
sep.append( (db_isbn,isbn_book) )
break
if not trovato:
uns.append(isbn)
return sep,uns
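    # Illustrative example (the prefix is hypothetical): if the table contains
    # the publisher prefix "88046", split_isbn(["8804668237"]) tries prefixes
    # "8", "88", ..., "88046682" against the database and returns
    # ([("88046", "68237")], []), i.e. the ISBN split into publisher and book part.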
class PublisherIsbn(models.Model):
isbn = models.CharField(max_length=4096,unique=True,db_index=True)
preferred = models.ForeignKey("Publisher",editable=False,blank=True,on_delete=models.PROTECT)
objects = PublisherIsbnManager()
class Meta:
ordering = ["isbn"]
def update_preferred(self):
self.preferred=self.get_preferred()
self.save()
def get_preferred(self):
if self._state.adding:
return Publisher.objects.get(pk=0)
pubs=list(self.publisher_set.all())
if len(pubs)==0: return Publisher.objects.get(pk=0)
if len(pubs)!=1:
for p in pubs:
if not p.alias:
return p
return pubs[0]
def clean(self,*args,**kwargs):
self.preferred=self.get_preferred()
super(PublisherIsbn, self).clean(*args, **kwargs)
def save(self,*args,**kwargs):
self.preferred=self.get_preferred()
super(PublisherIsbn, self).save(*args, **kwargs)
def __str__(self): return str(self.isbn)
def publishers(self):
return "; ".join(map(str, self.publisher_set.all()))
class PublisherManager(models.Manager):
def add_prefetch(self,obj_list):
qset=self.filter(id__in=[obj.id for obj in obj_list])
qset=qset.prefetch_related("addresses")
return qset
def look_for(self,isbn_list):
qset=PublisherIsbn.objects.filter(isbn__in=isbn_list)
for pub in qset:
isbn_list.remove( pub.isbn )
isbn_ids=[ obj.id for obj in qset ]
p_qset=self.filter(isbns__id__in=isbn_ids).prefetch_related("isbns","addresses")
return p_qset,isbn_list
class Publisher(models.Model):
name = models.CharField(max_length=4096)
full_name = models.CharField(max_length=4096,blank=True)
url = models.CharField(max_length=4096,default="--")
note = models.TextField(blank=True,default="")
addresses = models.ManyToManyField(PublisherAddress,through='PublisherAddressPublisherRelation',blank=True)
alias = models.BooleanField(default=False)
isbns = models.ManyToManyField(PublisherIsbn,blank=True)
objects=PublisherManager()
class Meta:
ordering = ["name"]
def short_name(self):
name=self.show_name().lower()
tname=name.replace(".","").replace(",","").split()
for s in [ "srl", "spa","editore","editrice","edizioni","verlag","publisher","inc",
"éditions","editions","edition","editorial","editori","editoriale","ltd",
"gruppo","publishing","yayın","yayınları","co","publications","press","editoriali"]:
if s in tname:
tname.remove(s)
tname=[ s.capitalize() for s in tname ]
return " ".join(tname)
def clean(self,*args,**kwargs):
if not self.full_name:
self.full_name=self.name
super(Publisher, self).clean(*args, **kwargs)
def __str__(self): return str(self.name)
def address(self):
return " - ".join([str(x.address.city) for x in self.publisheraddresspublisherrelation_set.order_by("pos")])
def show_name(self):
if self.full_name: return self.full_name
return self.name
def html(self):
H=self.name
adrs=self.address()
if adrs:
H+=", "+adrs
return H
@cached_property
def isbn_prefix(self):
return ", ".join([str(x.isbn) for x in self.isbns.all()])
@cached_property
def isbn_list(self):
return [str(x.isbn) for x in self.isbns.all()]
class PublisherAddressPublisherRelation(PositionAbstract):
address = models.ForeignKey(PublisherAddress,on_delete=models.PROTECT)
publisher = models.ForeignKey(Publisher,on_delete=models.PROTECT)
def __str__(self): return str(self.publisher)+" ["+str(self.pos)+"] "+str(self.address)
class MigrPublisherRiviste(models.Model):
registro = models.CharField(max_length=4096)
publisher = models.ForeignKey(Publisher,on_delete=models.PROTECT)
def __str__(self): return str(self.registro)
### publications
class VolumeType(LabeledAbstract):
read_as = models.CharField(max_length=1024,default="")
class PublicationManager(models.Manager):
def issn_alpha(self):
return self.all().filter(issn_crc='Y')
class Publication(models.Model):
issn = models.CharField(max_length=128) #7
issn_crc = models.CharField(max_length=1,editable=False,default="Y")
publisher = models.ForeignKey(Publisher,on_delete=models.PROTECT)
title = models.CharField(max_length=4096)
volume_type = models.ForeignKey(VolumeType,on_delete=models.PROTECT)
date_format = models.CharField(max_length=4096,default="%Y-%m-%d")
objects=PublicationManager()
#periodicity=models.CharField(max_length=128,choices=[ ("monthly","monthly"),("unknown","unknown") ],default="unknown")
#first_day=models.IntegerField(default=1)
class Meta:
ordering = ['title']
def html(self):
tit=str(self.title)
if not tit: return ""
return "<i>"+tit+"</i>"
def __str__(self): return str(self.title)
def get_absolute_url(self):
return "/bibliography/publication/%d" % self.pk
def update_crc(self):
self.issn_crc = self.crc()
self.save()
def crc(self):
if not str(self.issn).isdigit(): return('Y')
pesi=[8,7,6,5,4,3,2]
cod_lista=list(map(int,list(self.issn)))
if len(cod_lista)<7:
L=len(cod_lista)
cod_lista+=[0 for x in range(L,7)]
crc=11-(sum(map(lambda x,y: x*y,cod_lista,pesi))%11)
if (crc==10): return('X')
if (crc==11): return(0)
return(crc)
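    # Worked example: for ISSN 0378-5955 the stored digits are "0378595";
    # 0*8 + 3*7 + 7*6 + 8*5 + 5*4 + 9*3 + 5*2 = 160, 160 % 11 = 6, and
    # 11 - 6 = 5, which is the published ISSN check digit.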
def clean(self,*args,**kwargs):
self.issn_crc = self.crc()
super(Publication, self).clean(*args, **kwargs)
def issue_set(self):
return Issue.objects.filter(volume__publication__id=self.id).order_by("date")
class Volume(models.Model):
label = models.CharField(max_length=256,db_index=True)
publication = models.ForeignKey(Publication,on_delete=models.PROTECT)
def __str__(self): return str(self.publication)+" - "+str(self.label)
def html(self):
H=self.publication.html()
if H:
H+=", "
H+=str(self.publication.volume_type.read_as)
if H:
H+=" "
H+=str(self.label)
return H
### publication issues
class IssueType(LabeledAbstract): pass
class IssueManager(models.Manager):
def by_publication(self,publication):
return self.all().filter(volume__publication__id=publication.id).order_by("date")
class Issue(models.Model):
volume = models.ForeignKey(Volume,on_delete=models.PROTECT)
issue_type = models.ForeignKey(IssueType,on_delete=models.PROTECT)
issn_num = models.CharField(max_length=8)
number = models.CharField(max_length=256)
title = models.CharField(max_length=4096,blank=True,default="")
date = models.DateField()
date_ipotetic = models.BooleanField(default=False)
html_cache = models.TextField(blank=True,null=True,default="",editable=False)
authors = models.ManyToManyField(Author,through='IssueAuthorRelation',blank=True)
objects=IssueManager()
class Meta:
ordering = ['date']
def issn(self):
return self.volume.publication.issn
def show_date(self):
D=self.date.strftime(self.volume.publication.date_format)
if self.date_ipotetic:
return D+"?"
return D
def save(self,*args,**kwargs):
self.html_cache=self._html()
return models.Model.save(self,*args,**kwargs)
def html(self): return self.html_cache
def _html(self):
H=self.volume.html()
if H:
H+=", "
H+="n. "+str(self.number)
tit=str(self.title)
if tit:
H+=", <i>"+tit+"</i>"
H+=", "
H+=self.date.strftime("%B %Y")
if self.date_ipotetic:
H+="?"
return H
def __str__(self):
U=str(self.volume)
U+="/"+str(self.number)
if str(self.title):
U+=". "+str(self.title)
return U
def year(self):
return self.date.year
class IssueAuthorRelation(AuthorRelation,PositionAbstract):
issue = models.ForeignKey(Issue,on_delete=models.PROTECT)
def __str__(self): return str(self.author)+", "+str(self.issue)
def _year(self): return int(self.issue.year())
def _title(self): return str(self.issue.title)
    def html(self):
        return self.issue.html()
class Meta:
ordering=["pos"]
#unique_together= [ 'author','author_role','issue' ]
def save(self,*args,**kwargs):
if not self.pos:
self.pos=1
return super(IssueAuthorRelation,self).save(*args,**kwargs)
class Article(models.Model):
title = models.CharField(max_length=4096)
issue = models.ForeignKey(Issue,on_delete=models.PROTECT)
page_begin = models.CharField(max_length=10,blank=True,default="x")
page_end = models.CharField(max_length=10,blank=True,default="x")
authors = models.ManyToManyField(Author,through='ArticleAuthorRelation',blank=True)
html_cache = models.TextField(blank=True,null=True,default="",editable=False)
def get_authors(self):
return ", ".join([str(x.author.long_name()) for x in self.articleauthorrelation_set.filter(author_role__cover_name=True).order_by("pos")])
def get_secondary_authors(self):
L=list(self.articleauthorrelation_set.filter(author_role__cover_name=False).order_by("author_role__pos","pos"))
ret=""
curr_pos=-1
comma=True
for rel in L:
if curr_pos!=int(rel.author_role.pos):
action=str(rel.author_role.action).strip()
if action:
if ret:
ret+=", "
ret+=action+" "
comma=False
curr_pos=int(rel.author_role.pos)
if ret and comma: ret+=", "
ret+=rel.author.long_name()
comma=True
return ret
def __str__(self): return str(self.title) #+" ("+unicode(self.year)+")"
def issn(self): return self.issue.issn()
def issn_num(self): return self.issue.issn_num
def year(self): return self.issue.year()
def save(self,*args,**kwargs):
self.html_cache=self._html()
return models.Model.save(self,*args,**kwargs)
def html(self): return self.html_cache
def _html(self):
H=""
H+=self.get_authors()
if H:
H+=", "
H+="“"+str(self.title)+"”, "
sec_authors=self.get_secondary_authors()
if sec_authors:
H+=sec_authors+", "
issue=self.issue.html()
if issue:
H+=issue+", "
if str(self.page_begin)==str(self.page_end):
H+="p. "+str(self.page_begin)
else:
H+="pp. "+str(self.page_begin)+"-"+str(self.page_end)
return H
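    # Illustrative output (names and titles are invented):
    # John Doe, “Some Title”, <i>Some Journal</i>, n. 3, March 2001, pp. 10-12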
class ArticleAuthorRelation(AuthorRelation,PositionAbstract):
article = models.ForeignKey(Article,on_delete=models.PROTECT)
def __str__(self): return str(self.author)+", "+str(self.article)
def _year(self): return int(self.article.year())
def _title(self): return str(self.article.title)
    def html(self):
        return self.article.html()
class Meta:
ordering=["pos"]
### books
class BookManager(models.Manager):
def isbn_alpha(self):
return self.all().filter(isbn_crc10='Y').order_by("isbn_ced","isbn_book","year","title")
def by_isbn_pub(self,isbn):
print("ISBN:",isbn)
return self.all().filter(isbn_ced__iexact=isbn).order_by("isbn_ced","isbn_book","year","title")
def add_prefetch(self,obj_list):
qset=self.filter(id__in=[book.id for book in obj_list])
qset=qset.select_related("publisher").prefetch_related("authors")
return qset
def look_for(self,isbn_list):
if not isbn_list: return None,[]
q=models.Q()
for isbn_ced,isbn_book in isbn_list:
q=q|models.Q(isbn_ced=isbn_ced,isbn_book=isbn_book)
qset=self.filter(q).select_related("publisher").prefetch_related("authors")
new_isbn_list=[]
for book in qset:
isbn_list.remove( (book.isbn_ced,book.isbn_book) )
return qset,isbn_list
class Book(CategorizedObject):
isbn_ced = models.CharField(max_length=9,db_index=True)
isbn_book = models.CharField(max_length=9,db_index=True)
isbn_crc10 = models.CharField(max_length=1,editable=False,default="Y")
isbn_crc13 = models.CharField(max_length=1,editable=False,default="Y")
isbn_cache10 = models.CharField(max_length=20,editable=False,default="")
isbn_cache13 = models.CharField(max_length=20,editable=False,default="")
title = models.CharField(max_length=4096)
year = models.IntegerField()
year_ipotetic = models.BooleanField(default=False)
publisher = models.ForeignKey(Publisher,on_delete=models.PROTECT)
authors = models.ManyToManyField(Author,through='BookAuthorRelation',blank=True)
html_cache = models.TextField(blank=True,default="",editable=False)
objects=BookManager()
class Meta:
ordering=["title","year","publisher"]
index_together=[ ["isbn_ced","isbn_book"] ]
def get_authors(self):
return ", ".join([str(x.author.long_name()) for x in self.bookauthorrelation_set.filter(author_role__cover_name=True).order_by("pos")])
def get_absolute_url(self):
U="/bibliography/book/%d" % self.pk
return U
def get_secondary_authors(self):
L=list(self.bookauthorrelation_set.filter(author_role__cover_name=False).order_by("author_role__pos","pos"))
ret=""
curr_pos=-1
comma=True
for rel in L:
if curr_pos!=int(rel.author_role.pos):
action=str(rel.author_role.action).strip()
if action:
if ret:
ret+=", "
ret+=action+" "
comma=False
curr_pos=int(rel.author_role.pos)
if ret and comma: ret+=", "
ret+=rel.author.long_name()
comma=True
return ret
def __str__(self):
if not self.year_ipotetic:
return str(self.title)+" ("+str(self.year)+")"
return str(self.title)+" ("+str(self.year)+"?)"
@cached_property
def html(self): return self.html_cache
def _html(self):
H=""
H+=self.get_authors()
if H:
H+=", "
H+="<i>"+str(self.title)+"</i>, "
sec_authors=self.get_secondary_authors()
if sec_authors:
H+=sec_authors+", "
pub=self.publisher.html()
if pub:
H+=pub+", "
H+=str(self.year)
if self.year_ipotetic: H+="?"
return H
def clean(self,*args,**kwargs):
self.isbn_crc10 = self.crc10()
self.isbn_crc13 = self.crc13()
self.isbn_cache10=self.isbn_ced+self.isbn_book+str(self.crc10())
self.isbn_cache13='978'+self.isbn_ced+self.isbn_book+str(self.crc13())
super(Book, self).clean(*args, **kwargs)
def save(self,*args,**kwargs):
self.isbn_crc10 = self.crc10()
self.isbn_crc13 = self.crc13()
self.isbn_cache10=self.isbn_ced+self.isbn_book+str(self.crc10())
self.isbn_cache13='978'+self.isbn_ced+self.isbn_book+str(self.crc13())
self.html_cache=self._html()
super(Book, self).save(*args, **kwargs)
def update_crc(self):
self.isbn_crc10 = self.crc10()
self.isbn_crc13 = self.crc13()
self.isbn_cache10=self.isbn_ced+self.isbn_book+str(self.crc10())
self.isbn_cache13='978'+self.isbn_ced+self.isbn_book+str(self.crc13())
self.save()
def isbn10(self):
return str(self.isbn_ced)+"-"+str(self.isbn_book)+"-"+str(self.isbn_crc10)
def isbn13(self):
return "978-"+str(self.isbn_ced)+"-"+str(self.isbn_book)+"-"+str(self.isbn_crc13)
def crc10(self):
if not str(self.isbn_book).isdigit(): return('Y')
if not str(self.isbn_ced).isdigit(): return('Y')
isbn=str(self.isbn_ced)+str(self.isbn_book)
pesi=[10,9,8,7,6,5,4,3,2]
cod_lista=list(map(int,list(isbn)))
if len(cod_lista)<9:
L=len(cod_lista)
cod_lista+=[0 for x in range(L,9)]
crc=11-(sum(map(lambda x,y: x*y,cod_lista,pesi))%11)
if (crc==10): return('X')
if (crc==11): return(0)
return(crc)
def crc13(self):
if not str(self.isbn_book).isdigit(): return('Y')
if not str(self.isbn_ced).isdigit(): return('Y')
isbn=str(self.isbn_ced)+str(self.isbn_book)
pesi=[1,3,1,3,1,3,1,3,1,3,1,3]
cod_lista=[9,7,8]+list(map(int,list(isbn)))
if len(cod_lista)<12:
L=len(cod_lista)
cod_lista+=[0 for x in range(L,12)]
crc=10-(sum(map(lambda x,y: x*y,cod_lista,pesi))%10)
if (crc==10): return(0)
return(crc)
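    # Worked example for ISBN group/book digits "030640615" (0-306-40615):
    # crc10: 0*10+3*9+0*8+6*7+4*6+0*5+6*4+1*3+5*2 = 130, 11 - (130 % 11) = 2,
    # giving ISBN-10 0-306-40615-2; crc13 prepends 978 and computes
    # 9+21+8+0+3+0+6+12+0+18+1+15 = 93, 10 - (93 % 10) = 7,
    # giving ISBN-13 978-0-306-40615-7.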
class BookAuthorRelation(AuthorRelation,PositionAbstract):
book = models.ForeignKey(Book,on_delete=models.PROTECT)
def __str__(self): return str(self.author)+", "+str(self.book)
def _year(self): return int(self.book.year)
def _title(self): return str(self.book.title)
def html(self): return self.book.html()
def get_absolute_url(self): return self.book.get_absolute_url()
class Meta:
ordering=["pos"]
class TextsCdrom(LabeledAbstract):
books = models.ManyToManyField(Book,blank=True)
# class BookTimeSpanRelation(models.Model):
# time_span=models.ForeignKey(TimeSpan)
# book=models.OneToOneField(Book)
# def __str__(self):
# return unicode(self.time_span)+u"/"+unicode(self.book)
### repository cache
class RepositoryCacheBook(models.Model):
isbn = models.CharField(max_length=13,unique=True)
publisher = models.CharField(max_length=4096,default=" ")
year = models.CharField(max_length=4096,default=" ",blank=True)
title = models.CharField(max_length=4096,default=" ")
city = models.CharField(max_length=4096,default=" ")
indb = models.BooleanField(default=False)
def clean(self,*args,**kwargs):
if not self.year:
self.year=" "
super(RepositoryCacheBook, self).clean(*args, **kwargs)
def __str__(self):
return str(self.isbn)+" "+str(self.title)
class Meta:
ordering = [ "isbn" ]
class RepositoryCacheAuthor(PositionAbstract):
book = models.ForeignKey(RepositoryCacheBook,on_delete=models.PROTECT)
name = models.CharField(max_length=4096)
role = models.CharField(max_length=4096)
def __str__(self):
return self.name
class Meta:
ordering = [ "name" ]
class RepositoryFailedIsbn(models.Model):
isbn10 = models.CharField(max_length=4096)
isbn13 = models.CharField(max_length=4096)
def __str__(self):
return self.isbn10+"/"+self.isbn13
class Meta:
ordering = [ "isbn10" ]
### others
class BookSerieWithoutIsbn(models.Model):
isbn_ced = models.CharField(max_length=9,db_index=True)
isbn_book_prefix = models.CharField(max_length=9,db_index=True)
title = models.CharField(max_length=4096)
title_prefix = models.CharField(max_length=4096,default='',blank=True)
publisher = models.ForeignKey(Publisher,on_delete=models.PROTECT)
def __str__(self): return str(self.title)
### signals
def category_post_save_handler(sender,instance,created,raw,using,update_fields,**kwargs):
if raw: return
if created:
CategoryTreeNode.objects.create_category(instance)
else:
CategoryTreeNode.objects.update_category(instance)
post_save.connect(category_post_save_handler,sender=Category)
def category_pre_delete_handler(sender,instance,using,**kwargs):
CategoryTreeNode.objects.remove_category(instance)
pre_delete.connect(category_pre_delete_handler,sender=Category)
class CategoryRelationChangeHandler(object):
def __init__(self):
self.old_parents={}
self.old_children={}
def pre_save(self,sender,instance,raw,using,update_fields,**kwargs):
if raw: return
if not instance.id: return
old_obj=CategoryRelation.objects.get(id=instance.id)
self.old_parents[instance.id]=old_obj.parent
self.old_children[instance.id]=old_obj.child
def post_save(self,sender,instance,created,raw,using,update_fields,**kwargs):
if raw: return
if created:
CategoryTreeNode.objects.add_child_category(instance.parent,instance.child)
return
old_parent=None
old_child=None
if instance.id in self.old_parents:
old_parent=self.old_parents[instance.id]
del(self.old_parents[instance.id])
if instance.id in self.old_children:
old_child=self.old_children[instance.id]
del(self.old_children[instance.id])
CategoryTreeNode.objects.update_child_category(old_parent,old_child,instance.parent,instance.child)
categoryrelation_save_handler=CategoryRelationChangeHandler()
post_save.connect(categoryrelation_save_handler.post_save,sender=CategoryRelation)
pre_save.connect(categoryrelation_save_handler.pre_save,sender=CategoryRelation)
def categoryrelation_pre_delete_handler(sender,instance,using,**kwargs):
CategoryTreeNode.objects.remove_child_category(instance.parent,instance.child)
pre_delete.connect(categoryrelation_pre_delete_handler,sender=CategoryRelation)
def categorizedobjectcategoryrelation_m2m_changed_handler(sender, instance, action, reverse,model,pk_set,using,**kwargs):
if action=="post_add":
function=CategoryTreeNode.objects.add_category_relation
elif action=="pre_remove":
function=CategoryTreeNode.objects.remove_category_relation
else:
return
if model==Category:
cat_list=Category.objects.filter(pk__in=list(pk_set))
for cat in cat_list:
function(cat,instance)
return
target_list=model.objects.filter(pk__in=list(pk_set))
for target in target_list:
function(instance,target)
m2m_changed.connect(categorizedobjectcategoryrelation_m2m_changed_handler,sender=Book.categories.through)
# @receiver(django.db.models.signals.m2m_changed, sender=Article.categories.through)
# def modify_articlecategoryrelation_handler(sender, **kwargs):
# print "Modify",kwargs["instance"],"with action:",kwargs["action"],kwargs["model"],kwargs["pk_set"]
| chiara-paci/baskerville | baskervilleweb/bibliography/models.py | Python | gpl-3.0 | 75,809 |
import modules.pumpingsystem as ps
import pandas as pd
import numpy as np
# Pump schedules as per SCADA. Rows = pumps; columns = tariff periods (1: peak, 2: standard, 3: off-peak).
pump_schedule_41 = np.array([[72, 42, 50],
[95, 78, 86],
[110, 110, 110],
[120, 120, 120],
[150, 150, 150]])
pump_schedule_31 = np.array([[77, 45, 45],
[92, 70, 60],
[110, 110, 110],
[120, 120, 120]])
pump_schedule_20 = np.array([[72, 47, 55],
[82, 70, 70],
[91, 87, 92],
[110, 110, 110]])
pump_schedule_IPC = np.array([[80, 45, 45],
[85, 70, 60],
[90, 82, 82],
[110, 110, 110],
[150, 150, 150]])
dummy_pump_schedule_surface = np.array([[150, 150, 150]])
# Inflows into dams
dam_inflow_profiles = pd.read_csv('input/CS3_dam_inflow_profiles.csv.gz')
inflow_41 = np.reshape(dam_inflow_profiles['41L Inflow'].values, (24, 2))
inflow_31 = np.reshape(dam_inflow_profiles['31L Inflow'].values, (24, 2))
inflow_20 = np.reshape(dam_inflow_profiles['20L Inflow'].values, (24, 2))
inflow_IPC = np.reshape(dam_inflow_profiles['IPC Inflow'].values, (24, 2))
inflow_surface = np.reshape(dam_inflow_profiles['Surface Inflow'].values, (24, 2))
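# Note: the (24, 2) reshape presumably encodes 24 hourly inflow values for two
# day types (this is an assumption from the shape alone -- the CSV column
# layout defines the actual meaning).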
# Read actual data for initial conditions and validation
actual_values = pd.read_csv('input/CS3_data_for_validation.csv.gz')
actual_status_41 = actual_values['41L Status'].values
actual_status_31 = actual_values['31L Status'].values
actual_status_20 = actual_values['20L Status'].values
actual_status_IPC = actual_values['IPC Status'].values
initial_level_41 = actual_values['41L Level'][0]
initial_level_31 = actual_values['31L Level'][0]
initial_level_20 = actual_values['20L Level'][0]
initial_level_IPC = actual_values['IPC Level'][0]
initial_level_surface = actual_values['Surface Level'][0]
# Create pump system
pump_system = ps.PumpSystem('CS3')
pump_system.add_level(ps.PumpingLevel("41L", 3000000, initial_level_41,
216.8, 3508.4, pump_schedule_41, actual_status_41[0],
inflow_41, fed_to_level="31L", pump_statuses_for_validation=actual_status_41,
n_mode_max_pumps=2, n_mode_max_level=80, n_mode_control_range=30,
n_mode_top_offset=5))
pump_system.add_level(ps.PumpingLevel("31L", 3000000, initial_level_31,
146.8, 3283.6, pump_schedule_31, actual_status_31[0],
inflow_31, fed_to_level="20L", pump_statuses_for_validation=actual_status_31,
n_mode_max_pumps=2, n_mode_max_level=80, n_mode_control_range=20,
n_mode_top_offset=5, n_mode_bottom_offset=5))
pump_system.add_level(ps.PumpingLevel("20L", 3000000, initial_level_20,
171.8, 3821.0, pump_schedule_20, actual_status_20[0],
inflow_20, fed_to_level="IPC", pump_statuses_for_validation=actual_status_20,
n_mode_max_pumps=2, n_mode_control_range=20, n_mode_top_offset=7,
n_mode_bottom_offset=5))
pump_system.add_level(ps.PumpingLevel("IPC", 3000000, initial_level_IPC,
147.4, 3572.8, pump_schedule_IPC, actual_status_IPC[0],
inflow_IPC, fed_to_level="Surface",
pump_statuses_for_validation=actual_status_IPC,
n_mode_max_pumps=2, n_mode_max_level=80, n_mode_control_range=10,
n_mode_top_offset=5, n_mode_bottom_offset=3))
pump_system.add_level(ps.PumpingLevel("Surface", 5000000, initial_level_surface,
0, 0, dummy_pump_schedule_surface, 0, inflow_surface,
pump_statuses_for_validation=actual_status_IPC,
n_mode_max_pumps=0)) # the status data doesn't matter
# Perform simulations
pump_system.perform_simulation(mode='validation', save=True)
pump_system.perform_simulation(mode='1-factor', save=True)
pump_system.perform_simulation(mode='2-factor', save=True)
pump_system.perform_simulation(mode='n-factor', save=True)
| Mierzen/Dam-Simulation | simulations/Case_study_3/simulation_CS3.py | Python | gpl-3.0 | 4,643 |
default_app_config = "nnmware.apps.booking.apps.BookingAppConfig"
| nnmware/nnmware | apps/booking/__init__.py | Python | gpl-3.0 | 66 |
"""
:mod:`Selectors` -- selection methods module
==============================================================
This module has the *selection methods*, like roulette wheel, tournament, ranking, etc.
"""
import random
import Consts
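# A minimal usage sketch (assuming the pyevolve-style GSimpleGA API that this
# module appears to be vendored from; exact import paths may differ here):
#
#   ga = GSimpleGA.GSimpleGA(genome)
#   ga.selector.set(Selectors.GTournamentSelector)
#   ga.setParams(tournamentPool=4)
#
# Selectors are called as selector(population, popID=...); GRankSelector and
# GRouletteWheel use the popID keyword below to cache per-generation work.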
def GRankSelector(population, **args):
""" The Rank Selector - This selector will pick the best individual of
the population every time.
"""
count = 0
if args["popID"] != GRankSelector.cachePopID:
best_fitness = population.bestFitness().fitness
for index in xrange(1, len(population.internalPop)):
if population[index].fitness == best_fitness:
count += 1
GRankSelector.cachePopID = args["popID"]
GRankSelector.cacheCount = count
else:
count = GRankSelector.cacheCount
return population[random.randint(0, count)]
GRankSelector.cachePopID = None
GRankSelector.cacheCount = None
def GUniformSelector(population, **args):
""" The Uniform Selector """
return population[random.randint(0, len(population) - 1)]
def GTournamentSelector(population, **args):
""" The Tournament Selector
It accepts the *tournamentPool* population parameter.
.. note::
the Tournament Selector uses the Roulette Wheel to
pick individuals for the pool
"""
choosen = None
poolSize = population.getParam("tournamentPool", Consts.CDefTournamentPoolSize)
tournament_pool = [GRouletteWheel(population, **args) for i in xrange(poolSize)]
choosen = min(tournament_pool, key=lambda ind: ind.fitness)
return choosen
def GTournamentSelectorAlternative(population, **args):
""" The alternative Tournament Selector
    This Tournament Selector doesn't use the Roulette Wheel.
It accepts the *tournamentPool* population parameter.
"""
pool_size = population.getParam("tournamentPool", Consts.CDefTournamentPoolSize)
len_pop = len(population)
tournament_pool = [population[random.randint(0, len_pop - 1)] for i in xrange(pool_size)]
choosen = min(tournament_pool, key=lambda ind: ind.fitness)
return choosen
def GRouletteWheel(population, **args):
""" The Roulette Wheel selector """
psum = None
if args["popID"] != GRouletteWheel.cachePopID:
GRouletteWheel.cachePopID = args["popID"]
psum = GRouletteWheel_PrepareWheel(population)
GRouletteWheel.cacheWheel = psum
else:
psum = GRouletteWheel.cacheWheel
cutoff = random.random()
lower = 0
upper = len(population) - 1
while(upper >= lower):
i = lower + ((upper - lower) / 2)
if psum[i] > cutoff:
upper = i - 1
else:
lower = i + 1
lower = min(len(population) - 1, lower)
lower = max(0, lower)
return population.bestFitness(lower)
GRouletteWheel.cachePopID = None
GRouletteWheel.cacheWheel = None
def GRouletteWheel_PrepareWheel(population):
""" A preparation for Roulette Wheel selection """
len_pop = len(population)
psum = [i for i in xrange(len_pop)]
population.statistics()
pop_fitMax = population.stats["fitMax"]
pop_fitMin = population.stats["fitMin"]
if pop_fitMax == pop_fitMin:
for index in xrange(len_pop):
psum[index] = (index + 1) / float(len_pop)
elif (pop_fitMax > 0 and pop_fitMin >= 0) or (pop_fitMax <= 0 and pop_fitMin < 0):
population.sort()
psum[0] = -population[0].fitness + pop_fitMax + pop_fitMin
for i in xrange(1, len_pop):
psum[i] = -population[i].fitness + pop_fitMax + pop_fitMin + psum[i - 1]
for i in xrange(len_pop):
psum[i] /= float(psum[len_pop - 1])
return psum
| UdeM-LBIT/GAPol | lib/ga/evolve/Selectors.py | Python | gpl-3.0 | 3,696 |
# Copyright (C) 2016-2020 Matthias Klumpp <matthias@tenstral.net>
#
# Licensed under the GNU Lesser General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import re
from typing import List
from apt_pkg import version_compare
from laniakea import LocalConfig, LkModule
from laniakea.repository import Repository, make_newest_packages_dict, version_revision
from laniakea.db import session_scope, config_get_distro_tag, \
ArchiveSuite, ArchiveComponent, ArchiveArchitecture, SourcePackage, SynchrotronIssue, \
SynchrotronIssueKind, SynchrotronSource, SynchrotronConfig, SyncBlacklistEntry
from laniakea.dakbridge import DakBridge
from laniakea.logging import log
from laniakea.msgstream import EventEmitter
class SyncEngine:
'''
Execute package synchronization in Synchrotron
'''
def __init__(self, target_suite_name: str, source_suite_name: str):
self._lconf = LocalConfig()
self._dak = DakBridge()
# FIXME: Don't hardcode this!
repo_name = 'master'
# the repository of the distribution we import stuff into
self._target_repo = Repository(self._lconf.archive_root_dir,
repo_name)
self._target_repo.set_trusted(True)
self._target_suite_name = target_suite_name
self._source_suite_name = source_suite_name
self._distro_tag = config_get_distro_tag()
self._synced_source_pkgs = []
with session_scope() as session:
sync_source = session.query(SynchrotronSource) \
.filter(SynchrotronSource.suite_name == self._source_suite_name).one()
# FIXME: Synchrotron needs adjustments to work
# better with the new "multiple autosync tasks" model.
# This code will need to be revised for that
# (currently it is just a 1:1 translation from D code)
# the repository of the distribution we use to sync stuff from
self._source_repo = Repository(sync_source.repo_url,
sync_source.os_name,
self._lconf.synchrotron_sourcekeyrings)
# we trust everything by default
self._imports_trusted = True
with session_scope() as session:
self._sync_blacklist = set([value for value, in session.query(SyncBlacklistEntry.pkgname)])
def _publish_synced_spkg_events(self, src_os, src_suite, dest_suite, forced=False, emitter=None):
''' Submit events for the synced source packages to the message stream '''
if not emitter:
emitter = EventEmitter(LkModule.SYNCHROTRON)
for spkg in self._synced_source_pkgs:
data = {'name': spkg.name,
'version': spkg.version,
'src_os': src_os,
'suite_src': src_suite,
'suite_dest': dest_suite,
'forced': forced}
emitter.submit_event('src-package-imported', data)
def _get_repo_source_package_map(self, repo, suite_name: str, component_name: str):
''' Get an associative array of the newest source packages present in a repository. '''
suite = ArchiveSuite(suite_name)
component = ArchiveComponent(component_name)
spkgs = repo.source_packages(suite, component)
return make_newest_packages_dict(spkgs)
def _get_repo_binary_package_map(self, repo, suite_name: str, component_name: str,
arch_name: str = None, with_installer: bool = True):
''' Get an associative array of the newest binary packages present in a repository. '''
suite = ArchiveSuite(suite_name)
component = ArchiveComponent(component_name)
arch = ArchiveArchitecture(arch_name)
arch_all = ArchiveArchitecture('all')
bpkgs = repo.binary_packages(suite, component, arch)
bpkgs.extend(repo.binary_packages(suite, component, arch_all)) # always append arch:all packages
if with_installer:
# add d-i packages to the mix
bpkgs.extend(repo.installer_packages(suite, component, arch))
bpkgs.extend(repo.installer_packages(suite, component, arch_all)) # always append arch:all packages
return make_newest_packages_dict(bpkgs)
def _get_target_source_packages(self, component: str):
''' Get mapping of all sources packages in a suite and its parent suite. '''
with session_scope() as session:
target_suite = session.query(ArchiveSuite) \
.filter(ArchiveSuite.name == self._target_suite_name).one()
suite_pkgmap = self._get_repo_source_package_map(self._target_repo,
target_suite.name,
component)
if target_suite.parent:
# we have a parent suite
parent_map = self._get_repo_source_package_map(self._target_repo,
target_suite.parent.name,
component)
# merge the two arrays, keeping only the latest versions
suite_pkgmap = make_newest_packages_dict(list(parent_map.values()) + list(suite_pkgmap.values()))
return suite_pkgmap
def _import_package_files(self, suite: str, component: str, fnames: List[str]):
''' Import an arbitrary amount of packages via the archive management software. '''
return self._dak.import_package_files(suite, component, fnames, self._imports_trusted, True)
def _import_source_package(self, spkg: SourcePackage, component: str) -> bool:
'''
Import a source package from the source repository into the
target repo.
'''
dscfile = None
for f in spkg.files:
# the source repository might be on a remote location, so we need to
# request each file to be there.
# (dak will fetch the files referenced in the .dsc file from the same directory)
            if f.fname.endswith('.dsc'):
                dscfile = self._source_repo.get_file(f)
            else:
                self._source_repo.get_file(f)
if not dscfile:
log.error('Critical consistency error: Source package {} in repository {} has no .dsc file.'
.format(spkg.name, self._source_repo.base_dir))
return False
if self._import_package_files(self._target_suite_name, component, [dscfile]):
self._synced_source_pkgs.append(spkg)
return True
return False
def _import_binaries_for_source(self, sync_conf, target_suite, component: str, spkgs: List[SourcePackage],
ignore_target_changes: bool = False) -> bool:
''' Import binary packages for the given set of source packages into the archive. '''
if not sync_conf.sync_binaries:
log.debug('Skipping binary syncs.')
return True
# list of valid architectrures supported by the target
target_archs = [a.name for a in target_suite.architectures]
# cache of binary-package mappings for the source
src_bpkg_arch_map = {}
for aname in target_archs:
src_bpkg_arch_map[aname] = self._get_repo_binary_package_map(self._source_repo, self._source_suite_name, component, aname)
# cache of binary-package mappings from the target repository
dest_bpkg_arch_map = {}
for aname in target_archs:
dest_bpkg_arch_map[aname] = self._get_repo_binary_package_map(self._target_repo, self._target_suite_name, component, aname)
for spkg in spkgs:
bin_files_synced = False
existing_packages = False
for arch_name in target_archs:
if arch_name not in src_bpkg_arch_map:
continue
src_bpkg_map = src_bpkg_arch_map[arch_name]
dest_bpkg_map = dest_bpkg_arch_map[arch_name]
bin_files = []
for bin_i in spkg.binaries:
if bin_i.name not in src_bpkg_map:
if bin_i.name in dest_bpkg_map:
existing_packages = True # package only exists in target
continue
if arch_name != 'all' and bin_i.architectures == ['all']:
# we handle arch:all explicitly
continue
bpkg = src_bpkg_map[bin_i.name]
if bin_i.version != bpkg.source_version:
log.debug('Not syncing binary package \'{}\': Version number \'{}\' does not match source package version \'{}\'.'
.format(bpkg.name, bin_i.version, bpkg.source_version))
continue
ebpkg = dest_bpkg_map.get(bpkg.name)
if ebpkg:
if version_compare(ebpkg.version, bpkg.version) >= 0:
log.debug('Not syncing binary package \'{}/{}\': Existing binary package with bigger/equal version \'{}\' found.'
.format(bpkg.name, bpkg.version, ebpkg.version))
existing_packages = True
continue
                        # Filter out manual rebuild uploads matching the pattern XbY.
                        # Sometimes rebuild uploads of unmodified packages happen, and if the source
                        # distro did a binNMU, we don't want to sync that, even if its version is higher.
                        # This rebuild-upload check must only happen if we haven't just updated the source package
                        # (in that case the source package version will be bigger than the existing binary package version).
if version_compare(spkg.version, ebpkg.version) >= 0:
if re.match(r'(.*)b([0-9]+)', ebpkg.version):
log.debug('Not syncing binary package \'{}/{}\': Existing binary package with rebuild upload \'{}\' found.'
.format(bpkg.name, bpkg.version, ebpkg.version))
existing_packages = True
continue
if not ignore_target_changes and self._distro_tag in version_revision(ebpkg.version):
# safety measure, we should never get here as packages with modifications were
# filtered out previously.
log.debug('Can not sync binary package {}/{}: Target has modifications.'.format(bin_i.name, bin_i.version))
continue
fname = self._source_repo.get_file(bpkg.bin_file)
bin_files.append(fname)
# now import the binary packages, if there is anything to import
if bin_files:
bin_files_synced = True
ret = self._import_package_files(self._target_suite_name, component, bin_files)
if not ret:
return False
if not bin_files_synced and not existing_packages:
log.warning('No binary packages synced for source {}/{}'.format(spkg.name, spkg.version))
return True
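    # Illustrative notes for the checks above (hedged sketch, not part of the
    # original module): version_compare() follows Debian semantics (negative if
    # a < b, zero if equal, positive if a > b), and binNMU rebuild uploads sort
    # higher than the plain version, e.g.
    #
    #   >>> import re
    #   >>> version_compare('1.0-1', '1.0-1b1') < 0
    #   True
    #   >>> bool(re.match(r'(.*)b([0-9]+)', '1.0-1b1'))
    #   True
    #   >>> bool(re.match(r'(.*)b([0-9]+)', '1.0-1'))
    #   False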
def sync_packages(self, component: str, pkgnames: List[str], force: bool = False):
self._synced_source_pkgs = []
with session_scope() as session:
sync_conf = session.query(SynchrotronConfig) \
.join(SynchrotronConfig.destination_suite) \
.join(SynchrotronConfig.source) \
.filter(ArchiveSuite.name == self._target_suite_name,
SynchrotronSource.suite_name == self._source_suite_name).one_or_none()
if not sync_conf:
log.error('Unable to find a sync config for this source/destination combination.')
return False
if not sync_conf.sync_enabled:
log.error('Can not synchronize package: Synchronization is disabled for this configuration.')
return False
target_suite = session.query(ArchiveSuite) \
.filter(ArchiveSuite.name == self._target_suite_name).one()
dest_pkg_map = self._get_target_source_packages(component)
src_pkg_map = self._get_repo_source_package_map(self._source_repo,
self._source_suite_name,
component)
for pkgname in pkgnames:
spkg = src_pkg_map.get(pkgname)
dpkg = dest_pkg_map.get(pkgname)
if not spkg:
log.info('Can not sync {}: Does not exist in source.'.format(pkgname))
continue
if pkgname in self._sync_blacklist:
log.info('Can not sync {}: The package is blacklisted.'.format(pkgname))
continue
if dpkg:
if version_compare(dpkg.version, spkg.version) >= 0:
if force:
                            log.warning('{}: Target version \'{}\' is newer than or equal to source version \'{}\'.'
                                        .format(pkgname, dpkg.version, spkg.version))
                        else:
                            log.info('Can not sync {}: Target version \'{}\' is newer than or equal to source version \'{}\'.'
                                     .format(pkgname, dpkg.version, spkg.version))
continue
if not force:
if self._distro_tag in version_revision(dpkg.version):
log.error('Not syncing {}/{}: Destination has modifications (found {}).'
.format(spkg.name, spkg.version, dpkg.version))
continue
# sync source package
# the source package must always be known to dak first
ret = self._import_source_package(spkg, component)
if not ret:
return False
ret = self._import_binaries_for_source(sync_conf, target_suite, component, self._synced_source_pkgs, force)
# TODO: Analyze the input, fetch the packages from the source distribution and
# import them into the target in their correct order.
# Then apply the correct, synced override from the source distro.
self._publish_synced_spkg_events(sync_conf.source.os_name,
sync_conf.source.suite_name,
sync_conf.destination_suite.name,
force)
return ret
def autosync(self, session, sync_conf, remove_cruft: bool = True):
        ''' Synchronize all packages that are newer in the source suite. '''
self._synced_source_pkgs = []
active_src_pkgs = [] # source packages which should have their binary packages updated
res_issues = []
target_suite = session.query(ArchiveSuite) \
.filter(ArchiveSuite.name == self._target_suite_name).one()
sync_conf = session.query(SynchrotronConfig) \
.join(SynchrotronConfig.destination_suite) \
.join(SynchrotronConfig.source) \
.filter(ArchiveSuite.name == self._target_suite_name,
SynchrotronSource.suite_name == self._source_suite_name).one_or_none()
for component in target_suite.components:
dest_pkg_map = self._get_target_source_packages(component.name)
            # The source package list contains many different versions; some source package
            # versions are explicitly kept for GPL compatibility.
            # Sometimes a binary package migrates into another suite, dragging the newer
            # source package it was built against into the target suite with it.
            # These packages then have a source with a high version number, but might not have
            # any binaries due to them migrating later.
            # We need to account for that case when doing binary syncs (TODO: and maybe
            # safeguard against it when doing source-only syncs too?). That's why we don't
            # filter out the newest packages in binary-sync mode.
if sync_conf.sync_binaries:
src_pkg_range = self._source_repo.source_packages(ArchiveSuite(self._source_suite_name), component)
else:
src_pkg_range = self._get_repo_source_package_map(self._source_repo,
self._source_suite_name,
component).values()
for spkg in src_pkg_range:
# ignore blacklisted packages in automatic sync
if spkg.name in self._sync_blacklist:
continue
dpkg = dest_pkg_map.get(spkg.name)
if dpkg:
if version_compare(dpkg.version, spkg.version) >= 0:
                        log.debug('Skipped sync of {}: Target version \'{}\' is newer than or equal to source version \'{}\'.'
                                  .format(spkg.name, dpkg.version, spkg.version))
continue
# check if we have a modified target package,
# indicated via its Debian revision, e.g. "1.0-0tanglu1"
if self._distro_tag in version_revision(dpkg.version):
log.info('Not syncing {}/{}: Destination has modifications (found {}).'
.format(spkg.name, spkg.version, dpkg.version))
# add information that this package needs to be merged to the issue list
issue = SynchrotronIssue()
issue.package_name = spkg.name
issue.source_version = spkg.version
issue.target_version = dpkg.version
issue.kind = SynchrotronIssueKind.MERGE_REQUIRED
res_issues.append(issue)
continue
# sync source package
# the source package must always be known to dak first
ret = self._import_source_package(spkg, component.name)
if not ret:
return False, []
                # a new source package is always active and needs its binary packages synced, in
                # case we do binary syncs.
active_src_pkgs.append(spkg)
# all packages in the target distribution are considered active, as long as they don't
# have modifications.
for spkg in dest_pkg_map.values():
if self._distro_tag in version_revision(spkg.version):
active_src_pkgs.append(spkg)
# import binaries as well. We test for binary updates for all available active source packages,
# as binNMUs might have happened in the source distribution.
# (an active package in this context is any source package which doesn't have modifications in the
# target distribution)
ret = self._import_binaries_for_source(sync_conf, target_suite, component.name, active_src_pkgs)
if not ret:
return False, []
# test for cruft packages
target_pkg_index = {}
for component in target_suite.components:
dest_pkg_map = self._get_repo_source_package_map(self._target_repo,
target_suite.name,
component.name)
for pkgname, pkg in dest_pkg_map.items():
target_pkg_index[pkgname] = pkg
# check which packages are present in the target, but not in the source suite
for component in target_suite.components:
src_pkg_map = self._get_repo_source_package_map(self._source_repo,
self._source_suite_name,
component.name)
for pkgname in src_pkg_map.keys():
target_pkg_index.pop(pkgname, None)
# remove cruft packages
if remove_cruft:
for pkgname, dpkg in target_pkg_index.items():
dpkg_ver_revision = version_revision(dpkg.version, False)
# native packages are never removed
if not dpkg_ver_revision:
continue
                # check if the package is introduced as new in the distro, in which case we won't remove it
if dpkg_ver_revision.startswith('0' + self._distro_tag):
continue
# if this package was modified in the target distro, we will also not remove it, but flag it
# as "potential cruft" for someone to look at.
if self._distro_tag in dpkg_ver_revision:
issue = SynchrotronIssue()
issue.kind = SynchrotronIssueKind.MAYBE_CRUFT
issue.source_suite = self._source_suite_name
issue.target_suite = self._target_suite_name
issue.package_name = dpkg.name
issue.source_version = None
issue.target_version = dpkg.version
res_issues.append(issue)
continue
# check if we can remove this package without breaking stuff
if self._dak.package_is_removable(dpkg.name, target_suite.name):
# try to remove the package
try:
self._dak.remove_package(dpkg.name, target_suite.name)
except Exception as e:
issue = SynchrotronIssue()
issue.kind = SynchrotronIssueKind.REMOVAL_FAILED
issue.source_suite = self._source_suite_name
issue.target_suite = self._target_suite_name
issue.package_name = dpkg.name
issue.source_version = None
issue.target_version = dpkg.version
issue.details = str(e)
res_issues.append(issue)
else:
# looks like we can not remove this
issue = SynchrotronIssue()
issue.kind = SynchrotronIssueKind.REMOVAL_FAILED
issue.source_suite = self._source_suite_name
issue.target_suite = self._target_suite_name
issue.package_name = dpkg.name
issue.source_version = None
issue.target_version = dpkg.version
                    issue.details = 'This package cannot be removed without breaking other packages. It needs manual removal.'
res_issues.append(issue)
self._publish_synced_spkg_events(sync_conf.source.os_name,
sync_conf.source.suite_name,
sync_conf.destination_suite.name,
False)
return True, res_issues
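# --- Illustrative sketch (not part of the engine above) ----------------------
# autosync() detects cruft by indexing the target suite's newest sources and
# then dropping every name that still exists in the source suite. Reduced to
# plain dicts keyed by package name, that set logic is simply:
def _cruft_names_sketch(target_pkg_map: dict, source_pkg_map: dict) -> set:
    ''' Return package names present in the target but absent from the source. '''
    return set(target_pkg_map) - set(source_pkg_map)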
| lkorigin/laniakea | src/synchrotron/synchrotron/syncengine.py | Python | gpl-3.0 | 24,637 |
#!/usr/bin/env python3
import argparse
import sys
import yaml
from pf_focus.markdown import output_markdown
from pf_focus.bbcode import output_bbcode
from pf_focus.parse import parse_pfsense
from pf_focus.pfsense import PfSenseDocument
from pf_focus.progress import Animation
def output_yaml(doc, stream):
yaml.safe_dump(doc.data, stream)
OUTPUT_FORMATS = {
'yaml': output_yaml,
'md': output_markdown,
'bbcode': output_bbcode,
}
def get_output_func(args):
return OUTPUT_FORMATS.get(args.output_format, output_yaml)
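# Note: OUTPUT_FORMATS is a plain dispatch table; dict.get() falls back to
# output_yaml for any format string it does not know. For example,
# get_output_func(argparse.Namespace(output_format='md')) returns
# output_markdown, while an unrecognised value silently yields output_yaml
# (the argparse "choices" below already restricts CLI input to known keys).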
def get_progress_animation(args):
return Animation(args.quiet or args.output_path == '-')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-q", dest="quiet", action="store_const", const=True, default=False, help="Hide progress messages")
parser.add_argument("-i", dest="input_path", help="XML input path", required=True)
parser.add_argument("-o", dest="output_path", help="Output path", default="-")
parser.add_argument("-f", dest="output_format", help="Output format", default="yaml", choices=OUTPUT_FORMATS.keys())
return parser.parse_args()
def step_parse(args, doc):
if not args.quiet:
print('\u268b Parsing "{}" ...'.format(args.input_path), file=sys.stderr)
with get_progress_animation(args):
parse_pfsense(args.input_path, doc)
if not args.quiet:
print('\u268d Successfully parsed pfSense config version {}.'.format(doc.pfsense.version), file=sys.stderr)
def step_stdout(args, doc, output_func):
if not args.quiet:
print('\u2631 Outputting to stdout ...', file=sys.stderr)
with get_progress_animation(args):
output_file = sys.stdout
output_func(doc, output_file)
if not args.quiet:
print('\u2630 Successfully outputted pfSense config as {}.'.format(args.output_format), file=sys.stderr)
def step_file(args, doc, output_func):
if not args.quiet:
print('\u2631 Outputting to "{}" ...'.format(args.output_path), file=sys.stderr)
with get_progress_animation(args):
with open(args.output_path, 'w+') as output_file:
output_func(doc, output_file)
if not args.quiet:
print('\u2630 Successfully outputted pfSense config as {}.'.format(args.output_format), file=sys.stderr)
def main():
args = parse_args()
doc = PfSenseDocument()
output_func = get_output_func(args)
step_parse(args, doc)
if args.output_path == '-':
step_stdout(args, doc, output_func)
else:
step_file(args, doc, output_func)
if __name__ == '__main__':
main()
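# Example invocation (hypothetical file names, for illustration only):
#
#   python format.py -i config-router.xml -f md -o pfsense-doc.md
#
# parses the pfSense XML backup and writes a Markdown summary; "-o -" (the
# default) writes to stdout instead.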
| TKCERT/pfFocus | pf_focus/format.py | Python | gpl-3.0 | 2,598 |
"""
Menu Model [DiamondQuest]
Defines a menu.
Author(s): Wilfrantz Dede, Jason C. McDonald, Stanislav Schmidt
"""
# LICENSE (BSD-3-Clause)
# Copyright (c) 2020 MousePaw Media.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# CONTRIBUTING
# See https://www.mousepawmedia.com/developers for information
# on how to contribute to our projects.
import abc
import collections
from enum import Enum
import pygame
from diamondquest.common import FontAttributes, FontAttributeDefaults
from diamondquest.model.game import GameModel
class MenuItem(abc.ABC):
"""An abstract base class for menu items.
Attributes
----------
key_down_listeners : dict
A dictionary storing key listeners.
"""
def __init__(self):
self.key_down_listeners = collections.defaultdict(list)
@property
@abc.abstractmethod
def text(self):
"""The text of the menu item."""
@property
@abc.abstractmethod
def text_attributes(self):
"""The text attributes of the menu item."""
def add_key_down_listener(self, key, listener):
"""Add a key down listener.
Parameters
----------
key : int
The key press that should be handled.
listener : function
The handler for the given key press. It should
take no parameters and not return anything.
"""
if listener not in self.key_down_listeners[key]:
self.key_down_listeners[key].append(listener)
def remove_key_down_listener(self, key, listener):
"""Remove a given key listener.
Parameters
----------
key : int
The key press that was handled by the listener.
listener : function
The listener to remove.
Returns
-------
status : bool
If the listener was found and removed then True is
returned, otherwise False.
"""
if listener in self.key_down_listeners[key]:
self.key_down_listeners[key].remove(listener)
return True
else:
return False
def handle_key_press(self, key):
"""Handle key presses when this item is focused.
Parameters
----------
key : int
The key that was pressed.
"""
for listener in self.key_down_listeners[key]:
listener()
class TextItem(MenuItem):
"""A menu item that is only static text."""
def __init__(
self, text, attributes=FontAttributeDefaults.MENU,
):
super().__init__()
self.raw_text = text
self.attributes = attributes
# icon
@property
def text(self):
return self.raw_text
@property
def text_attributes(self):
return self.attributes
class ButtonType(Enum):
STATIC = 0 # text never changes
SCROLL = 1 # left/right arrows scroll through options
INPUT = 2 # user can type into button text
class ButtonItem(MenuItem):
"""An interactive menu item."""
def __init__(
self,
text,
attributes=FontAttributeDefaults.MENU,
button_type=ButtonType.STATIC,
):
super().__init__()
self.text_item = TextItem(text, attributes)
self.button_type = button_type
@property
def text(self):
return self.text_item.text
@property
def text_attributes(self):
return self.text_item.text_attributes
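# Illustrative usage sketch (hypothetical, not part of DiamondQuest): wiring a
# key-down listener to a button and dispatching a key press through the
# MenuItem listener machinery defined above.
#
#   quit_button = ButtonItem(text="QUIT")
#   quit_button.add_key_down_listener(pygame.K_RETURN, lambda: print("bye"))
#   quit_button.handle_key_press(pygame.K_RETURN)  # calls the lambda -> "bye"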
class MenuType(Enum):
GAME = 0
DEV = 1
class MenuModel:
"""The model for the menu."""
menu_items = {} # a dictionary storing button instances
menus = {} # a dictionary storing menu instances
menu_in_use = MenuType.GAME # which menu the game is currently using
@classmethod
def initialize(cls):
cls.menu_items["text_existing_miner"] = TextItem(text="Existing Miner")
cls.menu_items["scroll_existing_miner"] = ButtonItem(
text="<none>", button_type=ButtonType.SCROLL
)
cls.menu_items["text_new_miner"] = TextItem(text="New Miner")
cls.menu_items["input_new_miner"] = ButtonItem(
text="Enter Name", button_type=ButtonType.INPUT
)
cls.menu_items["scroll_music_volume"] = ButtonItem(
text="Music: 10", button_type=ButtonType.SCROLL
)
cls.menu_items["scroll_sound_volume"] = ButtonItem(
text="Sound: 10", button_type=ButtonType.SCROLL
)
cls.menu_items["button_quit"] = ButtonItem(text="QUIT")
cls.menu_items["button_quit"].add_key_down_listener(
pygame.K_RETURN, lambda: GameModel.stop_game()
)
cls.menus[MenuType.GAME] = MenuModel(
title="DiamondQuest",
items=[
cls.menu_items["text_existing_miner"],
cls.menu_items["scroll_existing_miner"],
cls.menu_items["text_new_miner"],
cls.menu_items["input_new_miner"],
cls.menu_items["scroll_music_volume"],
cls.menu_items["scroll_sound_volume"],
cls.menu_items["button_quit"],
],
)
cls.menus[MenuType.DEV] = MenuModel(title="DevMenu", items=[])
@classmethod
def get_menu(cls, menu_type=None):
"""Called by the View to get the contents of the menu."""
# If no specific menu is requested, get the default.
if menu_type is None:
menu_type = cls.menu_in_use
if menu_type not in cls.menus:
raise ValueError(f"No such menu type {menu_type}")
return cls.menus[menu_type]
@classmethod
def use_menu(cls, menu_type):
"""Select which menu to use by default."""
cls.menu_in_use = menu_type
def __init__(self, title, items):
self.title = TextItem(title)
self.items = items
self.selectable_items = [
i for i, item in enumerate(items) if isinstance(item, ButtonItem)
]
self.which_selected = 0 if len(self.selectable_items) > 0 else -1
@property
def selected_item_idx(self):
if self.which_selected == -1:
return -1
return self.selectable_items[self.which_selected]
def __iter__(self):
        return iter(self.items)
@classmethod
def select_next_item(cls):
menu = cls.get_menu()
n_items = len(menu.selectable_items)
menu.which_selected = (menu.which_selected + 1) % n_items
@classmethod
def select_prev_item(cls):
menu = cls.get_menu()
n_items = len(menu.selectable_items)
menu.which_selected = (menu.which_selected - 1 + n_items) % n_items
@classmethod
def get_selected_item(cls):
menu = cls.get_menu()
idx = menu.selected_item_idx
        if idx >= 0:  # 0 is a valid index into items; -1 means nothing is selectable
return menu.items[idx]
else:
return None
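# Illustrative note (hedged, not part of the module): the GAME menu built in
# initialize() contains five ButtonItems, so repeated calls to
# MenuModel.select_next_item() advance which_selected 0 -> 1 -> 2 -> 3 -> 4 -> 0;
# the modulo arithmetic above makes the selection wrap around in both
# directions.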
| mousepawgames/diamondquest | src/diamondquest/model/menu/menu.py | Python | gpl-3.0 | 8,321 |
# -*- coding: utf-8 -*-
import os
from collections import defaultdict
from random import choice
world = defaultdict(int)
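# abs(x + y*1j) treats (x, y) as a complex number, so it is the Euclidean
# distance from the origin; the filter below therefore keeps exactly the
# lattice points lying in the annulus between radius 10 and 15.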
possiblepoints = [(x, y) for x in range(-15, 16)
for y in range(-15, 16)
if 10 <= abs(x + y * 1j) <= 15]
for i in range(100):
world[choice(possiblepoints)] += 1
for x in range(-15, 16):
print(''.join(str(min([9, world[(x, y)]])) if world[(x, y)] else ' '
for y in range(-15, 16)))
for i in range(1000):
world[choice(possiblepoints)] += 1
for x in range(-15, 16):
print(''.join(str(min([9, world[(x, y)]])) if world[(x, y)] else ' '
for y in range(-15, 16)))
os.system("pause")
| NicovincX2/Python-3.5 | Théorie des nombres/Nombre/Nombre aléatoire/random_points_on_a_circle.py | Python | gpl-3.0 | 691 |
# AsteriskLint -- an Asterisk PBX config syntax checker
# Copyright (C) 2019 Walter Doekes, OSSO B.V.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ..base import AppBase
class SoftHangup(AppBase):
pass
def register(app_loader):
app_loader.register(SoftHangup())
| ossobv/asterisklint | asterisklint/app/vall/app_softhangup.py | Python | gpl-3.0 | 870 |
import unittest
from rdflib import URIRef
from owlapy import model
from owlapy.util.hashcode import HashCode
from owlapy.vocab.owlfacet import OWLFacet
class TestHashCode(unittest.TestCase):
def test_hash_ontology(self):
ont_id = model.OWLOntologyID()
data_factory = model.OWLDataFactory()
man = model.OWLOntologyManager(data_factory)
ont = model.OWLOntology(man, ont_id)
ont_id_hash = hash(ont_id)
self.assertEqual(ont_id_hash, HashCode.hash_code(ont))
def test_asym_obj_prop_axiom(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
asym_obj_prop = model.OWLAsymmetricObjectPropertyAxiom(prop, anns)
asym_obj_prop_hash = (((3 * HashCode.MULT) + hash(prop)) *
HashCode.MULT) + HashCode._hash_list(anns)
self.assertEqual(asym_obj_prop_hash, HashCode.hash_code(asym_obj_prop))
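    # (Pattern used by the expected values throughout this file: HashCode
    # combines fields Java-style, i.e. h = prime; h = h * MULT + hash(field)
    # for each field in turn - each test below simply unrolls that
    # accumulation by hand for one model class and its characteristic prime.)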
def test_cls_assertion_axiom(self):
indiv = model.OWLNamedIndividual(model.IRI('http://ex.org/indivXYZ'))
ce = model.OWLClass(model.IRI('http://ex.org/SomeCls'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
cls_ass_ax = model.OWLClassAssertionAxiom(indiv, ce, anns)
cls_ass_ax_hash = (((((7 * HashCode.MULT) + hash(indiv)) *
HashCode.MULT) + hash(ce)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(cls_ass_ax_hash, HashCode.hash_code(cls_ass_ax))
def test_data_prop_assertion_axiom(self):
subj = model.OWLNamedIndividual(model.IRI('http://ex.org/indivABC'))
prop = model.OWLDataProperty(model.IRI('http://ex.org/prop'))
val = model.OWLLiteral('abcd')
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLDataPropertyAssertionAxiom(subj, prop, val, anns)
axiom_hash = (((((((11 * HashCode.MULT) + hash(subj)) *
HashCode.MULT) + hash(prop)) * HashCode.MULT) +
hash(val)) * HashCode.MULT) + HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_data_prop_dom_axiom(self):
prop = model.OWLDataProperty(model.IRI('http://ex.org/prop'))
dom = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLDataPropertyDomainAxiom(prop, dom, anns)
axiom_hash = (((((13 * HashCode.MULT) + hash(prop)) *
HashCode.MULT) + hash(dom)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_data_prop_range_axiom(self):
prop = model.OWLDataProperty(model.IRI('http://ex.org/prop'))
rnge = model.OWLDatatype(model.IRI('http://ex.org/dtype/int'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLDataPropertyRangeAxiom(prop, rnge, anns)
axiom_hash = (((((17 * HashCode.MULT) + hash(prop)) *
HashCode.MULT) + hash(rnge)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_sub_data_prop_of_axiom(self):
        sub_prop = model.OWLDataProperty(model.IRI('http://ex.org/subProp'))
        super_prop = model.OWLDataProperty(model.IRI('http://ex.org/superProp'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLSubDataPropertyOfAxiom(sub_prop, super_prop, anns)
axiom_hash = (((((19 * HashCode.MULT) + hash(sub_prop)) *
HashCode.MULT) + hash(super_prop)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_declaration_axiom(self):
entity = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLDeclarationAxiom(entity, anns)
axiom_hash = (((23 * HashCode.MULT) + hash(entity)) *
HashCode.MULT) + HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_diff_indivs_axiom(self):
indiv1 = model.OWLNamedIndividual(model.IRI('http://ex.org/indivABC'))
indiv2 = model.OWLAnonymousIndividual(model.NodeID('_:23'))
indivs = {indiv1, indiv2}
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLDifferentIndividualsAxiom(indivs, anns)
axiom_hash = (((29 * HashCode.MULT) + HashCode._hash_list(indivs)) *
HashCode.MULT) + HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_disjoint_classes_axiom(self):
ce1 = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
filler = model.OWLClass(model.IRI('http://ex.org/AnotherClass'))
ce2 = model.OWLObjectSomeValuesFrom(prop, filler)
ces = {ce1, ce2}
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLDisjointClassesAxiom(ces, anns)
axiom_hash = (((31 * HashCode.MULT) + HashCode._hash_list(ces)) *
HashCode.MULT) + HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_disjoint_data_props_axiom(self):
prop1 = model.OWLDataProperty(model.IRI('http://ex.org/prop1'))
prop2 = model.OWLDataProperty(model.IRI('http://ex.org/prop2'))
properties = {prop1, prop2}
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLDisjointDataPropertiesAxiom(properties, anns)
axiom_hash = (((37 * HashCode.MULT) +
HashCode._hash_list(properties)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_disjoint_obj_props_axiom(self):
prop1 = model.OWLObjectProperty(model.IRI('http://ex.org/prop1'))
prop2 = model.OWLObjectProperty(model.IRI('http://ex.org/prop2'))
prop3 = model.OWLObjectInverseOf(model.IRI('http://ex.org/prop3'))
properties = {prop1, prop2, prop3}
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLDisjointObjectPropertiesAxiom(properties, anns)
axiom_hash = (((41 * HashCode.MULT) +
HashCode._hash_list(properties)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_disjoint_union_axiom(self):
cls = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
ce1 = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
filler = model.OWLClass(model.IRI('http://ex.org/AnotherClass'))
ce2 = model.OWLObjectAllValuesFrom(prop, filler)
ces = {ce1, ce2}
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLDisjointUnionAxiom(cls, ces, anns)
axiom_hash = (((((43 * HashCode.MULT) + hash(axiom.owl_class)) *
HashCode.MULT) + HashCode._hash_list(ces)) *
HashCode.MULT) + HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_annotation_assertion_axiom(self):
subj = model.IRI('http://ex.org/sth')
prop = model.OWLAnnotationProperty(model.IRI('http://ex.org/prop'))
val = model.OWLLiteral('abcabc')
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLAnnotationAssertionAxiom(subj, prop, val, anns)
axiom_hash = (((((((47 * HashCode.MULT) + hash(subj)) *
HashCode.MULT) + hash(prop)) * HashCode.MULT) +
hash(val)) * HashCode.MULT) + HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_equiv_classes_axiom(self):
ce1 = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
prop1 = model.OWLObjectProperty(model.IRI('http://ex.org/prop1'))
filler1 = model.OWLClass(model.IRI('http://ex.org/AnotherClass'))
ce2 = model.OWLObjectSomeValuesFrom(prop1, filler1)
prop2 = model.OWLObjectProperty(model.IRI('http://ex.org/prop2'))
filler2 = model.OWLClass(model.IRI('http://ex.org/YetAnotherClass'))
ce3 = model.OWLObjectAllValuesFrom(prop2, filler2)
classes = {ce1, ce2, ce3}
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLEquivalentClassesAxiom(classes, anns)
axiom_hash = (((53 * HashCode.MULT) + HashCode._hash_list(classes)) *
HashCode.MULT) + HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_equiv_data_prop_axiom(self):
prop1 = model.OWLDataProperty(model.IRI('http://ex.org/prop1'))
prop2 = model.OWLDataProperty(model.IRI('http://ex.org/prop2'))
properties = {prop1, prop2}
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLEquivalentDataPropertiesAxiom(properties, anns)
axiom_hash = (((59 * HashCode.MULT) +
HashCode._hash_list(properties)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_equiv_obj_prop_axiom(self):
prop1 = model.OWLObjectProperty(model.IRI('http://ex.org/prop1'))
prop2 = model.OWLObjectProperty(model.IRI('http://ex.org/prop2'))
properties = {prop1, prop2}
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLEquivalentObjectPropertiesAxiom(properties, anns)
axiom_hash = (((61 * HashCode.MULT) +
HashCode._hash_list(properties)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_functional_data_prop_axiom(self):
prop = model.OWLDataProperty(model.IRI('http://ex.org/prop'))
rnge = model.OWLDatatype(model.IRI('http://ex.org/dtype/int'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLFunctionalDataPropertyAxiom(prop, rnge, anns)
axiom_hash = (((67 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_functional_obj_prop_axiom(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLFunctionalObjectPropertyAxiom(prop, anns)
axiom_hash = (((71 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_inv_functional_obj_prop_axiom(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLInverseFunctionalObjectPropertyAxiom(prop, anns)
axiom_hash = (((79 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_inv_obj_props_axiom(self):
prop1 = model.OWLObjectProperty(model.IRI('http://ex.org/prop1'))
prop2 = model.OWLObjectProperty(model.IRI('http://ex.org/prop2'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLInverseObjectPropertiesAxiom(prop1, prop2, anns)
axiom_hash = (((83 * HashCode.MULT) + hash(prop1) + hash(prop2)) *
HashCode.MULT) + HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_irrefl_obj_prop_axiom(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLIrreflexiveObjectPropertyAxiom(prop, anns)
axiom_hash = (((89 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_negative_data_prop_assertion_axiom(self):
subj = model.OWLNamedIndividual(model.IRI('http://ex.org/indivABC'))
prop = model.OWLDataProperty(model.IRI('http://ex.org/prop'))
obj = model.OWLLiteral('acab')
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLNegativeDataPropertyAssertionAxiom(
subj, prop, obj, anns)
axiom_hash = (((((((97 * HashCode.MULT) + hash(subj)) *
HashCode.MULT) + hash(prop)) * HashCode.MULT) +
hash(obj)) * HashCode.MULT) + HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_negative_obj_prop_assertion_axiom(self):
subj = model.OWLNamedIndividual(model.IRI('http://ex.org/indivXYZ'))
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
obj = model.OWLAnonymousIndividual(model.NodeID('_:23'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLNegativeObjectPropertyAssertionAxiom(
subj, prop, obj, anns)
axiom_hash = (((((((101 * HashCode.MULT) + hash(subj)) *
HashCode.MULT) + hash(prop)) * HashCode.MULT) +
hash(obj)) * HashCode.MULT) + HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_obj_prop_assertion_axiom(self):
subj = model.OWLAnonymousIndividual(model.NodeID('_:23'))
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
obj = model.OWLNamedIndividual(model.IRI('http://ex.org/indivABC'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLObjectPropertyAssertionAxiom(subj, prop, obj, anns)
axiom_hash = (((((((103 * HashCode.MULT) + hash(subj)) *
HashCode.MULT) + hash(prop)) * HashCode.MULT) +
hash(obj)) * HashCode.MULT) + HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_sub_prop_chain_of_axiom(self):
prop1 = model.OWLObjectProperty(model.IRI('http://ex.org/prop1'))
prop2 = model.OWLObjectProperty(model.IRI('http://ex.org/prop2'))
prop3 = model.OWLObjectProperty(model.IRI('http://ex.org/prop3'))
prop_chain = [prop1, prop2, prop3]
super_prop = model.OWLObjectProperty(model.IRI('http://ex.org/sProp'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLSubPropertyChainOfAxiom(prop_chain, super_prop, anns)
axiom_hash = (((((107 * HashCode.MULT) +
HashCode._hash_list(prop_chain)) * HashCode.MULT) +
hash(super_prop)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_obj_prop_dom_axiom(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
cls1 = model.OWLClass(model.IRI('http://ex.org/SomeCls'))
cls2 = model.OWLClass(model.IRI('http://ex.org/AnotherCls'))
operands = {cls1, cls2}
dom = model.OWLObjectUnionOf(operands)
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLObjectPropertyDomainAxiom(prop, dom, anns)
axiom_hash = (((((109 * HashCode.MULT) + hash(prop)) *
HashCode.MULT) + hash(dom)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_obj_prop_range_axiom(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
rnge = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLObjectPropertyRangeAxiom(prop, rnge, anns)
axiom_hash = (((((113 * HashCode.MULT) + hash(prop)) *
HashCode.MULT) + hash(rnge)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_sub_obj_prop_of_axiom(self):
sub_prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop1'))
super_prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop2'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLSubObjectPropertyOfAxiom(sub_prop, super_prop, anns)
axiom_hash = (((((127 * HashCode.MULT) + hash(sub_prop)) *
HashCode.MULT) + hash(super_prop)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_refl_obj_prop_axiom(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLReflexiveObjectPropertyAxiom(prop, anns)
axiom_hash = (((131 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_same_indiv_axiom(self):
indiv1 = model.OWLAnonymousIndividual(model.NodeID('_:23'))
indiv2 = model.OWLNamedIndividual(model.IRI('http://ex.org/indivABC'))
indivs = {indiv1, indiv2}
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLSameIndividualAxiom(indivs, anns)
axiom_hash = (((137 * HashCode.MULT) + HashCode._hash_list(indivs)) *
HashCode.MULT) + HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_sub_cls_of_axiom(self):
sub_cls = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
cls1 = model.OWLClass(model.IRI('http://ex.org/AnotherClass'))
cls2 = model.OWLClass(model.IRI('http://ex.org/YetAnotherClass'))
operands = {cls1, cls2}
super_cls = model.OWLObjectIntersectionOf(operands)
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLSubClassOfAxiom(sub_cls, super_cls, anns)
axiom_hash = (((((139 * HashCode.MULT) + hash(sub_cls)) *
HashCode.MULT) + hash(super_cls)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_symm_obj_prop_axiom(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/property'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLSymmetricObjectPropertyAxiom(prop, anns)
axiom_hash = (((149 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_trans_obj_prop_axiom(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLTransitiveObjectPropertyAxiom(prop, anns)
axiom_hash = (((151 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
HashCode._hash_list(anns)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_cls(self):
iri = model.IRI('http://ex.org/SomeCls')
cls = model.OWLClass(iri)
cls_hash = (157 * HashCode.MULT) + hash(iri)
self.assertEqual(cls_hash, HashCode.hash_code(cls))
def test_data_all_vals_from(self):
prop = model.OWLDataProperty(model.IRI('http://ex.org/prop'))
filler = model.OWLDatatype(model.IRI('http://ex.org/dtype/int'))
ce = model.OWLDataAllValuesFrom(prop, filler)
ce_hash = (((163 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
hash(filler)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_data_exact_cardinality(self):
prop = model.OWLDataProperty(model.IRI('http://ex.org/prop'))
card = 5
filler = model.OWLDatatype(model.IRI('http://ex.org/dtype/int'))
ce = model.OWLDataExactCardinality(prop, card, filler)
ce_hash = (((((167 * HashCode.MULT) + hash(prop)) * HashCode.MULT) +
card) * HashCode.MULT) + hash(filler)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_data_max_cardinality(self):
prop = model.OWLDataProperty(model.IRI('http://ex.org/prop'))
card = 23
filler = model.OWLDatatype(model.IRI('http://ex.org/dtype/str'))
ce = model.OWLDataMaxCardinality(prop, card, filler)
ce_hash = (((((173 * HashCode.MULT) + hash(prop)) * HashCode.MULT) +
card) * HashCode.MULT) + hash(filler)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_data_min_cardinality(self):
prop = model.OWLDataProperty(model.IRI('http://ex.org/prop'))
card = 5
filler = model.OWLDatatype(model.IRI('http://ex.org/dtype/int'))
ce = model.OWLDataMinCardinality(prop, card, filler)
ce_hash = (((((179 * HashCode.MULT) + hash(prop)) * HashCode.MULT) +
card) * HashCode.MULT) + hash(filler)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_data_some_vals_from(self):
prop = model.OWLDataProperty(model.IRI('http://ex.org/prop'))
filler = model.OWLDatatype(model.IRI('http://ex.org/dtype/int'))
ce = model.OWLDataSomeValuesFrom(prop, filler)
ce_hash = (((181 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
hash(filler)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_data_has_val(self):
prop = model.OWLDataProperty(model.IRI('http://ex.org/prop'))
val = model.OWLLiteral('abc')
ce = model.OWLDataHasValue(prop, val)
ce_hash = (((191 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
hash(val)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_obj_all_vals_from(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/property'))
ce1 = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
ce2 = model.OWLClass(model.IRI('http://ex.org/AnotherClass'))
operands = {ce1, ce2}
filler = model.OWLObjectUnionOf(operands)
ce = model.OWLObjectAllValuesFrom(prop, filler)
ce_hash = (((193 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
hash(filler)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_obj_complement_of(self):
operand = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
ce = model.OWLObjectComplementOf(operand)
ce_hash = (197 * HashCode.MULT) + hash(operand)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_obj_exact_cardinality(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
card = 5
cls1 = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
cls2 = model.OWLClass(model.IRI('http://ex.org/AnotherClass'))
operands = {cls1, cls2}
filler = model.OWLObjectUnionOf(operands)
ce = model.OWLObjectExactCardinality(prop, card, filler)
ce_hash = (((((199 * HashCode.MULT) + hash(prop)) * HashCode.MULT) +
card) * HashCode.MULT) + hash(filler)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_obj_intersect_of(self):
ce1 = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
ce2 = model.OWLClass(model.IRI('http://ex.org/AnotherClass'))
ce3 = model.OWLClass(model.IRI('http://ex.org/YetAnotherClass'))
operands = {ce1, ce2, ce3}
ce = model.OWLObjectIntersectionOf(operands)
ce_hash = (211 * HashCode.MULT) + HashCode._hash_list(operands)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_obj_max_cardinality(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
card = 5
filler = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
ce = model.OWLObjectMaxCardinality(prop, card, filler)
ce_hash = (((((223 * HashCode.MULT) + hash(prop)) * HashCode.MULT) +
card) * HashCode.MULT) + hash(filler)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_obj_min_cardinality(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
card = 25
filler = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
ce = model.OWLObjectMinCardinality(prop, card, filler)
ce_hash = (((((227 * HashCode.MULT) + hash(prop)) * HashCode.MULT) +
card) * HashCode.MULT) + hash(filler)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_obj_one_of(self):
indiv1 = model.OWLNamedIndividual(model.IRI('http://ex.org/indivXYZ'))
indiv2 = model.OWLAnonymousIndividual(model.NodeID('_:23'))
values = {indiv1, indiv2}
ce = model.OWLObjectOneOf(values)
ce_hash = (229 * HashCode.MULT) + HashCode._hash_list(values)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_obj_has_self(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
ce = model.OWLObjectHasSelf(prop)
ce_hash = (233 * HashCode.MULT) + hash(prop)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_obj_some_val_from(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
filler = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
ce = model.OWLObjectSomeValuesFrom(prop, filler)
ce_hash = (((239 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
hash(filler)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_obj_union_of(self):
ce1 = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
ce2 = model.OWLClass(model.IRI('http://ex.org/AnotherClass'))
ce3 = model.OWLClass(model.IRI('http://ex.org/YetAnotherClass'))
operands = {ce1, ce2, ce3}
ce = model.OWLObjectUnionOf(operands)
ce_hash = (241 * HashCode.MULT) + HashCode._hash_list(operands)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_obj_has_val(self):
prop = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
val = model.OWLNamedIndividual(model.IRI('http://ex.org/indivABC'))
ce = model.OWLObjectHasValue(prop, val)
ce_hash = (((251 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
hash(val)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_data_complement_of(self):
rnge = model.OWLDatatype(model.IRI('http://ex.org/dtype/int'))
ce = model.OWLDataComplementOf(rnge)
ce_hash = (257 * HashCode.MULT) + hash(rnge)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_data_one_of(self):
val1 = model.OWLLiteral('abc')
val2 = model.OWLLiteral('def')
val3 = model.OWLLiteral('ghi')
values = {val1, val2, val3}
ce = model.OWLDataOneOf(values)
ce_hash = (263 * HashCode.MULT) + HashCode._hash_list(values)
self.assertEqual(ce_hash, HashCode.hash_code(ce))
def test_datatype(self):
iri = model.IRI('http://ex.org/dtype/int')
dtype = model.OWLDatatype(iri)
dtype_hash = (269 * HashCode.MULT) + hash(iri)
self.assertEqual(dtype_hash, HashCode.hash_code(dtype))
def test_datatype_restr(self):
dtype = model.OWLDatatype(model.IRI('http://ex.org/dtype/int'))
facet1 = OWLFacet.LENGTH
facet_val1 = model.OWLLiteral('23')
fr1 = model.OWLFacetRestriction(facet1, facet_val1)
facet2 = OWLFacet.MAX_EXCLUSIVE
facet_value2 = model.OWLLiteral('5')
fr2 = model.OWLFacetRestriction(facet2, facet_value2)
facet_restrictions = {fr1, fr2}
node = model.OWLDatatypeRestriction(dtype, facet_restrictions)
node_hash = (((271 * HashCode.MULT) + hash(dtype)) * HashCode.MULT) + \
HashCode._hash_list(facet_restrictions)
self.assertEqual(node_hash, HashCode.hash_code(node))
def test_facet_restr(self):
facet = OWLFacet.LENGTH
facet_val = model.OWLLiteral('23')
node = model.OWLFacetRestriction(facet, facet_val)
node_hash = (((563 * HashCode.MULT) + hash(facet)) * HashCode.MULT) + \
hash(facet_val)
self.assertEqual(node_hash, HashCode.hash_code(node))
def test_literal(self):
literal = model.OWLLiteral('abc')
self.assertEqual(hash(literal), HashCode.hash_code(literal))
def test_data_prop(self):
iri = model.IRI('http://ex.org/dprop')
prop = model.OWLDataProperty(iri)
prop_hash = (283 * HashCode.MULT) + hash(iri)
self.assertEqual(prop_hash, HashCode.hash_code(prop))
def test_object_prop(self):
iri = model.IRI('http://ex.org/prop')
prop = model.OWLObjectProperty(iri)
prop_hash = (293 * HashCode.MULT) + hash(iri)
self.assertEqual(prop_hash, HashCode.hash_code(prop))
def test_obj_inv_of(self):
inv_prop = model.OWLObjectProperty(model.IRI('http://ex.org/iProp'))
prop = model.OWLObjectInverseOf(inv_prop)
prop_hash = (307 * HashCode.MULT) + hash(inv_prop)
self.assertEqual(prop_hash, HashCode.hash_code(prop))
def test_named_indiv(self):
iri = model.IRI('http://ex.org/indivABC')
indiv = model.OWLNamedIndividual(iri)
indiv_hash = (311 * HashCode.MULT) + hash(iri)
self.assertEqual(indiv_hash, HashCode.hash_code(indiv))
def test_swrl_rule(self):
pred1 = model.OWLDataProperty(model.IRI('http://ex.org/prop1'))
b_arg0 = model.SWRLLiteralArgument(model.OWLLiteral('abc'))
b_arg1 = model.SWRLLiteralArgument(model.OWLLiteral('def'))
body = model.SWRLDataPropertyAtom(pred1, b_arg0, b_arg1)
pred2 = model.OWLDataProperty(model.IRI('http://ex.org/prop2'))
h_arg0 = model.SWRLLiteralArgument(model.OWLLiteral('23'))
h_arg1 = model.SWRLLiteralArgument(model.OWLLiteral('42'))
head = model.SWRLDataPropertyAtom(pred2, h_arg0, h_arg1)
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
rule = model.SWRLRule(body, head, anns)
rule_hash = (((631 * HashCode.MULT) + hash(body)) * HashCode.MULT) +\
hash(head)
self.assertEqual(rule_hash, HashCode.hash_code(rule))
def test_swrl_cls_atom(self):
pred = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
indiv = model.OWLNamedIndividual(model.IRI('http://ex.org/indivXYZ'))
arg = model.SWRLIndividualArgument(indiv)
atom = model.SWRLClassAtom(pred, arg)
atom_hash = (((641 * HashCode.MULT) + hash(arg)) * HashCode.MULT) + \
hash(pred)
self.assertEqual(atom_hash, HashCode.hash_code(atom))
    def test_swrl_data_range_atom(self):
pred = model.OWLDatatype(model.IRI('http://ex.org/SomeClass'))
literal = model.OWLLiteral('foo')
arg = model.SWRLLiteralArgument(literal)
atom = model.SWRLDataRangeAtom(pred, arg)
atom_hash = (((643 * HashCode.MULT) + hash(arg)) * HashCode.MULT) + \
hash(pred)
self.assertEqual(atom_hash, HashCode.hash_code(atom))
def test_swrl_obj_prop_atom(self):
pred = model.OWLObjectProperty(model.IRI('http://ex.org/prop'))
indiv0 = model.OWLNamedIndividual(model.IRI('http://ex.org/indivABC'))
arg0 = model.SWRLIndividualArgument(indiv0)
indiv1 = model.OWLAnonymousIndividual(model.NodeID('_:23'))
arg1 = model.SWRLIndividualArgument(indiv1)
atom = model.SWRLObjectPropertyAtom(pred, arg0, arg1)
atom_hash = (((((647 * HashCode.MULT) + hash(arg0)) * HashCode.MULT) +
hash(arg1)) * HashCode.MULT) + hash(pred)
self.assertEqual(atom_hash, HashCode.hash_code(atom))
def test_swrl_data_prop_atom(self):
pred = model.OWLDataProperty(model.IRI('http://ex.org/prop'))
arg0 = model.SWRLLiteralArgument(model.OWLLiteral('abc'))
arg1 = model.SWRLLiteralArgument(model.OWLLiteral('def'))
atom = model.SWRLDataPropertyAtom(pred, arg0, arg1)
atom_hash = (((((653 * HashCode.MULT) + hash(arg0)) * HashCode.MULT) +
hash(arg1)) * HashCode.MULT) + hash(pred)
self.assertEqual(atom_hash, HashCode.hash_code(atom))
def test_swrl_built_in_atom(self):
pred = model.IRI('http://ex.org/sth')
arg1 = model.SWRLLiteralArgument(model.OWLLiteral('abc'))
arg2 = model.SWRLLiteralArgument(model.OWLLiteral('def'))
arg3 = model.SWRLLiteralArgument(model.OWLLiteral('ghi'))
args = [arg1, arg2, arg3]
atom = model.SWRLBuiltInAtom(pred, args)
atom_hash = (((659 * HashCode.MULT) + HashCode._hash_list(args)) *
HashCode.MULT) + hash(pred)
self.assertEqual(atom_hash, HashCode.hash_code(atom))
def test_swrl_variable(self):
iri = model.IRI('http://ex.org/sth')
var = model.SWRLVariable(iri)
var_hash = (661 * HashCode.MULT) + hash(iri)
self.assertEqual(var_hash, HashCode.hash_code(var))
def test_swrl_indiv_arg(self):
indiv = model.OWLNamedIndividual(model.IRI('http://ex.org/indivXYZ'))
arg = model.SWRLIndividualArgument(indiv)
arg_hash = (677 * HashCode.MULT) + hash(indiv)
self.assertEqual(arg_hash, HashCode.hash_code(arg))
def test_swrl_literal_arg(self):
literal = model.OWLLiteral('abc')
arg = model.SWRLLiteralArgument(literal)
arg_hash = (683 * HashCode.MULT) + hash(literal)
self.assertEqual(arg_hash, HashCode.hash_code(arg))
def test_swrl_diff_indivs_atom(self):
data_factory = model.OWLDataFactory()
indiv0 = model.OWLNamedIndividual(model.IRI('http://ex.org/indivXYZ'))
arg0 = model.SWRLIndividualArgument(indiv0)
indiv1 = model.OWLAnonymousIndividual(model.NodeID('_:23'))
arg1 = model.SWRLIndividualArgument(indiv1)
atom = model.SWRLDifferentIndividualsAtom(data_factory, arg0, arg1)
atom_hash = (((797 * HashCode.MULT) + hash(arg0)) * HashCode.MULT) + \
hash(arg1)
self.assertEqual(atom_hash, HashCode.hash_code(atom))
def test_swrl_same_indivs_atom(self):
data_factory = model.OWLDataFactory()
indiv0 = model.OWLNamedIndividual(model.IRI('http://ex.org/indivXYZ'))
arg0 = model.SWRLIndividualArgument(indiv0)
indiv1 = model.OWLAnonymousIndividual(model.NodeID('_:23'))
arg1 = model.SWRLIndividualArgument(indiv1)
atom = model.SWRLSameIndividualAtom(data_factory, arg0, arg1)
atom_hash = (((811 * HashCode.MULT) + hash(arg0)) * HashCode.MULT) + \
hash(arg1)
self.assertEqual(atom_hash, HashCode.hash_code(atom))
def test_has_key_axiom(self):
ce = model.OWLClass(model.IRI('http://ex.org/SomeClass'))
pe1 = model.OWLObjectProperty(model.IRI('http://ex.org/prop1'))
pe2 = model.OWLObjectInverseOf(
model.OWLObjectProperty(model.IRI('http://ex.org/prop2')))
pe3 = model.OWLObjectProperty(model.IRI('http://ex.org/prop3'))
pes = {pe1, pe2, pe3}
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLHasKeyAxiom(ce, pes, anns)
axiom_hash = (((821 * HashCode.MULT) + hash(ce)) * HashCode.MULT) + \
HashCode._hash_list(pes)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_ann_prop_dom_axiom(self):
        prop = model.OWLAnnotationProperty(model.IRI('http://ex.org/prop'))
dom = model.IRI('http://ex.org/sth')
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLAnnotationPropertyDomainAxiom(prop, dom, anns)
axiom_hash = (((823 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
hash(dom)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test__owl_ann_prop_range_axiom(self):
        prop = model.OWLAnnotationProperty(model.IRI('http://ex.org/prop'))
rnge = model.IRI('http://ex.org/sth')
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLAnnotationPropertyRangeAxiom(prop, rnge, anns)
axiom_hash = (((827 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
hash(rnge)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_sub_ann_prop_of_axiom(self):
sub_prop = model.OWLAnnotationProperty(model.IRI('http://ex.org/prop1'))
super_prop = model.OWLAnnotationProperty(
model.IRI('http://ex.org/prop2'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLSubAnnotationPropertyOfAxiom(
sub_prop, super_prop, anns)
axiom_hash = (((829 * HashCode.MULT) + hash(sub_prop)) *
HashCode.MULT) + hash(super_prop)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
def test_data_intersect_of(self):
dtype1 = model.OWLDatatype(model.IRI('http://ex.org/dtype/int'))
dtype2 = model.OWLDatatype(model.IRI('http://ex.org/dtype/float'))
operands = {dtype1, dtype2}
node = model.OWLDataIntersectionOf(operands)
node_hash = (839 * HashCode.MULT) + HashCode._hash_list(operands)
self.assertEqual(node_hash, HashCode.hash_code(node))
def test_data_union_of(self):
dtype1 = model.OWLDatatype(model.IRI('http://ex.org/dtype/int'))
dtype2 = model.OWLDatatype(model.IRI('http://ex.org/dtype/float'))
operands = {dtype1, dtype2}
node = model.OWLDataUnionOf(operands)
node_hash = (853 * HashCode.MULT) + HashCode._hash_list(operands)
self.assertEqual(node_hash, HashCode.hash_code(node))
def test_annotation_prop(self):
prop = model.OWLAnnotationProperty(model.IRI('http://ex.org/anProp'))
prop_hash = (857 * HashCode.MULT) + hash(prop.iri)
self.assertEqual(prop_hash, HashCode.hash_code(prop))
def test_anon_indiv(self):
node_id = model.NodeID('_:23')
indiv = model.OWLAnonymousIndividual(node_id)
indiv_hash = (859 * HashCode.MULT) + hash(node_id)
self.assertEqual(indiv_hash, HashCode.hash_code(indiv))
def test_iri(self):
        uri = URIRef('http://ex.org/some/uri')
iri = model.IRI(uri)
iri_hash = (863 * HashCode.MULT) + hash(uri)
self.assertEqual(iri_hash, HashCode.hash_code(iri))
        # TODO: the test below fails... think about this!
# self.assertEqual(iri_hash, hash(iri))
def test_annotation(self):
prop = model.OWLAnnotationProperty(model.IRI('http://ex.org/anProp'))
val = model.OWLLiteral('annotation')
ann = model.OWLAnnotation(prop, val, [])
ann_hash = (((877 * HashCode.MULT) + hash(prop)) * HashCode.MULT) + \
hash(val)
self.assertEqual(ann_hash, HashCode.hash_code(ann))
def test_datatype_definition_axiom(self):
dtype = model.OWLDatatype(model.IRI('http://ex.org/dtype/posInt'))
drange = model.OWLDataComplementOf(
model.IRI('http://ex.org/dtype/negInt'))
ann_prop1 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp'))
ann_val1 = model.OWLLiteral('annotation 1')
ann1 = model.OWLAnnotation(ann_prop1, ann_val1, [])
ann_prop2 = model.OWLAnnotationProperty(
model.IRI('http://ex.org/anProp2'))
ann_val2 = model.OWLLiteral('annotation 2')
ann2 = model.OWLAnnotation(ann_prop2, ann_val2, [])
anns = {ann1, ann2}
axiom = model.OWLDatatypeDefinitionAxiom(dtype, drange, anns)
axiom_hash = (((897 * HashCode.MULT) + hash(dtype)) * HashCode.MULT) + \
hash(drange)
self.assertEqual(axiom_hash, HashCode.hash_code(axiom))
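

# All the expected hashes above follow the same accumulation pattern: start
# from a type-specific prime and fold each component in with the shared
# multiplier. A minimal sketch of that pattern (HashCode.MULT's real value
# lives in the implementation under test; 31 below is only an illustrative
# stand-in):
#
#     def hash_pair(prime, a, b, mult=31):
#         return ((prime * mult) + hash(a)) * mult + hash(b)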
| patrickwestphal/owlapy | tests/util/hashcode_tests.py | Python | gpl-3.0 | 55,391 |
from rest_framework.response import Response
from rest_framework.views import APIView
from .common import login_required
from Requests import myudc
# Student's details requests handler
class Student(APIView):
"""
    Currently this only returns the student's basic info; in the future it
    will include all layout details: theme preferences, the student's
    modifications and other settings.
"""
# Returns student's details on GET request
@staticmethod
@login_required("myudc")
def get(request):
# Return student's basic info as of now
return Response(
# Get & scrape student's basic info from MyUDC
myudc.scrape.student_details(
myudc.get.summarized_schedule(
# Send MyUDC cookies
request.session["myudc"]
)
)
)
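

# Usage sketch (hypothetical wiring; the project's real urls.py may differ):
#
#     from django.conf.urls import url
#     from .details import Student
#
#     urlpatterns = [
#         url(r'^student/$', Student.as_view()),
#     ]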
| UOSHUB/BackEnd | API/views/details.py | Python | gpl-3.0 | 877 |
#!/usr/bin/python
# Console
import subprocess
def MCS():
return subprocess.Popen(['python', 'mcs.py'])
def color(text, color):
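    # ANSI escape codes used below: 0 = reset, 1 = bright blue, 2 = bright
    # green, 3 = bright red, 4 = bright yellow.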
if color == 0: color = "\033[0m"
if color == 1: color = "\033[94m"
if color == 2: color = "\033[92m"
if color == 3: color = "\033[91m"
if color == 4: color = "\033[93m"
return color+text+"\033[0m"
def showMenu():
print("\033[H\033[J", end="")
print("== Music Control System v0.1 ==")
print("= =")
print("= {} MCS Time {} =".format(color("[7]",1), color("[8]",3)))
print("= {} MCS Auto {} =".format(color("[4]",1), color("[5]",3)))
print("= {} MCS EDT {} =".format(color("[1]",1), color("[2]",3)))
print("= =")
print("========= Informations ========")
print("= =")
print("= =")
print("= =")
print("===============================")
def main():
# Old hack (TODO)
class main:
def poll(): return True
while 1:
showMenu()
command = str(input("=> "))
if command == '4':
if main.poll() == None:
main.terminate()
main.kill()
main = MCS()
if command == '5':
if main.poll() == None:
main.terminate()
main.kill()
if command == 'q':
exit(0)
if __name__ == "__main__": main()
| codeurimpulsif/PyMCS | console.py | Python | gpl-3.0 | 1,539 |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 14 20:43:12 2016
@author: Adrien
"""
import canpy.point_defects

import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection


def defects_movie(dl, t_start='standard', t_end='standard', speed=1):
    '''
    Produce a movie in mp4 format of the defects.
    t_start is the time at which we want the movie to begin and t_end is the
    time at which we want it to end. If t_start and t_end are kept at the
    'standard' value, the movie will start at 0 ps and end at the last
    available snapshot. The user is free to change the beginning and
    finishing times; if so, these times have to be provided in ps.
    '''
    if t_start == 'standard':
        snap_start = 0
    else:
        snap_start = round(t_start / (dl.inter * dl.time_step))
    if t_end == 'standard':
        snap_end = dl.nb_snapshots - 1
    else:
        snap_end = round(t_end / (dl.inter * dl.time_step))
    i = snap_start
    while i <= snap_end:
        # The original body called the undefined name `package`; the module
        # actually imported is canpy.point_defects. The attributes used below
        # (xi/yi/zi, xv/yv/zv, length, time) are assumed to live on the
        # point_defects result -- the code was lifted from a method and still
        # referred to an unbound `self`.
        a = canpy.point_defects.point_defects(dl, i * dl.inter * dl.time_step)
        fig = plt.figure()
        ax = fig.gca(projection='3d')

        def init():
            ax.set_xlabel('x axis [Angström]')
            ax.set_ylabel('y axis [Angström]')
            ax.set_zlabel('z axis [Angström]')
            ax.set_xlim3d(0, a.length)
            ax.set_ylim3d(0, a.length)
            ax.set_zlim3d(0, a.length)
            ax.scatter(a.xi, a.yi, a.zi, label='Interstitials', c='r', marker='^')
            ax.scatter(a.xv, a.yv, a.zv, label='Vacancies', c='b', marker='o')
            ax.set_title('Interstitials and vacancies at ' + str(a.time) + ' ps')
            ax.legend(frameon=True, fancybox=True, ncol=1,
                      fontsize='x-small', loc='lower right')
            return []

        def animate(frame):
            # The output filename suggests a rotating view was intended:
            # advance the azimuth one degree per frame (360 frames = one turn).
            ax.view_init(elev=30, azim=frame)
            return []

        # Animate and save (blit must be off for 3D axes).
        anim = animation.FuncAnimation(fig, animate, init_func=init,
                                       frames=360, interval=speed * 20,
                                       blit=False)
        anim.save('rot_frame_anim_' + str(a.time) + 'ps.mp4', fps=30,
                  extra_args=['-vcodec', 'libx264'])
        plt.close(fig)
        i += 1  # the original loop never advanced and would spin forever
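

# Usage sketch (hypothetical: assumes a loader object exposing the attributes
# used above -- inter, time_step and nb_snapshots):
#
#     dl = SomeDataLoader('dump.lammpstrj')    # hypothetical loader
#     defects_movie(dl)                        # whole run
#     defects_movie(dl, t_start=10, t_end=50)  # 10 ps to 50 ps only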
| ruaultadrien/canpy | canpy/defects_movie.py | Python | gpl-3.0 | 2,116 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2018 Jonathan Peirce
# Distributed under the terms of the GNU General Public License (GPL).
"""Wrapper for all miscellaneous functions and classes from psychopy.tools
"""
# pylint: disable=W0611
# W0611 = Unused import %s
from __future__ import absolute_import, print_function
from psychopy.tools.arraytools import (createXYs, extendArr, makeRadialMatrix,
ratioRange, shuffleArray, val2array)
from psychopy.tools.attributetools import (attributeSetter, setAttribute,
logAttrib)
from psychopy.tools.colorspacetools import (dkl2rgb, dklCart2rgb,
hsv2rgb, lms2rgb,
rgb2dklCart, rgb2lms)
from psychopy.tools.coordinatetools import (cart2pol, pol2cart,
cart2sph, sph2cart)
from psychopy.tools.fileerrortools import handleFileCollision
from psychopy.tools.filetools import toFile, fromFile, mergeFolder
from psychopy.tools.imagetools import array2image, image2array, makeImageAuto
from psychopy.tools.monitorunittools import (cm2deg, deg2cm, cm2pix, pix2cm,
deg2pix, pix2deg, convertToPix)
from psychopy.tools.plottools import plotFrameIntervals
from psychopy.tools.typetools import float_uint8, float_uint16, uint8_float
from numpy import radians, degrees
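
# Illustrative use of the re-exported tools (names per the imports above):
#
#     from psychopy import misc
#     theta, radius = misc.cart2pol(3, 4)  # expected ~(53.13, 5.0), theta in degrees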
| hoechenberger/psychopy | psychopy/misc.py | Python | gpl-3.0 | 1,519 |
class TagException(Exception):
pass
class ArgumentException(Exception):
pass
class Tag(object):
__slots__ = ['_name', '_attributes', '_parent', '_previous_sibling', '_next_sibling', '_first_child',
'_last_child', '_children']
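    # Because of __slots__, assigning any name not listed above makes
    # object.__setattr__ raise AttributeError; the __setattr__ and
    # __getattribute__ overrides below catch that and fall back to the
    # _attributes dict, so arbitrary tag attributes still work.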
def __init__(self, name, attr=None):
self._name = name
if attr is None or not isinstance(attr, dict):
self._attributes = {}
else:
self._attributes = attr
self._parent = None
self._previous_sibling = None
self._next_sibling = None
self._first_child = None
self._last_child = None
self._children = list()
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, value):
self._parent = value
@parent.deleter
def parent(self):
del self._parent
@property
def previous_sibling(self):
return self._previous_sibling
@previous_sibling.setter
def previous_sibling(self, value):
self._previous_sibling = value
@previous_sibling.deleter
def previous_sibling(self):
del self._previous_sibling
@property
def next_sibling(self):
return self._next_sibling
@next_sibling.setter
def next_sibling(self, value):
self._next_sibling = value
@next_sibling.deleter
def next_sibling(self):
del self._next_sibling
@property
def first_child(self):
raise TagException('Not a container tag!')
@property
def last_child(self):
raise TagException('Not a container tag!')
def __getattribute__(self, attr):
try:
return super().__getattribute__(attr)
except AttributeError:
return self._attributes.get(attr)
def __setattr__(self, name, val):
try:
super().__setattr__(name, val)
except AttributeError:
self._attributes[name] = val
def __delattr__(self, name):
try:
super().__delattr__(name)
except AttributeError:
if self._attributes.get(name):
self._attributes.pop(name)
def __str__(self):
result = '<' + self._name
for key, value in self._attributes.items():
result += ' ' + key + '="' + value + '"'
result += '>'
return result
class ContainerTag(Tag):
__slots__ = ['children']
def __init__(self, name, attr=None):
super().__init__(name, attr)
self.children = self.generator_of_children()
@property
def first_child(self):
return self._first_child
@property
def last_child(self):
return self._last_child
def generator_of_children(self):
for child in self._children:
yield child
def append_child(self, tag):
if not issubclass(type(tag), Tag):
raise TypeError("Argument isn't subclass of Tag.")
self._children.append(tag)
index_of_last_child = len(self._children) - 1
self._last_child = self._children[index_of_last_child]
self._last_child.parent = self
if len(self._children) == 1:
self._first_child = self.last_child
def insert_before(self, tag, next_sibling):
if not issubclass(type(tag), Tag):
raise TypeError("Argument isn't subclass of Tag.")
if next_sibling not in self._children:
self.append_child(tag)
return
index_inserted_elemnt = self._children.index(next_sibling)
self._children.insert(index_inserted_elemnt, tag)
index_of_last_child = len(self._children) - 1
self._last_child = self._children[index_of_last_child]
self._children[index_inserted_elemnt].parent = self
if index_inserted_elemnt == 0:
self._first_child = self._children[index_inserted_elemnt]
def __str__(self):
result = '<' + self._name
for key, value in self._attributes.items():
result += ' ' + key + '="' + value + '"'
result += '>'
        for item in self._children:  # iterate the list; self.children is a one-shot generator
result += str(item)
result += '</' + self._name + '>'
return result
if __name__ == '__main__':
img_1 = Tag('img')
img_1.src = '/python-developer.svg'
img_1.alt = 'Python Разработчик'
img_2 = Tag('img')
img_2.src = '/php-developer.svg'
img_2.alt = 'PHP Разработчик'
img_3 = Tag('img')
img_3.src = '/java-developer.svg'
img_3.alt = 'Java Разработчик'
div = ContainerTag('div')
div.append_child(img_1)
div.append_child(img_2)
div.insert_before(img_3, img_1)
print(div)
| PavlovVitaly/python__homework_ITMO | task_html.py | Python | gpl-3.0 | 4,695 |
#!/usr/bin/python
# Downloads kernel from http://kernel.ubuntu.com/~kernel-ppa/mainline/
# Requires: python-bs4
# Copyright (c) 2012 Savvas Radevic <vicedar@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import urlparse
import urllib
import os
import urllib2
import platform
from bs4 import BeautifulSoup
import re
import sys
import subprocess
import tempfile
# We need to use apt.VersionCompare(a,b) to compare debian package versions
import apt_pkg
import argparse
# MODULE INIT
apt_pkg.init()
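# apt_pkg.version_compare(a, b) returns > 0 when a is newer than b, 0 when the
# versions are equal and < 0 when a is older, e.g. version_compare('3.9.1', '3.9') > 0.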
# PARSE ARGUMENTS
parser = argparse.ArgumentParser(description='Download and install mainline kernel builds from kernel.ubuntu.com.')
parser.add_argument('-d', '--disable-filter', action='store_true',
help='Do not filter out release candidate versions')
parser.add_argument('-p', '--prefer-stable', action='store_true',
help='Prefer latest stable version instead of latest release candidate of the same version (e.g. prefer v3.9-raring instead of v3.9-rc8-raring)')
parser.add_argument('-l', '--latest-ver', action='store_true',
help='Chooses last version of each branch')
parser.add_argument('-u', '--update', action='store_true',
help='Upgrade kernel installed')
parser.add_argument('-y', '--daily', action='store_true',
help='Download daily build kernel (with next patches)')
parser.add_argument('-w', '--lowlatency', action='store_true',
help='Downloads lowlatency kernel')
args = parser.parse_args()
print(args)
url = "http://kernel.ubuntu.com/~kernel-ppa/mainline/"
print("Contacting {0}".format(url))
source = urllib.urlopen(url).read()
#print(source)
soup = BeautifulSoup(source, "html.parser")
kernels = list()
rel = re.sub('-\w*', '', platform.release())
print("Current system kernel release version: {0}".format(rel))
release = re.sub('([0-9])\.([0-9]{1,2})\.([0-9]{1,2})', '', rel)
previous_version = re.split('\.', rel)  # renamed: the code below reads previous_version
previous_href = ""
upgrade = ""
actual_ver = []  # TODO: never populated, so the --update comparison below cannot work yet
if args.update:
args.latest_ver = True
selk = -1
for link in soup.find_all('a'):
href = link.get('href')
if href[0:5] == "daily" and args.daily:
kernels.append(str(href)+"current/")
selk = 0
elif not args.disable_filter:
#If filter is not disabled, apply all filters
if not args.latest_ver:
selk = 0
#Original way
if re.search("rc\d", href):
#If the version is a release candidate, bypass
continue
if href[0] == "v":
kver = href[1:-1] #strip first and last characters
vc = apt_pkg.version_compare(kver, rel)
if vc > 0:
# If kernel newer than current one
#print("{0} > {1}".format(kver, rel))
kernels.append(href)
else:
if href[0] == "v":
            version = re.split('\.', href[1:-1])
if not args.update:
selk = 0
if int(version[0]) > int(previous_version[0]):
previous_version[0] = version[0]
previous_version[1] = -1
kernels.append(previous_href)
if int(version[1]) > int(previous_version[1]):
previous_version[1] = version[1]
kernels.append(previous_href)
previous_href = href
else:
if int(version[0]) == int(actual_ver[0]) and int(version[1]) == int(actual_ver[1]):
if int(version[2]) > int(actual_ver[2]):
kernels = [href]
selk = 1
else:
selk = 0
kernels.append(href)
if previous_href != "":
kernels.append(previous_href)
if selk == -1:
print "0 UPDATES"
print rel
else:
if not args.update:
# SELECT KERNEL
i = 0
for k in kernels:
i += 1
print("{0}. {1}".format(i, k))
selk = -1
while not 0 < selk <= len(kernels):
try:
defaultk = len(kernels)
if args.prefer_stable:
if re.search('-rc\d+-', kernels[-1]):
# If a release candidate is the last item in list
teststable = re.sub("-rc\d+-","-",kernels[-1])
if teststable in kernels:
defaultk = kernels.index(teststable) + 1
sel = raw_input("Please enter an integer [{0}]: ".format(defaultk))
if sel == "":
selk = defaultk
break
selk = int(sel)
except ValueError:
continue
print("You chose: {0}".format(kernels[selk-1]))
else:
print("You chose: {0}".format(kernels[selk-1]))
# SELECT ARCH
i = 0
archs = ("i386", "amd64")
sysarch = platform.machine().replace(
"x86_64", "amd64").replace("i686", "i386")
print("Your system architecture: {0}".format(sysarch))
try:
defaultarch = archs.index(sysarch)+1
except:
defaultarch = 1
for a in archs:
i += 1
print("{0}. {1}".format(i, a))
sela = -1
while not 0 < sela <= len(archs):
try:
sela = raw_input("Please enter an integer [{0}]: ".format(defaultarch))
if sela == "":
sela = defaultarch
break
sela = int(sela)
except ValueError:
continue
print("You chose: {0}".format(archs[sela-1]))
# SELECT PACKAGES
sel1 = -1
while True:
sel1 = raw_input("Would you like to download kernel headers [Y/n]: ")
if sel1 == "":
selkh = True
break
if not sel1 in tuple("yYnN"):
continue
else:
if sel1 in tuple("yY"):
selkh = True
else:
selkh = False
break
sel2 = -1
while True:
sel2 = raw_input("Would you like to download kernel image [Y/n]: ")
if sel2 == "":
selki = True
break
if not sel2 in tuple("yYnN"):
continue
else:
if sel2 in tuple("yY"):
selki = True
else:
selki = False
break
sel3 = -1
while True:
sel3 = raw_input("Would you like to download kernel extras [Y/n]: ")
if sel3 == "":
selke = True
break
if not sel3 in tuple("yYnN"):
continue
else:
if sel3 in tuple("yY"):
selke = True
else:
selke = False
break
print("Kernel headers: {0}, Kernel image: {1}, Kernel extras: {2}".
format(selkh, selki, selke))
# selk = selected kernel
# sela = selected arch
# selkh = kernel headers? T/F
# selki = kernel image? T/F
# selke = kernel extra? T/F
link = "http://kernel.ubuntu.com/~kernel-ppa/mainline/{0}".format(kernels[selk-1])
print("Contacting {0}".format(link))
source = urllib.urlopen(link).read()
soup = BeautifulSoup(source)
files = list()
for l in soup.find_all('a'):
href = l.get('href')
rxstr = "linux-headers.*_(?:{0}|all)\.deb".format(archs[sela-1])
if selkh and re.search(rxstr, href):
url = "{0}{1}".format(link, href)
files.append(url)
rxstr = "linux-image.*_{0}\.deb".format(archs[sela-1])
if selki and re.search(rxstr, href):
url = "{0}{1}".format(link, href)
files.append(url)
rxstr = "linux-image-extra.*_{0}\.deb".format(archs[sela-1])
if selke and re.search(rxstr, href):
url = "{0}{1}".format(link, href)
files.append(url)
#Create temp folder
tempfolder = tempfile.mkdtemp()
print("Using temporary folder: {0}".format(tempfolder))
re_lowlatency = re.compile('.*lowlatency.*')
re_generic = re.compile('.*generic.*')
files2 = []
for url in files:
if args.lowlatency:
coincidence = re_lowlatency.match(url)
if coincidence:
files2.append(coincidence.group())
else:
coincidence = re_generic.match(url)
if coincidence:
files2.append(coincidence.group())
files = files2
    print(files)
for url in files:
#Change directory to temp folder
os.chdir(tempfolder)
file_name = url.split('/')[-1]
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print("Downloading: {0} Bytes: {1}".format(url, file_size))
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
p = float(file_size_dl) / file_size
status = r"{0} [{1:.2%}]".format(file_size_dl, p)
status = status + chr(8)*(len(status)+1)
sys.stdout.write(status)
f.close()
# INSTALL PACKAGES
sel6 = -1
while True:
sel6 = raw_input("Would you like to install the downloaded packages? [Y/n]: ")
if sel6 == "":
selinst = True
break
if not sel6 in tuple("yYnN"):
continue
else:
if sel6 in tuple("yY"):
selinst = True
else:
selinst = False
break
if selinst:
print("Installing packages... please type in your password if requested")
subprocess.call("sudo dpkg -i {0}/*.deb".format(tempfolder), shell=True)
else:
print("Will not install packages")
raw_input("All done! Press [Enter] key to exit.")
| jaimejimbo/kmp-downloader | kmpd.py | Python | gpl-3.0 | 8,901 |
#!/usr/bin/env python
from datetime import timedelta
import os
import random
from django.utils.dateparse import parse_date
from faker import Faker
test_email = 'michael.b001@gmx.de'
fake = Faker('de')
fake.seed(1)
random.seed(1)
def get_random_date():
return parse_date('1983-03-31') + timedelta(days=random.randint(-5000,
1000))
def populate():
for _ in range(100):
candidate = add_candidate(first_name=fake.first_name(),
last_name=fake.last_name(),
date_of_birth=get_random_date())
add_registration(candidate=candidate,
bicycle_kind=random.randint(1, 4),
email=fake.email())
def add_candidate(first_name, last_name, date_of_birth):
return Candidate.objects.create(first_name=first_name,
last_name=last_name,
date_of_birth=date_of_birth)
def add_registration(candidate, bicycle_kind, email):
return UserRegistration.objects.create(candidate=candidate,
bicycle_kind=bicycle_kind,
email=email)
def add_event(due_date):
return HandoutEvent.objects.create(due_date=due_date)
def add_bicycle():
b = Bicycle.objects.create()
return b
# Start execution here!
if __name__ == '__main__':
print("Starting FIRST_APP population script...")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bwb.settings')
import django
django.setup()
from register.models import UserRegistration, Candidate, Bicycle
from register.models import HandoutEvent
populate()
| michaelbratsch/bwb | populate.py | Python | gpl-3.0 | 1,775 |
# YouTube Video: https://www.youtube.com/watch?v=4E7N7W1lUkU
import os
import pydub
import glob
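
# NOTE: pydub delegates decoding/encoding to ffmpeg (or libav), which must be
# installed and on the PATH for the mp3 export below to work.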
wav_files = glob.glob('./*.wav')
for wav_file in wav_files:
mp3_file = os.path.splitext(wav_file)[0] + '.mp3'
sound = pydub.AudioSegment.from_wav(wav_file)
sound.export(mp3_file, format="mp3")
os.remove(wav_file)
print("Conversion Complete")
| vprusso/youtube_tutorials | utility_scripts/wav_to_mp3/wav_to_mp3.py | Python | gpl-3.0 | 356 |
import datetime
from django.http import HttpResponse
import logging
from natsort import natsorted
import pandas as pd
from PyPDF2 import PdfFileReader, PdfFileMerger
from pyper import *
import json
from database.models import ko_entry
import functions
LOG_FILENAME = 'error_log.txt'
pd.set_option('display.max_colwidth', -1)
def getGAGE(request, stops, RID, PID):
try:
if request.is_ajax():
# Get variables from web page
allJson = request.body.split('&')[0]
all = json.loads(allJson)
functions.setBase(RID, 'Step 1 of 6: Reading normalized data file...')
# Select samples and meta-variables from savedDF
functions.setBase(RID, 'Step 2 of 6: Selecting your chosen meta-variables...')
metaValsCat = all['metaValsCat']
metaIDsCat = all['metaIDsCat']
metaValsQuant = []
metaIDsQuant = []
DepVar = int(all["DepVar"])
# Create meta-variable DataFrame, final sample list, final category and quantitative field lists based on tree selections
savedDF, metaDF, finalSampleIDs, catFields, remCatFields, quantFields, catValues, quantValues = functions.getMetaDF(request.user, metaValsCat, metaIDsCat, metaValsQuant, metaIDsQuant, DepVar, levelDep=True)
if not catFields:
error = "Selected categorical variable(s) contain only one level.\nPlease select different variable(s)."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
if not finalSampleIDs:
error = "No valid samples were contained in your final dataset.\nPlease select different variable(s)."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
result = ''
result += 'Categorical variables selected by user: ' + ", ".join(catFields + remCatFields) + '\n'
result += 'Categorical variables not included in the statistical analysis (contains only 1 level): ' + ", ".join(remCatFields) + '\n'
result += 'Quantitative variables selected by user: ' + ", ".join(quantFields) + '\n'
result += '===============================================\n\n'
functions.setBase(RID, 'Step 2 of 6: Selecting your chosen meta-variables...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 3 of 6: Mapping phylotypes to KEGG pathways...')
if os.name == 'nt':
r = R(RCMD="R/R-Portable/App/R-Portable/bin/R.exe", use_pandas=True)
else:
r = R(RCMD="R/R-Linux/bin/R", use_pandas=True)
functions.setBase(RID, 'Verifying R packages...missing packages are being installed')
# R packages from biocLite
r("list.of.packages <- c('gage', 'edgeR', 'pathview')")
r("new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,'Package'])]")
r("if (length(new.packages)) source('http://bioconductor.org/biocLite.R')")
r("if (length(new.packages)) biocLite(new.packages, type='source', suppressUpdate=T, dependencies=T)")
# R packages from cran
r("list.of.packages <- c('png', 'grid', 'plyr')")
r("new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,'Package'])]")
r("if (length(new.packages)) install.packages(new.packages, repos='http://cran.us.r-project.org', dependencies=T)")
functions.setBase(RID, 'Step 3 of 6: Mapping phylotypes to KEGG pathways...')
r("library(gage)")
r("library(edgeR)")
r("library(pathview)")
r("library(png)")
r("library(grid)")
r("library(plyr)")
keggString = all["kegg"]
keggDict = json.JSONDecoder(object_pairs_hook=functions.multidict).decode(keggString)
nameList = []
for value in keggDict.itervalues():
if isinstance(value, list):
nameList.extend(value)
else:
nameList.append(value)
# Enable this only if you want to update gage data and pathways
'''
r("list.of.packages <- c('gageData')")
r("new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,'Package'])]")
r("if (length(new.packages)) source('http://bioconductor.org/biocLite.R')")
print r("if (length(new.packages)) biocLite(new.packages)")
r('library(gageData)')
r("data(kegg.sets.ko)")
r("save(kegg.sets.ko, file='myPhyloDB/media/kegg/kegg.sets.ko.RData')")
r("for (name in names(kegg.sets.ko)) { \
id = substr(name, 3, 7); \
download.kegg(pathway.id=id, species='ko', kegg.dir='myPhyloDB/media/kegg/pathways', file.type=c('xml', 'png')) \
} \
")
'''
r("load('myPhyloDB/media/kegg/kegg.sets.ko.RData')")
keggDict = {}
r("selPaths <- vector()")
for i in nameList:
pathStr = i.split('[PATH:')[1].split(']')[0]
r.assign("pathStr", pathStr)
r("selPath <- kegg.sets.ko[grepl(paste(pathStr), names(kegg.sets.ko))]")
key = r.get("names(selPath)")
value = r.get("selPath$ko")
keggDict[key] = value.tolist()
r("selPaths <- append(selPaths, names(selPath))")
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
keggAll = 4
mapTaxa = 'no'
finalDF, junk = functions.getKeggDF(keggAll, '', savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID)
# make sure column types are correct
finalDF[catFields] = finalDF[catFields].astype(str)
functions.setBase(RID, 'Step 4 of 6: Performing GAGE analysis...')
# save location info to session
myDir = 'myPhyloDB/media/temp/gage/'
if not os.path.exists(myDir):
os.makedirs(myDir)
path = str(myDir) + str(RID) + '.biom'
functions.imploding_panda(path, 2, DepVar, finalSampleIDs, metaDF, finalDF)
count_rDF = pd.DataFrame()
if DepVar == 0:
count_rDF = finalDF.pivot(index='rank_id', columns='sampleid', values='abund')
elif DepVar == 4:
count_rDF = finalDF.pivot(index='rank_id', columns='sampleid', values='abund_16S')
# need to change rank_id to kegg orthologies for gage analysis
count_rDF.reset_index(drop=False, inplace=True)
count_rDF.rename(columns={'index': 'rank_id'}, inplace=True)
idList = count_rDF.rank_id.tolist()
idDict = {}
for id in idList:
entry = ko_entry.objects.using('picrust').get(ko_lvl4_id=id).ko_orthology
idDict[id] = entry
count_rDF['ko'] = count_rDF['rank_id'].map(idDict)
count_rDF.drop('rank_id', axis=1, inplace=True)
count_rDF.drop_duplicates(keep='last', inplace=True) # remove dups - KOs mapped to multiple pathways
count_rDF.set_index('ko', drop=True, inplace=True)
# make metaDF R compatible, remove offending characters in categorical variables
for cat in catFields:
metaDF[cat] = metaDF[cat].str.replace('-', '_')
metaDF[cat] = metaDF[cat].str.replace(' ', '_')
metaDF[cat] = metaDF[cat].str.replace('(', '_')
metaDF[cat] = metaDF[cat].str.replace(')', '_')
# Create combined metadata column
if len(catFields) > 1:
for index, row in metaDF.iterrows():
metaDF.loc[index, 'merge'] = ".".join(row[catFields])
else:
metaDF.loc[:, 'merge'] = metaDF.loc[:, catFields[0]]
wantedList = ['merge', 'sampleid', 'sample_name']
metaDF = metaDF.loc[:, wantedList]
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
finalDict = {}
metaDF.sort_values('sampleid', inplace=True)
r.assign("metaDF", metaDF)
r("trt <- factor(metaDF$merge)")
r.assign("count", count_rDF)
r.assign("sampleIDs", count_rDF.columns.values.tolist())
r("names(count) <- sampleIDs")
r('e <- DGEList(counts=count)')
r('e <- calcNormFactors(e, method="none")')
r('design <- model.matrix(~ 0 + trt)')
r('trtLevels <- levels(trt)')
r('colnames(design) <- trtLevels')
r('e <- estimateGLMCommonDisp(e, design)')
r('e <- estimateGLMTrendedDisp(e, design)')
r('e <- estimateGLMTagwiseDisp(e, design)')
r('fit <- glmFit(e, design)')
fit = r.get('fit')
if not fit:
error = "edgeR failed!\nUsually this is caused by one or more taxa having a negative disperion.\nTry filtering your data to remove problematic taxa (e.g. remove phylotypes with 50% or more zeros)."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
if DepVar == 0:
result += 'Dependent Variable: Abundance' + '\n'
elif DepVar == 4:
result += 'Dependent Variable: Total Abundance' + '\n'
result += '\n===============================================\n\n\n'
levels = list(set(metaDF['merge'].tolist()))
levels = natsorted(levels, key=lambda y: y.lower())
r("pdf_counter <- 1")
path = os.path.join('myPhyloDB', 'media', 'temp', 'gage', 'Rplots', RID)
if not os.path.exists(path):
os.makedirs(path)
r.assign("path", path)
r("setwd(path)")
r("options(width=5000)")
r.assign("RID", RID)
gageDF = pd.DataFrame(columns=['comparison', 'pathway', ' p.geomean ', ' stat.mean ', ' p.val ', ' q.val ', ' set.size '])
diffDF = pd.DataFrame(columns=['comparison', 'kegg', ' baseMean ', ' baseMeanA ', ' baseMeanB ', ' logFC ', ' logCPM ', ' LR ', ' pval ', ' FDR '])
mergeList = metaDF['merge'].tolist()
mergeSet = list(set(mergeList))
for i in xrange(len(levels)-1):
for j in xrange(i+1, len(levels)):
trt1 = levels[i]
trt2 = levels[j]
r.assign("trt1", trt1)
r.assign("trt2", trt2)
'''
Error in makeContrasts(contVec, levels = design) :
The levels must by syntactically valid names in R, see help(make.names). Non-valid names: A-pinene,B-caryophyllene
# potential fix on line 177
'''
r('contVec <- sprintf("%s-%s", trt1, trt2)')
r('cont.matrix= makeContrasts(contVec, levels=design)')
r('lrt <- glmLRT(fit, contrast=cont.matrix)')
r("res <- as.data.frame(topTags(lrt, n=nrow(lrt$table)))")
r('res <- res[ order(row.names(res)), ]')
r('res')
taxaIDs = r.get("row.names(res)")
r("change <- -res$logFC")
r("names(change) <- row.names(res)")
baseMean = count_rDF.mean(axis=1)
baseMean = baseMean.loc[baseMean.index.isin(taxaIDs)]
listA = metaDF[metaDF['merge'] == mergeSet[i]].sampleid.tolist()
baseMeanA = count_rDF[listA].mean(axis=1)
baseMeanA = baseMeanA.loc[baseMeanA.index.isin(taxaIDs)]
listB = metaDF[metaDF['merge'] == mergeSet[j]].sampleid.tolist()
baseMeanB = count_rDF[listB].mean(axis=1)
baseMeanB = baseMeanB.loc[baseMeanB.index.isin(taxaIDs)]
r.assign("baseMean", baseMean)
r.assign("baseMeanA", baseMeanA)
r.assign("baseMeanB", baseMeanB)
r('baseMean <- baseMean[ order(as.numeric(row.names(baseMean))), ]')
r('baseMeanA <- baseMeanA[ order(as.numeric(row.names(baseMeanA))), ]')
r('baseMeanB <- baseMeanB[ order(as.numeric(row.names(baseMeanB))), ]')
# output DiffAbund to DataTable
r("df <- data.frame(kegg=row.names(res), baseMean=baseMean, baseMeanA=baseMeanA, \
baseMeanB=baseMeanB, logFC=-res$logFC, logCPM=res$logCPM, \
LR=res$LR, pval=res$PValue, FDR=res$FDR) \
")
nbinom_res = r.get("df")
nbinom_res.fillna(value=1.0, inplace=True)
if nbinom_res is None:
myDict = {'error': "edgeR failed!\nPlease try a different data combination."}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
comparison = str(trt1) + ' vs. ' + str(trt2)
nbinom_res.insert(0, 'comparison', comparison)
diffDF = diffDF.append(nbinom_res, ignore_index=True)
### GAGE analysis on all pathways...
r("gage.res <- gage(change, gsets=kegg.sets.ko, species='ko', same.dir=FALSE)")
r("df2 <- data.frame(pathway=row.names(gage.res$greater), p.geomean=gage.res$greater[, 1], stat.mean=gage.res$greater[, 2], \
p.val=gage.res$greater[, 3], q.val=gage.res$greater[, 4], \
set.size=gage.res$greater[, 5])")
compDF = r.get("df2")
compDF.insert(0, 'comparison', comparison)
gageDF = gageDF.append(compDF, ignore_index=True)
### Get data way for pathview
# merge sign and sig to get vector (1=sig. positive, 0=not sig., -1=sig. negative)
r("binary <- change / abs(change)")
r("sig <- as.vector((res$PValue <= 0.05))")
r("sig <- sig * 1")
r("sig <- sig * binary")
r("names(sig) <- row.names(res)")
for key in keggDict.iterkeys():
r.assign("pathway", key)
r("pid <- substr(pathway, start=1, stop=7)")
r("pv <- pathview(gene.data=sig, pathway.id=pid, species='ko', kegg.dir='../../../../kegg/pathways', \
kegg.native=T, multi.state=F, same.layer=T, low='red', mid='gray', high='green')")
# convert to pdf
r("pdf(paste('gage_temp', pdf_counter, '.pdf', sep=''))")
r("plot.new()")
r("pngRaster <- readPNG(paste(pid, 'pathview.png', sep='.'))")
r("grid.raster(pngRaster)")
r("mtext(paste(trt1, ' vs ', trt2, sep=''), side=3, line=3, col='blue')")
r("dev.off()")
r("pdf_counter <- pdf_counter + 1")
functions.setBase(RID, 'Step 4 of 6: Performing GAGE Analysis...\nComparison: ' + str(trt1) + ' vs ' + str(trt2) + ' is done!')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 5 of 6: Pooling pdf files for display...')
# Combining Pdf files
finalFile = 'myPhyloDB/media/temp/gage/Rplots/' + str(RID) + '/gage_final.pdf'
pdf_files = [f for f in os.listdir(path) if f.endswith("pdf")]
if pdf_files:
pdf_files = natsorted(pdf_files, key=lambda y: y.lower())
merger = PdfFileMerger()
for filename in pdf_files:
merger.append(PdfFileReader(os.path.join(path, filename), 'rb'))
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
merger.write(finalFile)
functions.setBase(RID, 'Step 6 of 6: Formatting result tables...this may take several minutes')
#Export tables to html
gage_table = gageDF.to_html(classes="table display")
gage_table = gage_table.replace('border="1"', 'border="0"')
finalDict['gage_table'] = str(gage_table)
diff_table = diffDF.to_html(classes="table display")
diff_table = diff_table.replace('border="1"', 'border="0"')
finalDict['diff_table'] = str(diff_table)
finalDict['text'] = result
finalDict['error'] = 'none'
res = json.dumps(finalDict)
return HttpResponse(res, content_type='application/json')
except Exception as e:
if not stops[PID] == RID:
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG,)
myDate = "\nDate: " + str(datetime.datetime.now()) + "\n"
logging.exception(myDate)
myDict = {}
myDict['error'] = "There was an error during your analysis:\nError: " + str(e.message) + "\nTimestamp: " + str(datetime.datetime.now())
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
| manterd/myPhyloDB | functions/analysis/gage_graphs.py | Python | gpl-3.0 | 19,411 |
import numpy as np
import pyCloudy as pc
import matplotlib.pyplot as plt
from pyneb.utils.physics import IP
# TODO: Add comments
"""
Question 1
"""
def alpha_B(Te):
    """
    Case-B recombination coefficient [cm3.s-1]: 2.6e-13 at Te = 1e4 K,
    scaling as 1/T4.
    """
    T4 = Te/1e4
    return 2.6e-13/T4
def U_mean_def(QH0, Ne, Rstr):
"""
\int_0^{Rstr}{U.dV} / \int_0^{Rstr}{dV}
Return the mean over Stromgren volume of U
"""
return 3.* QH0 / (4. * np.pi * pc.CST.CLIGHT * Ne * Rstr**2)
def QH0_def(Rstr, Ne, ff, Te = 1e4):
    """
    Ionizing photon rate balancing recombinations within the Stromgren
    volume of radius Rstr.
    """
    return 4. / 3. * np.pi * Rstr**3 * Ne**2 * ff * alpha_B(Te)
def Rstr(QH0, Ne, ff, Te = 1e4):
return (3. * QH0 / (4. * np.pi * ff * alpha_B(Te) * Ne**2))**(1./3.)
def U_mean(QH0, Ne, ff, Te = 1e4):
return (Ne * QH0 * ff**2 * 3 / (4. * np.pi) * alpha_B(Te)**2)**(1./3.) / pc.CST.CLIGHT
def QH0(U_mean, Ne, ff, Te = 1e4):
return U_mean**3 * pc.CST.CLIGHT**3 * 4. * np.pi / 3. / (Ne * ff**2 * alpha_B(Te)**2)
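
# Quick numerical sanity check of the formulas above (illustrative values: an
# O-type star with QH0 = 1e49 s^-1 in gas with Ne = 100 cm^-3, ff = 1 and
# Te = 1e4 K, i.e. alpha_B = 2.6e-13 cm^3.s-1):
#
#     Rstr(1e49, 1e2, 1.)    # ~9.7e18 cm (~3 pc), a typical HII-region size
#     U_mean(1e49, 1e2, 1.)  # ~8e-3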
# --------------------------------------
"""
Question 3
"""
def make_model(name, models_dir='./', SED='BB', qH=None, SED_params=None, n_zones = None, iterate=1):
pc.log_.level=3
abund_AGSS09 = {'He' : 10.93, 'C' : 8.43, 'N' : 7.83, 'O' : 8.69, 'Ne' : 7.93, 'Mg' : 7.6,
'S' : 7.12, 'Ar' : 6.40, 'Fe' : 7.5, 'Cl' : 5.5, 'Si' : 7.51}
for elem in abund_AGSS09:
abund_AGSS09[elem] -= 12
if elem != 'He':
abund_AGSS09[elem] -= 0.3
options = ('no molecules',
'no level2 lines',
'no fine opacities',
'atom h-like levels small',
'atom he-like levels small',
'COSMIC RAY BACKGROUND',
'element limit off -8',
)
c_input = pc.CloudyInput('{0}/{1}'.format(models_dir, name))
if SED == 'BB':
c_input.set_BB(Teff = SED_params, lumi_unit = 'q(H)', lumi_value = qH)
else:
c_input.set_star(SED = SED, SED_params = SED_params, lumi_unit = 'q(H)', lumi_value=qH)
# Defining the density. You may also use set_dlaw(parameters) if you have a density law defined in dense_fabden.cpp.
c_input.set_cste_density(2, ff = 1.)
# Defining the inner radius. A second parameter would be the outer radius (matter-bounded nebula).
c_input.set_radius(r_in = np.log10(pc.CST.PC/10))
c_input.set_abund(ab_dict = abund_AGSS09, nograins = True)
c_input.set_other(options)
c_input.set_iterate(iterate) # (0) for no iteration, () for one iteration, (N) for N iterations.
c_input.set_sphere() # () or (True) : sphere, or (False): open geometry.
c_input.set_distance(dist=1., unit='kpc', linear=True) # unit can be 'kpc', 'Mpc', 'parsecs', 'cm'. If linear=False, the distance is in log.
if n_zones is not None:
c_input.set_stop('zones {0}'.format(n_zones))
c_input.print_input()
c_input.run_cloudy()
def plot_model(name, models_dir = './', style='-', fig_num = 1):
pc.log_.level=3
M = pc.CloudyModel('{0}/{1}'.format(models_dir, name), read_emis = False)
X = M.radius/1e19
colors = ['r', 'g', 'b', 'y', 'm', 'c']
plt.figure(fig_num)
plt.subplot(3, 3, 1)
plt.plot(X, M.get_ionic('H', 0), label='H0', linestyle=style, c= colors[0])
plt.plot(X, M.get_ionic('H', 1), label='H+', linestyle=style, c= colors[1])
plt.plot(X, M.get_ionic('He', 0), label='He0', linestyle=style, c= colors[2])
plt.plot(X, M.get_ionic('He', 1), label='He+', linestyle=style, c= colors[3])
plt.plot(X, M.get_ionic('He', 2), label='He++', linestyle=style, c= colors[4])
if style== '-':
plt.legend()
plt.title(name)
for i_plot, elem in enumerate(['N', 'O', 'Ne', 'S', 'Ar']):
plt.subplot(3, 3, i_plot + 2)
for i in np.arange(4):
plt.plot(X, M.get_ionic(elem, i), linestyle=style, c=colors[i])
plt.text(np.max(X)/2, 0.9, elem)
if i_plot == 0:
plt.title(M.date_model)
plt.subplot(3, 3, 7)
plt.plot(X, M.ne, label=r'N$_e$', linestyle=style, c='blue')
plt.plot(X, M.nH, label='N$_H$', linestyle=style, c='red')
if style== '-':
plt.legend(loc=3)
plt.xlabel(r'R [10$^{19}$cm]')
plt.subplot(3, 3, 8)
plt.plot(X, M.te, label=r'T$_e$', linestyle=style, c='blue')
if style== '-':
plt.legend(loc=3)
plt.subplot(3, 3, 9)
plt.plot(X, M.log_U, label='log U', c='blue')
if style== '-':
plt.legend()
def search_T(name, models_dir = './', SED = 'BB'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
if SED == 'BB':
T = np.array([float(pc.sextract(M.out['Blackbody'], 'dy ', '*')) for M in Ms])
elif SED == 'WM':
T = np.array([float(pc.sextract(M.out['table star'], 'mod" ', '4.0')) for M in Ms])
QH0 = np.array([M.Q0 for M in Ms])
QHe0 = np.array([M.Q[1::].sum() for M in Ms])
plt.plot(T/1e3, QHe0/QH0)
plt.xlabel('T [kK]')
plt.ylabel('QHe0/QH0')
def print_Xi(name, models_dir = './'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
names = [M.model_name_s for M in Ms]
print(names)
print('H0/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('H', 0),
Ms[1].get_ab_ion_vol('H', 0),
Ms[2].get_ab_ion_vol('H', 0)))
print('H1/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('H', 1),
Ms[1].get_ab_ion_vol('H', 1),
Ms[2].get_ab_ion_vol('H', 1)))
print('He0/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 0),
Ms[1].get_ab_ion_vol('He', 0),
Ms[2].get_ab_ion_vol('He', 0)))
print('He1/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 1),
Ms[1].get_ab_ion_vol('He', 1),
Ms[2].get_ab_ion_vol('He', 1)))
print('He2/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 2),
Ms[1].get_ab_ion_vol('He', 2),
Ms[2].get_ab_ion_vol('He', 2)))
for elem in ['N', 'O', 'Ne', 'S', 'Ar']:
for i in np.arange(4):
print('{0:2s}{1}/H: {2:.2e} {3:.2e} {4:.2e}'.format(elem, i, Ms[0].get_ab_ion_vol(elem, i),
Ms[1].get_ab_ion_vol(elem, i),
Ms[2].get_ab_ion_vol(elem, i)))
def plot_SED(name, models_dir = './', unit='Jy'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
plt.figure()
plt.subplot(2, 1, 1)
for M in Ms:
plt.plot(M.get_cont_x(unit = 'eV'), np.log10(M.get_cont_y(unit = 'esHz')), label=M.model_name_s)
plt.xlim((10., 60))
plt.ylim((18, 24))
plt.ylabel('log [erg.s-1.Hz-1]')
plt.legend(loc=3)
plt.subplot(2, 1, 2)
for M in Ms:
plt.plot(M.get_cont_x(unit = 'eV'), np.log10(M.get_cont_y(unit = 'Q')), label=M.model_name_s)
plt.xlim((10., 60))
plt.ylim((42., 50))
plt.xlabel('E [eV]')
plt.ylabel('QH0(E)')
# TODO: avoid overlap
for ip in IP:
plt.plot([IP[ip], IP[ip]], [49, 50])
plt.text(IP[ip], 48, ip)
| Morisset/PyNeb_devel | pyneb/sample_scripts/Choroni_School/ex6_2.py | Python | gpl-3.0 | 7,588 |
""" RHUIManager Sync functions """
import re
from stitches.expect import Expect
from rhuilib.rhuimanager import RHUIManager
from rhuilib.util import Util
class RHUIManagerSync(object):
'''
Represents -= Synchronization Status =- RHUI screen
'''
@staticmethod
def sync_cds(connection, cdslist):
'''
sync an individual CDS immediately
'''
RHUIManager.screen(connection, "sync")
Expect.enter(connection, "sc")
RHUIManager.select(connection, cdslist)
RHUIManager.proceed_with_check(connection, "The following CDS instances will be scheduled for synchronization:", cdslist)
RHUIManager.quit(connection)
@staticmethod
def sync_cluster(connection, clusterlist):
'''
sync a CDS cluster immediately
'''
RHUIManager.screen(connection, "sync")
Expect.enter(connection, "sl")
RHUIManager.select(connection, clusterlist)
RHUIManager.proceed_with_check(connection, "The following CDS clusters will be scheduled for synchronization:", clusterlist)
RHUIManager.quit(connection)
@staticmethod
def get_cds_status(connection, cdsname):
'''
display CDS sync summary
'''
RHUIManager.screen(connection, "sync")
Expect.enter(connection, "dc")
res_list = Expect.match(connection, re.compile(".*\n" + cdsname.replace(".", "\.") + "[\.\s]*\[([^\n]*)\].*" + cdsname.replace(".", "\.") + "\s*\r\n([^\n]*)\r\n", re.DOTALL), [1, 2], 60)
connection.cli.exec_command("killall -s SIGINT rhui-manager")
ret_list = []
for val in [res_list[0]] + res_list[1].split(" "):
val = Util.uncolorify(val.strip())
ret_list.append(val)
RHUIManager.quit(connection)
return ret_list
@staticmethod
def sync_repo(connection, repolist):
'''
sync an individual repository immediately
'''
RHUIManager.screen(connection, "sync")
Expect.enter(connection, "sr")
Expect.expect(connection, "Select one or more repositories.*for more commands:", 60)
Expect.enter(connection, "l")
RHUIManager.select(connection, repolist)
RHUIManager.proceed_with_check(connection, "The following repositories will be scheduled for synchronization:", repolist)
RHUIManager.quit(connection)
@staticmethod
def get_repo_status(connection, reponame):
'''
display repo sync summary
'''
RHUIManager.screen(connection, "sync")
Expect.enter(connection, "dr")
reponame_quoted = reponame.replace(".", "\.")
res = Expect.match(connection, re.compile(".*" + reponame_quoted + "\s*\r\n([^\n]*)\r\n.*", re.DOTALL), [1], 60)[0]
connection.cli.exec_command("killall -s SIGINT rhui-manager")
res = Util.uncolorify(res)
ret_list = res.split(" ")
for i in range(len(ret_list)):
ret_list[i] = ret_list[i].strip()
RHUIManager.quit(connection)
return ret_list
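

# Usage sketch (hypothetical host/repo names; `connection` is the stitches
# connection object this library uses everywhere):
#
#     RHUIManagerSync.sync_cds(connection, ['cds1.example.com'])
#     status = RHUIManagerSync.get_cds_status(connection, 'cds1.example.com')
#     RHUIManagerSync.sync_repo(connection, ['Zoo x86_64'])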
| RedHatQE/rhui-testing-tools | rhuilib/rhuimanager_sync.py | Python | gpl-3.0 | 3,081 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_auth
short_description: "Module to manage authentication to oVirt/RHV"
author: "Ondra Machacek (@machacekondra)"
version_added: "2.2"
description:
- "This module authenticates to oVirt/RHV engine and creates SSO token, which should be later used in
all other oVirt/RHV modules, so all modules don't need to perform login and logout.
This module returns an Ansible fact called I(ovirt_auth). Every module can use this
fact as C(auth) parameter, to perform authentication."
options:
state:
default: present
choices: ['present', 'absent']
description:
- "Specifies if a token should be created or revoked."
username:
required: False
description:
- "The name of the user. For example: I(admin@internal)
Default value is set by I(OVIRT_USERNAME) environment variable."
password:
required: False
description:
- "The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
token:
required: False
description:
- "SSO token to be used instead of login with username/password.
Default value is set by I(OVIRT_TOKEN) environment variable."
version_added: 2.5
url:
required: False
description:
- "A string containing the API URL of the server.
For example: I(https://server.example.com/ovirt-engine/api).
Default value is set by I(OVIRT_URL) environment variable."
- "Either C(url) or C(hostname) is required."
hostname:
required: False
description:
- "A string containing the hostname of the server.
For example: I(server.example.com).
Default value is set by I(OVIRT_HOSTNAME) environment variable."
- "Either C(url) or C(hostname) is required."
version_added: "2.6"
insecure:
required: False
description:
- "A boolean flag that indicates if the server TLS certificate and host name should be checked."
type: bool
ca_file:
required: False
description:
- "A PEM file containing the trusted CA certificates. The
certificate presented by the server will be verified using these CA
certificates. If C(ca_file) parameter is not set, system wide
CA certificate store is used.
Default value is set by I(OVIRT_CAFILE) environment variable."
timeout:
required: False
description:
- "The maximum total time to wait for the response, in
seconds. A value of zero (the default) means wait forever. If
the timeout expires before the response is received an exception
will be raised."
compress:
required: False
description:
- "A boolean flag indicating if the SDK should ask
the server to send compressed responses. The default is I(True).
Note that this is a hint for the server, and that it may return
uncompressed data even when this parameter is set to I(True)."
type: bool
kerberos:
required: False
description:
- "A boolean flag indicating if Kerberos authentication
should be used instead of the default basic authentication."
type: bool
headers:
required: False
description:
- "A dictionary of HTTP headers to be added to each API call."
version_added: "2.4"
requirements:
- python >= 2.7
- ovirt-engine-sdk-python >= 4.3.0
notes:
- "Everytime you use ovirt_auth module to obtain ticket, you need to also revoke the ticket,
when you no longer need it, otherwise the ticket would be revoked by engine when it expires.
For an example of how to achieve that, please take a look at I(examples) section."
- "In order to use this module you have to install oVirt/RHV Python SDK.
To ensure it's installed with correct version you can create the following task:
I(pip: name=ovirt-engine-sdk-python version=4.3.0)"
- "Note that in oVirt/RHV 4.1 if you want to use a user which is not administrator
you must enable the I(ENGINE_API_FILTER_BY_DEFAULT) variable in engine. In
oVirt/RHV 4.2 and later it's enabled by default."
'''
EXAMPLES = '''
- block:
# Create a vault with `ovirt_password` variable which store your
# oVirt/RHV user's password, and include that yaml file with variable:
- include_vars: ovirt_password.yml
- name: Obtain SSO token with using username/password credentials
ovirt_auth:
url: https://ovirt.example.com/ovirt-engine/api
username: admin@internal
ca_file: ca.pem
password: "{{ ovirt_password }}"
# Previous task generated I(ovirt_auth) fact, which you can later use
# in different modules as follows:
- ovirt_vm:
auth: "{{ ovirt_auth }}"
state: absent
name: myvm
always:
- name: Always revoke the SSO token
ovirt_auth:
state: absent
ovirt_auth: "{{ ovirt_auth }}"
# When the user sets the following environment variables:
# OVIRT_URL = https://fqdn/ovirt-engine/api
# OVIRT_USERNAME = admin@internal
# OVIRT_PASSWORD = the_password
# the user can log in to oVirt using environment variables instead of
# variables in a yaml file.
# This is mainly useful when using Ansible Tower or AWX, as it will work
# for Red Hat Virtualization credentials type.
- name: Obtain SSO token
ovirt_auth:
state: present
'''
RETURN = '''
ovirt_auth:
description: Authentication facts, needed to perform authentication to oVirt/RHV.
returned: success
type: complex
contains:
token:
description: SSO token which is used for connection to oVirt/RHV engine.
returned: success
type: str
sample: "kdfVWp9ZgeewBXV-iq3Js1-xQJZPSEQ334FLb3eksoEPRaab07DhZ8ED8ghz9lJd-MQ2GqtRIeqhvhCkrUWQPw"
url:
description: URL of the oVirt/RHV engine API endpoint.
returned: success
type: str
sample: "https://ovirt.example.com/ovirt-engine/api"
ca_file:
description: CA file, which is used to verify SSL/TLS connection.
returned: success
type: str
sample: "ca.pem"
insecure:
description: Flag indicating if insecure connection is used.
returned: success
type: bool
sample: False
timeout:
description: Number of seconds to wait for response.
returned: success
type: int
sample: 0
compress:
description: Flag indicating if compression is used for connection.
returned: success
type: bool
sample: True
kerberos:
description: Flag indicating if kerberos is used for authentication.
returned: success
type: bool
sample: False
headers:
description: Dictionary of HTTP headers to be added to each API call.
returned: success
type: dict
'''
import os
import traceback
try:
import ovirtsdk4 as sdk
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import check_sdk
def main():
module = AnsibleModule(
argument_spec=dict(
url=dict(default=None),
hostname=dict(default=None),
username=dict(default=None),
password=dict(default=None, no_log=True),
ca_file=dict(default=None, type='path'),
insecure=dict(required=False, type='bool', default=None),
timeout=dict(required=False, type='int', default=0),
compress=dict(required=False, type='bool', default=True),
kerberos=dict(required=False, type='bool', default=False),
headers=dict(required=False, type='dict'),
state=dict(default='present', choices=['present', 'absent']),
token=dict(default=None, no_log=True),
ovirt_auth=dict(required=False, type='dict'),
),
required_if=[
('state', 'absent', ['ovirt_auth']),
],
supports_check_mode=True,
)
check_sdk(module)
state = module.params.get('state')
if state == 'present':
params = module.params
elif state == 'absent':
params = module.params['ovirt_auth']
def get_required_parameter(param, env_var, required=False):
var = params.get(param) or os.environ.get(env_var)
if not var and required and state == 'present':
module.fail_json(msg="'%s' is a required parameter." % param)
return var
url = get_required_parameter('url', 'OVIRT_URL', required=False)
hostname = get_required_parameter('hostname', 'OVIRT_HOSTNAME', required=False)
if url is None and hostname is None:
module.fail_json(msg="You must specify either 'url' or 'hostname'.")
if url is None and hostname is not None:
url = 'https://{0}/ovirt-engine/api'.format(hostname)
username = get_required_parameter('username', 'OVIRT_USERNAME')
password = get_required_parameter('password', 'OVIRT_PASSWORD')
token = get_required_parameter('token', 'OVIRT_TOKEN')
ca_file = get_required_parameter('ca_file', 'OVIRT_CAFILE')
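# If 'insecure' was not given explicitly, verify TLS only when a CA file is
# available: no ca_file -> insecure defaults to True, ca_file set -> False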
insecure = params.get('insecure') if params.get('insecure') is not None else not bool(ca_file)
connection = sdk.Connection(
url=url,
username=username,
password=password,
ca_file=ca_file,
insecure=insecure,
timeout=params.get('timeout'),
compress=params.get('compress'),
kerberos=params.get('kerberos'),
headers=params.get('headers'),
token=token,
)
try:
token = connection.authenticate()
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_auth=dict(
token=token,
url=url,
ca_file=ca_file,
insecure=insecure,
timeout=params.get('timeout'),
compress=params.get('compress'),
kerberos=params.get('kerberos'),
headers=params.get('headers'),
) if state == 'present' else dict()
)
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
# Close the connection, but don't revoke token
connection.close(logout=state == 'absent')
if __name__ == "__main__":
main()
| 2ndQuadrant/ansible | lib/ansible/modules/cloud/ovirt/ovirt_auth.py | Python | gpl-3.0 | 11,230 |
#!/usr/bin/env python3
import argparse
import json
import os
import sys
import urllib.parse
import urllib.request
parser = argparse.ArgumentParser(description='Certificate transparency name extractor.')
parser.add_argument('-d', '--domains', metavar='domain_file', type=str, help='Domains for certificate search.',
required=False)
parser.add_argument('domain', nargs='*')
args = parser.parse_intermixed_args()
def append_domain(lst, dom):
stripped = dom.strip().strip('.')
if stripped != '':
lst.append(stripped)
domains = []
if args.domains:
with open(args.domains) as f:
for line in f:
append_domain(domains, line)
for domain in args.domain:
append_domain(domains, domain)
for arg in domains:
subdomains = set()
with urllib.request.urlopen('https://crt.sh/?output=json&q=' + urllib.parse.quote('%.' + arg)) as f:
data = json.loads(f.read().decode('utf-8'))
for crt in data:
for domain in crt['name_value'].split('\n'):
if '@' in domain:
continue
if domain not in subdomains:
subdomains.add(domain)
try:
print(domain)
except BrokenPipeError:
# https://docs.python.org/3/library/signal.html#note-on-sigpipe
# Python flushes standard streams on exit; redirect remaining output
# to devnull to avoid another BrokenPipeError at shutdown
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, sys.stdout.fileno())
sys.exit(1) # Python exits with error code 1 on EPIPE
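# Illustrative invocations (hypothetical file names; assumes the crt.sh JSON
# endpoint queried above is reachable):
#
#   ./ct.py example.com another-example.org
#   ./ct.py -d domains.txt | sort -u > subdomains.txt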
| blechschmidt/massdns | scripts/ct.py | Python | gpl-3.0 | 1,748 |
__author__ = 'Mike McCann'
__copyright__ = '2012'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
STOQS Query manager for building ajax responses to selections made for QueryUI
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
from django.conf import settings
from django.db import transaction
from django.db.models import Q, Max, Min, Sum, Avg
from django.db.models.sql import query
from django.contrib.gis.geos import fromstr, MultiPoint
from django.db.utils import DatabaseError
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from stoqs import models
from loaders import MEASUREDINSITU, X3DPLATFORMMODEL, X3D_MODEL
from loaders.SampleLoaders import SAMPLED, NETTOW
from utils import round_to_n, postgresifySQL, EPOCH_STRING, EPOCH_DATETIME
from utils import getGet_Actual_Count, getShow_Sigmat_Parameter_Values, getShow_StandardName_Parameter_Values, getShow_All_Parameter_Values, getShow_Parameter_Platform_Data, getShow_Geo_X3D_Data
from utils import simplify_points, getParameterGroups
from geo import GPS
from MPQuery import MPQuery
from PQuery import PQuery
from Viz import MeasuredParameter, ParameterParameter, PPDatabaseException, PlatformAnimation
from coards import to_udunits
from datetime import datetime
from django.contrib.gis import gdal
import logging
import pprint
import calendar
import re
import locale
import time
import os
import tempfile
import numpy as np
logger = logging.getLogger(__name__)
# Constants to be also used by classifiers in contrib/analysis
LABEL = 'label'
DESCRIPTION = 'description'
COMMANDLINE = 'commandline'
spherical_mercator_srid = 3857
class STOQSQManager(object):
'''
This class is designed to handle building and managing queries against the STOQS database.
Chander Ganesan <chander@otg-nc.com>
'''
def __init__(self, request, response, dbname):
'''
This object should be created by passing in an HTTPRequest Object, an HTTPResponse object
and the name of the database to be used.
'''
self.request = request
self.dbname = dbname
self.response = response
self.mpq = MPQuery(request)
self.pq = PQuery(request)
self.pp = None
self._actual_count = None
self.initialQuery = True
# monkey patch sql/query.py to make it use our database for sql generation
query.DEFAULT_DB_ALIAS = dbname
# Dictionary of items that get returned via AJAX as the JSON response. Make available as member variable.
self.options_functions = {
'sampledparametersgroup': self.getParameters,
'measuredparametersgroup': self.getParameters,
'parameterminmax': self.getParameterMinMax,
'platforms': self.getPlatforms,
'time': self.getTime,
'depth': self.getDepth,
'simpledepthtime': self.getSimpleDepthTime,
##'simplebottomdepthtime': self.getSimpleBottomDepthTime,
'parametertime': self.getParameterTime,
'sampledepthtime': self.getSampleDepthTime,
'nettowdepthtime': self.getNetTowDepthTime,
'counts': self.getCounts,
'mpsql': self.getMeasuredParametersPostgreSQL,
'spsql': self.getSampledParametersPostgreSQL,
'extent': self.getExtent,
'activityparameterhistograms': self.getActivityParameterHistograms,
'parameterplatformdatavaluepng': self.getParameterPlatformDatavaluePNG,
'parameterparameterx3d': self.getParameterParameterX3D,
'measuredparameterx3d': self.getMeasuredParameterX3D,
'platformanimation': self.getPlatformAnimation,
'parameterparameterpng': self.getParameterParameterPNG,
'parameterplatforms': self.getParameterPlatforms,
'x3dterrains': self.getX3DTerrains,
'x3dplaybacks': self.getX3DPlaybacks,
'resources': self.getResources,
'attributes': self.getAttributes,
}
def buildQuerySets(self, *args, **kwargs):
'''
Build the query sets based on any selections from the UI. We need one for Activities and one for Samples
'''
kwargs['fromTable'] = 'Activity'
self._buildQuerySet(**kwargs)
kwargs['fromTable'] = 'Sample'
self._buildQuerySet(**kwargs)
kwargs['fromTable'] = 'ActivityParameter'
self._buildQuerySet(**kwargs)
kwargs['fromTable'] = 'ActivityParameterHistogram'
self._buildQuerySet(**kwargs)
def _buildQuerySet(self, *args, **kwargs):
'''
Build the query set based on any selections from the UI. For the first time through kwargs will be empty
and self.qs will be built of a join of activities, parameters, and platforms with no constraints.
Right now supported keyword arguments are the following:
sampledparametersgroup - a list of sampled parameter names to include
measuredparametersgroup - a list of measured parameter names to include
parameterstandardname - a list of parameter standard_names to include
platforms - a list of platform names to include
time - a two-tuple consisting of a start and end time; if either is None, the assumption is no start (or end) time
depth - a two-tuple consisting of a start and end depth; if either is None, the assumption is no start (or end) depth
parametervalues - a dictionary of parameter names and tuples of min & max values to use as constraints
these are passed onto MPQuery and processed from the kwargs dictionary
parameterparameter - a tuple of Parameter ids for x, y, z axes and color for correlation plotting
These are all called internally - so we'll assume that all the validation has been done in advance,
and the calls to this method meet the requirements stated above.
'''
fromTable = 'Activity' # Default is Activity
if 'fromTable' in kwargs:
fromTable = kwargs['fromTable']
# args is a positional tuple, so a 'qs' override must arrive via kwargs
if 'qs' in kwargs:
logger.debug('Using query string passed in to make a non-activity based query')
qs = kwargs['qs']
else:
# Provide "base" querysets with depth and filters so that more efficient inner joins are generated
if fromTable == 'Activity':
logger.debug('Making default activity based query')
qs = models.Activity.objects.using(self.dbname).all() # To receive filters constructed below from kwargs
qs_platform = qs
elif fromTable == 'Sample':
logger.debug('Making %s based query', fromTable)
qs = models.Sample.objects.using(self.dbname).all() # To receive filters constructed below from kwargs
# Exclude sub (child) samples where name is not set. Flot UI needs a name for its selector
qs = qs.exclude(name__isnull=True)
elif fromTable == 'ActivityParameter':
logger.debug('Making %s based query', fromTable)
qs = models.ActivityParameter.objects.using(self.dbname).all() # To receive filters constructed below from kwargs
elif fromTable == 'ActivityParameterHistogram':
logger.debug('Making %s based query', fromTable)
qs = models.ActivityParameterHistogram.objects.using(self.dbname).all() # To receive filters constructed below from kwargs
else:
logger.exception('No handler for fromTable = %s', fromTable)
self.args = args
self.kwargs = kwargs
# Determine if this is the initial query and set a flag
for k, v in kwargs.iteritems():
# Test keys that can affect the MeasuredParameter count
if k == 'depth' or k == 'time':
if v[0] is not None or v[1] is not None:
self.initialQuery = False
elif k in ['measuredparametersgroup', 'parameterstandardname', 'platforms']:
if v:
logger.debug('Setting self.initialQuery = False because %s = %s', k, v)
self.initialQuery = False
logger.debug('self.initialQuery = %s', self.initialQuery)
# Check to see if there is a "builder" for a Q object using the given parameters and build up the filter from the Q objects
for k, v in kwargs.iteritems():
if not v:
continue
if k == 'fromTable':
continue
if hasattr(self, '_%sQ' % (k,)):
# Call the method if it exists, and add the resulting Q object to the filtered queryset.
q = getattr(self,'_%sQ' % (k,))(v, fromTable)
logger.debug('fromTable = %s, k = %s, v = %s, q = %s', fromTable, k, v, q)
qs = qs.filter(q)
if k != 'platforms' and fromTable == 'Activity':
qs_platform = qs_platform.filter(q)
# Assign query sets for the current UI selections
if fromTable == 'Activity':
self.qs = qs.using(self.dbname)
self.qs_platform = qs_platform.using(self.dbname)
##logger.debug('Activity query = %s', str(self.qs.query))
elif fromTable == 'Sample':
self.sample_qs = qs.using(self.dbname)
##logger.debug('Sample query = %s', str(self.sample_qs.query))
elif fromTable == 'ActivityParameter':
self.activityparameter_qs = qs.using(self.dbname)
##logger.debug('activityparameter_qs = %s', str(self.activityparameter_qs.query))
elif fromTable == 'ActivityParameterHistogram':
self.activityparameterhistogram_qs = qs.using(self.dbname)
##logger.debug('activityparameterhistogram_qs = %s', str(self.activityparameterhistogram_qs.query))
def generateOptions(self):
'''
Generate a dictionary of all the selectable parameters by executing each of the functions
to generate those parameters. In this case, we'll simply do it by defining the dictionary and it's associated
function, then iterate over that dictionary calling the function(s) to get the value to be returned.
Note that in the case of parameters the return is a list of 2-tuples of (name, standard_name) and for
platforms the result is a list of 3-tuples of (name, id, color) for the associated elements.
For time and depth, the result is a single 2-tuple with the min and max values (respectively).
These objects are "simple" dictionaries using only Python's built-in types - so conversion to a
corresponding JSON object should be trivial.
'''
results = {}
for k,v in self.options_functions.iteritems():
if self.kwargs['only'] != []:
if k not in self.kwargs['only']:
continue
if k in self.kwargs['except']:
continue
if k == 'measuredparametersgroup':
results[k] = v(MEASUREDINSITU)
elif k == 'sampledparametersgroup':
results[k] = v(SAMPLED)
else:
results[k] = v()
##logger.info('qs.query = %s', pprint.pformat(str(self.qs.query)))
##logger.info('results = %s', pprint.pformat(results))
return results
#
# Methods that generate summary data, based on the current query criteria
#
def getCounts(self):
'''
Collect all of the various counts into a dictionary
'''
# Always get approximate count
logger.debug('str(self.getActivityParametersQS(forCount=True).query) = %s', str(self.getActivityParametersQS(forCount=True).query))
approximate_count = self.getActivityParametersQS(forCount=True).aggregate(Sum('number'))['number__sum']
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
# Actual counts are None unless the 'Get actual count' box is checked
actual_count = None
actual_count_localized = None
if getGet_Actual_Count(self.kwargs):
if not self.mpq.qs_mp:
self.mpq.buildMPQuerySet(*self.args, **self.kwargs)
if self._actual_count:
actual_count = self._actual_count
else:
logger.debug('Calling self.mpq.getMPCount()')
actual_count = self.mpq.getMPCount()
logger.debug('actual_count = %s', actual_count)
try:
approximate_count_localized = locale.format("%d", approximate_count, grouping=True)
except TypeError:
logger.exception('Failed to format approximate_count = %s into a number', approximate_count)
approximate_count_localized = None
if actual_count:
try:
actual_count_localized = locale.format("%d", actual_count, grouping=True)
except TypeError:
logger.exception('Failed to format actual_count = %s into a number', actual_count)
return { 'ap_count': self.getAPCount(),
'approximate_count': approximate_count,
'approximate_count_localized': approximate_count_localized,
'actual_count': actual_count,
'actual_count_localized': actual_count_localized
}
def getMeasuredParametersPostgreSQL(self):
'''
Wrapper around self.mpq.getMeasuredParametersPostgreSQL(), ensure that we have qs_mp built before calling
'''
sql = ''
if not self.mpq.qs_mp:
self.mpq.buildMPQuerySet(*self.args, **self.kwargs)
self.mpq.initialQuery = self.initialQuery
try:
sql = self.mpq.getMeasuredParametersPostgreSQL()
self._actual_count = self.mpq.getMPCount()
except Exception as e:
logger.warn('Could not get MeasuredParametersPostgreSQL: %s', e)
return sql
def getSampledParametersPostgreSQL(self):
'''
Wrapper around self.mpq.getSampledParametersPostgreSQL(), ensure that we have qs_mp built before calling
'''
if not self.mpq.qs_mp:
self.mpq.buildMPQuerySet(*self.args, **self.kwargs)
self.mpq.initialQuery = self.initialQuery
sql = self.mpq.getSampledParametersPostgreSQL()
return sql
def getAPCount(self):
'''
Return count of ActivityParameters given the current constraints
'''
qs_ap = self.getActivityParametersQS() # Approximate count from ActivityParameter
if qs_ap:
return qs_ap.count()
else:
return 0
def getActivityParametersQS(self, forCount=False):
'''
Return query set of ActivityParameters given the current constraints.
If forCount is True then add list of measured parameters to the query; this is done here for the query
needed for getting the count. The ParameterParameter min & max query also uses self.activityparameter_qs
and we don't want the addition of the measured parameters query for that.
'''
if not self.activityparameter_qs:
logger.warn("self.activityparameter_qs is None")
if forCount:
if self.kwargs['measuredparametersgroup']:
logger.debug('Adding Q object for parameter__name__in = %s', self.kwargs['measuredparametersgroup'])
return self.activityparameter_qs.filter(Q(parameter__name__in=self.kwargs['measuredparametersgroup']))
else:
return self.activityparameter_qs
else:
return self.activityparameter_qs
def getActivityParameterHistogramsQS(self):
'''
Return query set of ActivityParameterHistograms given the current constraints.
'''
return self.activityparameterhistogram_qs
def getSampleQS(self):
'''
Return query set of Samples given the current constraints.
'''
return self.sample_qs
def getParameters(self, groupName=''):
'''
Get a list of the unique parameters that are left based on the current query criteria. Also
return the UUIDs of those, since we need them to perform the query later.
Lastly, we assume here that the name is unique and is also used for the id - this is enforced on
data load.
'''
# Django makes it easy to do sub-queries: Get Parameters from list of Activities matching current selection
p_qs = models.Parameter.objects.using(self.dbname).filter(Q(activityparameter__activity__in=self.qs)).order_by('name')
if 'mplabels' in self.kwargs:
if self.kwargs['mplabels']:
# Get all Parameters that have common Measurements given the filter of the selected labels
# - this allows selection of co-located MeasuredParameters
commonMeasurements = models.MeasuredParameterResource.objects.using(self.dbname).filter(
resource__id__in=self.kwargs['mplabels']).values_list(
'measuredparameter__measurement__id', flat=True)
p_qs = p_qs.filter(Q(id__in=models.MeasuredParameter.objects.using(self.dbname).filter(
Q(measurement__id__in=commonMeasurements)).values_list('parameter__id', flat=True).distinct()))
if groupName:
p_qs = p_qs.filter(parametergroupparameter__parametergroup__name=groupName)
p_qs = p_qs.values('name','standard_name','id','units').distinct().order_by('name')
# Odd: Trying to print the query gives "Can't do subqueries with queries on different DBs."
##logger.debug('----------- p_qs.query (%s) = %s', groupName, str(p_qs.query))
results=[]
for row in p_qs:
name = row['name']
standard_name = row['standard_name']
id = row['id']
units = row['units']
if not standard_name:
standard_name = ''
if name is not None:
results.append((name,standard_name,id,units))
return results
def getParameterMinMax(self, pid=None, percentileAggregateType='avg'):
'''
If a single parameter has been selected in the filter for data access return the average 2.5 and 97.5
percentiles of the data and call them min and max for purposes of data access, namely KML generation in
the UI - assign these values to the 'dataaccess' key of the return hash. If pid is specified then
assign values to the 'plot' key of the return hash. If @percentileAggregateType is 'avg' (the default)
then the average of all the 2.5 and 97.5 percentiles will be used. This would be appropriate for
contour or scatter plotting. If @percentileAggregateType is 'extrema' then the aggregate Min is used
for 'p010' and Max for 'p990'. This is appropriate for parameter-parameter plotting.
'''
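# Illustrative (made-up) numbers: for three ActivityParameter rows with
# (p025, p975) = (8.1, 9.9), (8.3, 10.1), (8.2, 10.0):
#   percentileAggregateType='avg'     -> [pid, 8.2, 10.0] (column averages)
#   percentileAggregateType='extrema' -> the Min/Max of the percentile columns,
#                                        giving the widest range for p-p plots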
da_results = []
plot_results = []
# pid takes precedence over parameterplot being specified in kwargs
if pid:
try:
if percentileAggregateType == 'extrema':
logger.debug('self.getActivityParametersQS().filter(parameter__id=%s) = %s', pid, str(self.getActivityParametersQS().filter(parameter__id=pid).query))
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Min('p010'), Max('p990'), Avg('median'))
logger.debug('qs = %s', qs)
try:
plot_results = [pid, round_to_n(qs['p010__min'],4), round_to_n(qs['p990__max'],4)]
except TypeError:
logger.exception('Failed to get plot_results for qs = %s', qs)
else:
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Avg('p025'), Avg('p975'), Avg('median'))
try:
plot_results = [pid, round_to_n(qs['p025__avg'],4), round_to_n(qs['p975__avg'],4)]
if plot_results[1] == plot_results[2]:
logger.debug('Standard min and max for pid %s are the same. Getting the overall min and max values.', pid)
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Min('p025'), Max('p975'))
plot_results = [pid, round_to_n(qs['p025__min'],4), round_to_n(qs['p975__max'],4)]
except TypeError:
logger.exception('Failed to get plot_results for qs = %s', qs)
except ValueError as e:
if pid in ('longitude', 'latitude'):
# Get limits from Activity maptrack for which we have our getExtent() method
extent, lon_mid, lat_mid = self.getExtent(outputSRID=4326)
if pid == 'longitude':
plot_results = ['longitude', round_to_n(extent[0][0], 4), round_to_n(extent[1][0],4)]
if pid == 'latitude':
plot_results = ['latitude', round_to_n(extent[0][1], 4), round_to_n(extent[1][1],4)]
elif pid == 'depth':
dminmax = self.qs.aggregate(Min('mindepth'), Max('maxdepth'))
plot_results = ['depth', round_to_n(dminmax['mindepth__min'], 4), round_to_n(dminmax['maxdepth__max'],4)]
elif pid == 'time':
epoch = EPOCH_DATETIME
tminmax = self.qs.aggregate(Min('startdate'), Max('enddate'))
tmin = (tminmax['startdate__min'] - epoch).days + (tminmax['startdate__min'] - epoch).seconds / 86400.
tmax = (tminmax['enddate__max'] - epoch).days + (tminmax['enddate__max'] - epoch).seconds / 86400.
plot_results = ['time', tmin, tmax]
else:
logger.error('%s, but pid text = %s is not a coordinate', e, pid)
return {'plot': plot_results, 'dataaccess': []}
elif 'parameterplot' in self.kwargs:
if self.kwargs['parameterplot'][0]:
parameterID = self.kwargs['parameterplot'][0]
try:
if percentileAggregateType == 'extrema':
qs = self.getActivityParametersQS().filter(parameter__id=parameterID).aggregate(Min('p025'), Max('p975'))
plot_results = [parameterID, round_to_n(qs['p025__min'],4), round_to_n(qs['p975__max'],4)]
else:
qs = self.getActivityParametersQS().filter(parameter__id=parameterID).aggregate(Avg('p025'), Avg('p975'))
plot_results = [parameterID, round_to_n(qs['p025__avg'],4), round_to_n(qs['p975__avg'],4)]
except TypeError as e:
logger.exception(e)
if 'measuredparametersgroup' in self.kwargs:
if len(self.kwargs['measuredparametersgroup']) == 1:
mpname = self.kwargs['measuredparametersgroup'][0]
try:
pid = models.Parameter.objects.using(self.dbname).get(name=mpname).id
logger.debug('pid = %s', pid)
if percentileAggregateType == 'extrema':
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Min('p010'), Max('p990'))
da_results = [pid, round_to_n(qs['p010__min'],4), round_to_n(qs['p990__max'],4)]
else:
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Avg('p025'), Avg('p975'))
da_results = [pid, round_to_n(qs['p025__avg'],4), round_to_n(qs['p975__avg'],4)]
except TypeError as e:
logger.exception(e)
if 'sampledparametersgroup' in self.kwargs:
if len(self.kwargs['sampledparametersgroup']) == 1:
spid = self.kwargs['sampledparametersgroup'][0]
try:
pid = models.Parameter.objects.using(self.dbname).get(id=spid).id
logger.debug('pid = %s', pid)
if percentileAggregateType == 'extrema':
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Min('p010'), Max('p990'))
da_results = [pid, round_to_n(qs['p010__min'],4), round_to_n(qs['p990__max'],4)]
else:
qs = self.getActivityParametersQS().filter(parameter__id=pid).aggregate(Avg('p025'), Avg('p975'))
da_results = [pid, round_to_n(qs['p025__avg'],4), round_to_n(qs['p975__avg'],4)]
except TypeError as e:
logger.exception(e)
if 'parameterstandardname' in self.kwargs:
if len(self.kwargs['parameterstandardname']) == 1:
sname = self.kwargs['parameterstandardname'][0]
try:
if percentileAggregateType == 'extrema':
qs = self.getActivityParametersQS().filter(parameter__standard_name=sname).aggregate(Min('p025'), Max('p975'))
da_results = [sname, round_to_n(qs['p025__min'],4), round_to_n(qs['p975__max'],4)]
else:
qs = self.getActivityParametersQS().filter(parameter__standard_name=sname).aggregate(Avg('p025'), Avg('p975'))
da_results = [sname, round_to_n(qs['p025__avg'],4), round_to_n(qs['p975__avg'],4)]
except TypeError as e:
logger.exception(e)
# Sometimes da_results is empty, make it the same as plot_results if this happens
# TODO: simplify the logic implemented above...
if not da_results:
da_results = plot_results
return {'plot': plot_results, 'dataaccess': da_results}
def _getPlatformModel(self, platformName):
'''Return Platform X3D model information. Designed for stationary
platforms from non-trajectory Activities.
'''
@transaction.atomic(using=self.dbname)
def _innerGetPlatformModel(self, platform):
modelInfo = None, None, None, None
pModel = models.PlatformResource.objects.using(self.dbname).filter(
resource__resourcetype__name=X3DPLATFORMMODEL,
resource__name=X3D_MODEL,
platform__name=platformName).values_list(
'resource__uristring', flat=True).distinct()
if pModel:
# Timeseries and timeseriesProfile data for a single platform
# (even if composed of multiple Activities) must have single
# unique horizontal position.
geom_list = self.qs.filter(platform__name=platformName).values_list(
'nominallocation__geom', flat=True).distinct()
try:
geom = geom_list[0]
except IndexError:
return modelInfo
if len(geom_list) > 1:
logger.error('More than one location for %s returned. '
'Using first one found: %s', platformName, geom)
# TimeseriesProfile data has multiple nominaldepths - look to
# Resource for nominaldepth of the Platform for these kind of data.
depth_list = self.qs.filter(platform__name=platformName).values_list(
'nominallocation__depth', flat=True).distinct()
if len(depth_list) > 1:
logger.debug('More than one depth for %s returned. Checking '
'Resource for nominaldepth', platformName)
try:
depth = float(models.PlatformResource.objects.using(self.dbname).filter(
resource__resourcetype__name=X3DPLATFORMMODEL,
platform__name=platformName,
resource__name='X3D_MODEL_nominaldepth'
).values_list('resource__value', flat=True)[0])
except (IndexError, ObjectDoesNotExist):
logger.warn('Resource name X3D_MODEL_nominaldepth not found '
'for platform %s. Using a nominaldepth of 0.0', platformName)
depth = 0.0
else:
depth = depth_list[0]
modelInfo = (pModel[0], geom.y, geom.x,
-depth * float(self.request.GET.get('ve', 10)))
return modelInfo
return _innerGetPlatformModel(self, platformName)
def getPlatforms(self):
'''
Get a list of the unique platforms that are left based on the current query criteria.
We assume here that the name is unique and is also used for the id - this is enforced on
data load. Organize the platforms into a dictionary keyed by platformType.
'''
qs = self.qs_platform.values('platform__uuid', 'platform__name', 'platform__color', 'platform__platformtype__name'
).distinct().order_by('platform__name')
results = []
platformTypeHash = {}
for row in qs:
name=row['platform__name']
id=row['platform__name']
color=row['platform__color']
platformType = row['platform__platformtype__name']
if name is not None and id is not None:
# Get the featureType from the Resource
fts = models.ActivityResource.objects.using(self.dbname).filter(resource__name='featureType',
activity__platform__name=name).values_list('resource__value', flat=True).distinct()
try:
featureType = fts[0]
except IndexError:
logger.warn('No featureType returned for platform name = %s. Setting it to "trajectory".', name)
featureType = 'trajectory'
if len(fts) > 1:
logger.warn('More than one featureType returned for platform %s: %s. Using the first one.', name, fts)
if 'trajectory' in featureType:
try:
platformTypeHash[platformType].append((name, id, color, featureType, ))
except KeyError:
platformTypeHash[platformType] = []
platformTypeHash[platformType].append((name, id, color, featureType, ))
else:
x3dModel, x, y, z = self._getPlatformModel(name)
if x3dModel:
try:
platformTypeHash[platformType].append((name, id, color, featureType, x3dModel, x, y, z))
except KeyError:
platformTypeHash[platformType] = []
platformTypeHash[platformType].append((name, id, color, featureType, x3dModel, x, y, z))
else:
try:
platformTypeHash[platformType].append((name, id, color, featureType, ))
except KeyError:
platformTypeHash[platformType] = []
platformTypeHash[platformType].append((name, id, color, featureType, ))
return platformTypeHash
def getTime(self):
'''
Based on the current selected query criteria, determine the available time range. That'll be
returned as a 2-tuple as the min and max values that are selectable.
'''
# Documentation of some query optimization (tested with dorado & tethys data from June 2010 loaded with a stide of 100)
# =====================================================================================================================
# The statements:
# qs=self.qs.aggregate(Max('instantpoint__timevalue'), Min('instantpoint__timevalue'))
# return (qs['instantpoint__timevalue__min'], qs['instantpoint__timevalue__max'],)
# produce this SQL which takes 75.2 ms to execute:
# stoqs_june2011=# explain analyze SELECT DISTINCT MAX("stoqs_instantpoint"."timevalue") AS "instantpoint__timevalue__max", MIN("stoqs_instantpoint"."timevalue") AS "instantpoint__timevalue__min" FROM "stoqs_activity" LEFT OUTER JOIN "stoqs_instantpoint" ON ("stoqs_activity"."id" = "stoqs_instantpoint"."activity_id");
# QUERY PLAN
# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# HashAggregate (cost=738.13..738.14 rows=1 width=8) (actual time=75.154..75.154 rows=1 loops=1)
# -> Aggregate (cost=738.11..738.12 rows=1 width=8) (actual time=75.144..75.145 rows=1 loops=1)
# -> Merge Left Join (cost=0.00..629.34 rows=21755 width=8) (actual time=0.032..51.337 rows=21726 loops=1)
# Merge Cond: (stoqs_activity.id = stoqs_instantpoint.activity_id)
# -> Index Scan using stoqs_activity_pkey on stoqs_activity (cost=0.00..17.58 rows=45 width=4) (actual time=0.008..0.058 rows=36 loops=1)
# -> Index Scan using stoqs_instantpoint_activity_id on stoqs_instantpoint (cost=0.00..707.58 rows=21755 width=12) (actual time=0.016..19.982 rows=21726 loops=1)
# Total runtime: 75.231 ms
# (7 rows)
#
# The statements:
# qs=self.qs.aggregate(Max('enddate'), Min('startdate'))
# return (qs['startdate__min'], qs['enddate__max'],)
# take 0.22 ms
# stoqs_june2011=# explain analyze SELECT DISTINCT MIN("stoqs_activity"."startdate") AS "startdate__min", MAX("stoqs_activity"."enddate") AS "enddate__max" FROM "stoqs_activity";
# QUERY PLAN
# -----------------------------------------------------------------------------------------------------------------------
# HashAggregate (cost=5.69..5.70 rows=1 width=16) (actual time=0.154..0.156 rows=1 loops=1)
# -> Aggregate (cost=5.67..5.69 rows=1 width=16) (actual time=0.143..0.144 rows=1 loops=1)
# -> Seq Scan on stoqs_activity (cost=0.00..5.45 rows=45 width=16) (actual time=0.009..0.064 rows=36 loops=1)
# Total runtime: 0.219 ms
# (4 rows)
#
# While only a fraction of a second different, it is 342 times faster!
qs=self.qs.aggregate(Max('enddate'), Min('startdate'))
try:
times = (time.mktime(qs['startdate__min'].timetuple())*1000, time.mktime(qs['enddate__max'].timetuple())*1000,)
except AttributeError:
logger.exception('Failed to get timetuple from qs = %s', qs)
return
else:
return times
def getDepth(self):
'''
Based on the current selected query criteria, determine the available depth range. That'll be
returned as a 2-tuple as the min and max values that are selectable.
'''
# Original query that dives into the measurement table via instantpoint
##qs=self.qs.aggregate(Max('instantpoint__measurement__depth'), Min('instantpoint__measurement__depth'))
##return (qs['instantpoint__measurement__depth__min'],qs['instantpoint__measurement__depth__max'])
# Alternate query to use stats stored with the Activity
qs=self.qs.aggregate(Max('maxdepth'), Min('mindepth'))
try:
depths = ('%.2f' % qs['mindepth__min'], '%.2f' % qs['maxdepth__max'])
except TypeError:
logger.exception('Failed to convert mindepth__min and/or maxdepth__max to float from qs = %s', qs)
return
else:
return depths
def getSimpleDepthTime(self):
'''
Based on the current selected query criteria for activities, return the associated SimpleDepth time series
values as a 2-tuple list inside a 2 level hash of platform__name (with its color) and activity__name.
'''
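# Shape sketch of the return value, with made-up platform/activity names:
#   {'sdt': {'dorado': {'survey_1': [[1339006833000, '5.00'], ...]}},
#    'colors': {'dorado': 'ffeda0'}}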
sdt = {}
colors = {}
trajectoryQ = self._trajectoryQ()
timeSeriesQ = self._timeSeriesQ()
timeSeriesProfileQ = self._timeSeriesProfileQ()
trajectoryProfileQ = self._trajectoryProfileQ()
for plats in self.getPlatforms().values():
for p in plats:
plq = Q(platform__name = p[0])
sdt[p[0]] = {}
colors[p[0]] = p[2]
if p[3].lower() == 'trajectory':
# Overkill to also filter on trajectoryQ if p[3].lower() == 'trajectory' - old Tethys data does not have NC_GLOBAL featureType
qs_traj = self.qs.filter(plq).values_list( 'simpledepthtime__epochmilliseconds', 'simpledepthtime__depth',
'name').order_by('simpledepthtime__epochmilliseconds')
# Add to sdt hash date-time series organized by activity__name key within a platform__name key
# This will let flot plot the series with gaps between the surveys -- not connected
for s in qs_traj:
try:
##logger.debug('s[2] = %s', s[2])
sdt[p[0]][s[2]].append( [s[0], '%.2f' % s[1]] )
except KeyError:
##logger.debug('First time seeing activity__name = %s, making it a list in sdt', s[2])
sdt[p[0]][s[2]] = [] # First time seeing activity__name, make it a list
if s[1] is not None:
sdt[p[0]][s[2]].append( [s[0], '%.2f' % s[1]] ) # Append first value, even if it is 0.0
except TypeError:
continue # Likely "float argument required, not NoneType"
elif p[3].lower() == 'timeseries' or p[3].lower() == 'timeseriesprofile':
iptvq = Q()
qs_tsp = None
if 'time' in self.kwargs:
if self.kwargs['time'][0] is not None and self.kwargs['time'][1] is not None:
iptvq = Q(instantpoint__timevalue__gte = self.kwargs['time'][0]) & Q(instantpoint__timevalue__lte = self.kwargs['time'][1])
qs_tsp = self.qs.filter(plq & (timeSeriesQ | timeSeriesProfileQ) & iptvq).annotate(mintime=Min('instantpoint__timevalue'),
maxtime=Max('instantpoint__timevalue')).select_related().values( 'name',
'simpledepthtime__nominallocation__depth', 'mintime', 'maxtime').order_by(
'simpledepthtime__nominallocation__depth').distinct()
if not qs_tsp:
qs_tsp = self.qs.filter(plq & (timeSeriesQ | timeSeriesProfileQ)).select_related().values(
'simpledepthtime__epochmilliseconds', 'simpledepthtime__depth', 'name',
'simpledepthtime__nominallocation__depth').order_by('simpledepthtime__epochmilliseconds').distinct()
# Add to sdt hash date-time series organized by activity__name_nominallocation__depth key within a platform__name key
for sd in qs_tsp:
##logger.debug('sd = %s', sd)
an_nd = '%s_%s' % (sd['name'], sd['simpledepthtime__nominallocation__depth'])
##logger.debug('an_nd = %s', an_nd)
##logger.debug('sd = %s', sd)
if 'simpledepthtime__epochmilliseconds' in sd:
try:
sdt[p[0]][an_nd].append( [sd['simpledepthtime__epochmilliseconds'], '%.2f' % sd['simpledepthtime__nominallocation__depth']] )
except KeyError:
sdt[p[0]][an_nd] = [] # First time seeing this activityName_nominalDepth, make it a list
if sd['simpledepthtime__nominallocation__depth']:
sdt[p[0]][an_nd].append( [sd['simpledepthtime__epochmilliseconds'], '%.2f' % sd['simpledepthtime__nominallocation__depth']] )
except TypeError:
continue # Likely "float argument required, not NoneType"
else:
s_ems = int(1000 * to_udunits(sd['mintime'], 'seconds since 1970-01-01'))
e_ems = int(1000 * to_udunits(sd['maxtime'], 'seconds since 1970-01-01'))
try:
sdt[p[0]][an_nd].append( [s_ems, '%.2f' % sd['simpledepthtime__nominallocation__depth']] )
sdt[p[0]][an_nd].append( [e_ems, '%.2f' % sd['simpledepthtime__nominallocation__depth']] )
except KeyError:
sdt[p[0]][an_nd] = [] # First time seeing this activityName_nominalDepth, make it a list
if sd['simpledepthtime__nominallocation__depth']:
sdt[p[0]][an_nd].append( [s_ems, '%.2f' % sd['simpledepthtime__nominallocation__depth']] )
sdt[p[0]][an_nd].append( [e_ems, '%.2f' % sd['simpledepthtime__nominallocation__depth']] )
except TypeError:
continue # Likely "float argument required, not NoneType"
elif p[3].lower() == 'trajectoryprofile':
iptvq = Q()
qs_tp = None
if 'time' in self.kwargs:
if self.kwargs['time'][0] is not None and self.kwargs['time'][1] is not None:
s_ems = time.mktime(datetime.strptime(self.kwargs['time'][0], '%Y-%m-%d %H:%M:%S').timetuple())*1000
e_ems = time.mktime(datetime.strptime(self.kwargs['time'][1], '%Y-%m-%d %H:%M:%S').timetuple())*1000
iptvq = Q(simpledepthtime__epochmilliseconds__gte = s_ems) & Q(simpledepthtime__epochmilliseconds__lte = e_ems)
qs_tp = self.qs.filter(plq & trajectoryProfileQ & iptvq).select_related().values( 'name', 'simpledepthtime__depth',
'simpledepthtime__nominallocation__depth', 'simpledepthtime__epochmilliseconds').order_by(
'simpledepthtime__nominallocation__depth', 'simpledepthtime__epochmilliseconds').distinct()
if not qs_tp:
qs_tp = self.qs.filter(plq & trajectoryProfileQ).select_related().values( 'name', 'simpledepthtime__depth',
'simpledepthtime__nominallocation__depth', 'simpledepthtime__epochmilliseconds').order_by(
'simpledepthtime__nominallocation__depth', 'simpledepthtime__epochmilliseconds').distinct()
# Add to sdt hash date-time series organized by activity__name_nominallocation__depth key within a platform__name key - use real depths
for sd in qs_tp:
##logger.debug('sd = %s', sd)
an_nd = '%s_%s' % (sd['name'], sd['simpledepthtime__nominallocation__depth'])
##logger.debug('an_nd = %s', an_nd)
if 'simpledepthtime__epochmilliseconds' in sd:
try:
sdt[p[0]][an_nd].append( [sd['simpledepthtime__epochmilliseconds'], '%.2f' % sd['simpledepthtime__depth']] )
except KeyError:
sdt[p[0]][an_nd] = [] # First time seeing this activityName_nominalDepth, make it a list
if sd['simpledepthtime__depth']:
sdt[p[0]][an_nd].append( [sd['simpledepthtime__epochmilliseconds'], '%.2f' % sd['simpledepthtime__depth']] )
except TypeError:
continue # Likely "float argument required, not NoneType"
return({'sdt': sdt, 'colors': colors})
def getSimpleBottomDepthTime(self):
'''
Based on the current selected query criteria for activities, return the associated SimpleBottomDepth time series
values as a 2-tuple list inside a 2 level hash of platform__name and activity__name. Append a third value to the
x,y time series of a maximum depth (positive number in meters) so that Flot will fill downward. See:
http://stackoverflow.com/questions/23790277/flot-fill-color-above-a-line-graph
'''
sbdt = {}
maxDepth = 10971 # Max ocean depth
trajectoryQ = self._trajectoryQ()
for plats in self.getPlatforms().values():
for p in plats:
plq = Q(platform__name = p[0])
sbdt[p[0]] = {}
if p[3].lower() == 'trajectory':
qs_traj = self.qs.filter(plq & trajectoryQ).values_list( 'simplebottomdepthtime__epochmilliseconds', 'simplebottomdepthtime__bottomdepth',
'name').order_by('simplebottomdepthtime__epochmilliseconds')
# Add to sbdt hash date-time series organized by activity__name key within a platform__name key
# This will let flot plot the series with gaps between the surveys -- not connected
for s in qs_traj:
try:
sbdt[p[0]][s[2]].append( [s[0], '%.2f' % s[1], maxDepth] )
except KeyError:
sbdt[p[0]][s[2]] = [] # First time seeing activity__name, make it a list
if s[1] is not None:
sbdt[p[0]][s[2]].append( [s[0], '%.2f' % s[1], maxDepth] ) # Append first value, even if it is 0.0
except TypeError:
continue # Likely "float argument required, not NoneType"
return({'sbdt': sbdt})
#
# The following set of private (_...) methods are for building the parametertime response
#
def _collectParameters(self, platform, pt, pa_units, is_standard_name, ndCounts, strides, colors):
'''
Get parameters for this platform and collect units in a parameter name hash, use standard_name if set and repair bad names.
Return a tuple of pa_units, is_standard_name, ndCounts, and pt dictionaries.
'''
# Get parameters for this platform and collect units in a parameter name hash, use standard_name if set and repair bad names
p_qs = models.Parameter.objects.using(self.dbname).filter(Q(activityparameter__activity__in=self.qs))
logger.debug("self.kwargs['parametertimeplotid'] = %s", self.kwargs['parametertimeplotid'])
if self.kwargs['parametertimeplotid']:
p_qs = p_qs.filter(Q(id__in=self.kwargs['parametertimeplotid']))
p_qs = p_qs.filter(activityparameter__activity__platform__name=platform[0]).distinct()
for parameter in p_qs:
unit = parameter.units
# Get the number of nominal depths for this parameter
nds = models.NominalLocation.objects.using(self.dbname
).filter( Q(activity__in=self.qs),
activity__platform__name=platform[0],
measurement__measuredparameter__parameter=parameter
).values('depth').distinct().count()
# Check if timeSeries plotting is requested for trajectory data
plotTimeSeriesDepth = models.ParameterResource.objects.using(self.dbname).filter(parameter__name=parameter,
resource__name='plotTimeSeriesDepth').values_list('resource__value')
if nds == 0 and not plotTimeSeriesDepth:
continue
if parameter.standard_name == 'sea_water_salinity':
unit = 'PSU'
if parameter.standard_name and parameter.standard_name.strip() != '':
logger.debug('Parameter name "%s" has standard_name = %s', parameter.name, parameter.standard_name)
pa_units[parameter.standard_name] = unit
is_standard_name[parameter.standard_name] = True
ndCounts[parameter.standard_name] = nds
colors[parameter.standard_name] = parameter.id
strides[parameter.standard_name] = {}
else:
logger.debug('Parameter name "%s" does not have a standard_name', parameter.name)
pa_units[parameter.name] = unit
is_standard_name[parameter.name] = False
ndCounts[parameter.name] = nds
colors[parameter.name] = parameter.id
strides[parameter.name] = {}
# Initialize pt dictionary of dictionaries with its keys
if unit not in pt.keys():
logger.debug('Initializing pt[%s] = {}', unit)
pt[unit] = {}
return (pa_units, is_standard_name, ndCounts, pt, colors, strides)
def _getParameterTimeFromMP(self, qs_mp, pt, pa_units, a, p, is_standard_name, stride):
'''
Return hash of time series measuredparameter data with specified stride
'''
# See if timeSeries plotting is requested for trajectory data, e.g. BEDS
plotTimeSeriesDepth = models.ParameterResource.objects.using(self.dbname).filter(parameter__name=p,
resource__name='plotTimeSeriesDepth').values_list('resource__value')
if not plotTimeSeriesDepth:
# See if there is one for standard_name
plotTimeSeriesDepth = models.ParameterResource.objects.using(self.dbname).filter(parameter__standard_name=p,
resource__name='plotTimeSeriesDepth').values_list('resource__value')
# Order by nominal depth first so that strided access collects data correctly from each depth
pt_qs_mp = qs_mp.order_by('measurement__nominallocation__depth', 'measurement__instantpoint__timevalue')[::stride]
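# Note: slicing a Django QuerySet with a step (the [::stride] above) evaluates
# the query and returns a plain list, so the database is hit once per call here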
logger.debug('Adding time series of parameter = %s in key = %s', p, pa_units[p])
for mp in pt_qs_mp:
if not mp['datavalue']:
continue
tv = mp['measurement__instantpoint__timevalue']
ems = int(1000 * to_udunits(tv, 'seconds since 1970-01-01'))
nd = mp['measurement__depth'] # Will need to switch to mp['measurement__nominallocation__depth'] when
# mooring microcat actual depths are put into mp['measurement__depth']
##if p == 'BED_DEPTH':
## logger.debug('nd = %s, tv = %s', nd, tv)
## raise Exception('DEBUG') # Useful for examining queries in the postgresql log
if plotTimeSeriesDepth:
an_nd = "%s - %s starting @ %s m" % (p, a.name, plotTimeSeriesDepth[0][0],)
else:
an_nd = "%s - %s @ %s" % (p, a.name, nd,)
try:
pt[pa_units[p]][an_nd].append((ems, mp['datavalue']))
except KeyError:
pt[pa_units[p]][an_nd] = []
pt[pa_units[p]][an_nd].append((ems, mp['datavalue']))
return pt
def _getParameterTimeFromAP(self, pt, pa_units, a, p):
'''
Return hash of time series min and max values for specified activity and parameter. To be used when duration
of an activity is less than the pixel width of the flot plot area. This can occur for short event data sets
such as from Benthic Event Detector deployments.
'''
aps = models.ActivityParameter.objects.using(self.dbname).filter(activity=a, parameter__name=p).values('min', 'max')
start_ems = int(1000 * to_udunits(a.startdate, 'seconds since 1970-01-01'))
end_ems = int(1000 * to_udunits(a.enddate, 'seconds since 1970-01-01'))
pt[pa_units[p]][a.name] = [[start_ems, aps[0]['min']], [end_ems, aps[0]['max']]]
return pt
def _parameterInSelection(self, p, is_standard_name, parameterType=MEASUREDINSITU):
'''
Return True if parameter name is in the UI selection, either from constraints other than
direct selection or if specifically selected in the UI.
'''
isInSelection = False
if is_standard_name[p]:
if p in [parms[1] for parms in self.getParameters(parameterType)]:
isInSelection = True
else:
if p in [parms[0] for parms in self.getParameters(parameterType)]:
isInSelection = True
if not isInSelection:
if self.kwargs['measuredparametersgroup']:
if p in self.kwargs['measuredparametersgroup']:
isInSelection = True
else:
isInSelection = False
return isInSelection
def _buildParameterTime(self, pa_units, is_standard_name, ndCounts, pt, strides, pt_qs_mp):
'''
Build structure of timeseries/timeseriesprofile parameters organized by units
'''
PIXELS_WIDE = 800 # Approximate pixel width of parameter-time-flot window
units = {}
# Build units hash of parameter names for labeling axes in flot
for p,u in pa_units.iteritems():
logger.debug('is_standard_name = %s. p, u = %s, %s', is_standard_name, p, u)
if not self._parameterInSelection(p, is_standard_name):
logger.debug('Parameter is not in selection')
continue
try:
units[u] = units[u] + ' ' + p
except KeyError:
units[u] = p
# Apply either parameter name or standard_name to MeasuredParameter and Activity query sets
if is_standard_name[p]:
qs_mp = pt_qs_mp.filter(parameter__standard_name=p)
qs_awp = self.qs.filter(activityparameter__parameter__standard_name=p)
else:
qs_mp = pt_qs_mp.filter(parameter__name=p)
qs_awp = self.qs.filter(activityparameter__parameter__name=p)
qs_awp = qs_awp.filter(Q(activityresource__resource__value__icontains='timeseries') |
Q(activityparameter__parameter__parameterresource__resource__name__icontains='plotTimeSeriesDepth')).distinct()
try:
secondsperpixel = self.kwargs['secondsperpixel'][0]
except (IndexError, KeyError):
secondsperpixel = 1500 # Default is a 2-week view (86400 * 14 / 800)
logger.debug('--------------------p = %s, u = %s, is_standard_name[p] = %s', p, u, is_standard_name[p])
# Select each time series by Activity and test against secondsperpixel for deciding on min & max or stride selection
if not ndCounts[p]:
ndCounts[p] = 1 # Trajectories with plotTimeSeriesDepth will not have a nominal depth, set to 1 for calculation below
for a in qs_awp:
qs_mp_a = qs_mp.filter(measurement__instantpoint__activity__name=a.name)
ad = (a.enddate-a.startdate)
aseconds = ad.days * 86400 + ad.seconds
logger.debug('a.name = %s, a.startdate = %s, a.enddate %s, aseconds = %s, secondsperpixel = %s', a.name, a.startdate, a.enddate, aseconds, secondsperpixel)
if float(aseconds) > float(secondsperpixel):
# Multiple points of this activity can be displayed in the flot, get an appropriate stride
logger.debug('PIXELS_WIDE = %s, ndCounts[p] = %s', PIXELS_WIDE, ndCounts[p])
stride = qs_mp_a.count() / PIXELS_WIDE / ndCounts[p] # Integer factors -> integer result
if stride < 1:
stride = 1
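# Worked example with illustrative numbers: 240000 points across 3 nominal
# depths plotted into an ~800 px window gives 240000 / 800 / 3 = 100, i.e.
# every 100th point per depth is kept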
logger.debug('Getting timeseries from MeasuredParameter table with stride = %s', stride)
strides[p][a.name] = stride
logger.debug('Adding timeseries for p = %s, a = %s', p, a)
pt = self._getParameterTimeFromMP(qs_mp_a, pt, pa_units, a, p, is_standard_name, stride)
else:
# Construct just two points for this activity-parameter using the min & max from the AP table
pt = self._getParameterTimeFromAP(pt, pa_units, a, p)
return (pt, units, strides)
def getParameterTime(self):
'''
Based on the current selected query criteria for activities, return the associated MeasuredParameter datavalue time series
values as a 2-tuple list inside a 3 level hash of featureType, units, and an "activity__name + nominal depth" key
for each line to be drawn by flot. The MeasuredParameter queries here can be costly. Only perform them if the
UI has request only 'parametertime' or if the Parameter tab is active in the UI as indicated by 'parametertab' in self.kwargs.
If part of the larger SummaryData request then return the structure with just counts set - a much cheaper query.
'''
pt = {}
units = {}
colors = {}
strides = {}
pa_units = {}
is_standard_name = {}
ndCounts = {}
counts = 0
# Look for platforms that have featureTypes amenable to Parameter time series visualization
for plats in self.getPlatforms().values():
for platform in plats:
timeSeriesParmCount = 0
trajectoryParmCount = 0
if platform[3].lower() == 'timeseriesprofile' or platform[3].lower() == 'timeseries':
# Do cheap query to count the number of timeseriesprofile or timeseries parameters
timeSeriesParmCount = models.Parameter.objects.using(self.dbname).filter(
activityparameter__activity__activityresource__resource__name__iexact='featureType',
activityparameter__activity__activityresource__resource__value__iexact=platform[3].lower()
).distinct().count()
elif platform[3].lower() == 'trajectory':
# Count trajectory Parameters for which timeSeries plotting has been requested
trajectoryParmCount = models.Parameter.objects.using(self.dbname).filter(
activityparameter__activity__activityresource__resource__name__iexact='featureType',
activityparameter__activity__activityresource__resource__value__iexact=platform[3].lower(),
parameterresource__resource__name__iexact='plotTimeSeriesDepth',
).distinct().count()
counts += timeSeriesParmCount + trajectoryParmCount
if counts:
if 'parametertime' in self.kwargs['only'] or self.kwargs['parametertab']:
# Initialize structure organized by units for parameters left in the selection
logger.debug('Calling self._collectParameters() with platform = %s', platform)
pa_units, is_standard_name, ndCounts, pt, colors, strides = self._collectParameters(platform, pt,
pa_units, is_standard_name, ndCounts, strides, colors)
if pa_units:
# The base MeasuredParameter query set for existing UI selections
if not self.mpq.qs_mp:
self.mpq.buildMPQuerySet(*self.args, **self.kwargs)
self.mpq.initialQuery = self.initialQuery
# Perform more expensive query: start with no_order version of the MeasuredParameter query set
pt_qs_mp = self.mpq.qs_mp_no_order
logger.debug('Before self._buildParameterTime: pt = %s', pt.keys())
pt, units, strides = self._buildParameterTime(pa_units, is_standard_name, ndCounts, pt, strides, pt_qs_mp)
logger.debug('After self._buildParameterTime: pt = %s', pt.keys())
return({'pt': pt, 'units': units, 'counts': counts, 'colors': colors, 'strides': strides})
def getSampleDepthTime(self):
'''
Based on the current selected query criteria for activities, return the associated SampleDepth time series
values as a 2-tuple list. The similarity to the getSimpleDepthTime name is pure coincidence.
'''
samples = []
if self.getSampleQS():
qs = self.getSampleQS().values_list(
'instantpoint__timevalue',
'depth',
'instantpoint__activity__name',
'name'
).order_by('instantpoint__timevalue')
for s in qs:
ems = int(1000 * to_udunits(s[0], 'seconds since 1970-01-01'))
# Kludgy handling of activity names - flot needs 2 items separated by a space to handle sample event clicking
if (s[2].find('_decim') != -1):
label = '%s %s' % (s[2].split('_decim')[0], s[3],) # Lop off '_decim.nc (stride=xxx)' part of name
elif (s[2].find(' ') != -1):
label = '%s %s' % (s[2].split(' ')[0], s[3],) # Lop off everything after a space in the activity name
else:
label = '%s %s' % (s[2], s[3],) # Show entire Activity name & sample name
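# e.g. (hypothetical names) activity 'dorado_2011_249_decim.nc (stride=10)'
# with sample 'Gulper_7' yields the flot label 'dorado_2011_249 Gulper_7'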
rec = {'label': label, 'data': [[ems, '%.2f' % s[1]]]}
##logger.debug('Appending %s', rec)
samples.append(rec)
return(samples)
def getNetTowDepthTime(self):
'''
Based on the current selected query criteria for activities, return the associated NetTow time series
values as a list of two 2-tuples. These are like SampleDepthTime, but have a start and end time/depth.
The UI uses a different glyph which is why these are delivered in a separate structure.
The convention for NetTows is for one Sample per activity, therefore we can examine the attributes
of the activity to get the start and end time and min and max depths.
'''
nettows = []
nettow = models.SampleType.objects.using(self.dbname).filter(name__contains=NETTOW)
if self.getSampleQS() and nettow:
qs = self.getSampleQS().filter(sampletype=nettow).values_list(
'instantpoint__timevalue',
'depth',
'instantpoint__activity__name',
'name',
'instantpoint__activity__startdate',
'instantpoint__activity__enddate',
'instantpoint__activity__mindepth',
'instantpoint__activity__maxdepth',
).order_by('instantpoint__timevalue')
for s in qs:
s_ems = int(1000 * to_udunits(s[4], 'seconds since 1970-01-01'))
e_ems = int(1000 * to_udunits(s[5], 'seconds since 1970-01-01'))
# Kludgy handling of activity names - flot needs 2 items separated by a space to handle sample event clicking
if (s[2].find('_decim') != -1):
label = '%s %s' % (s[2].split('_decim')[0], s[3],) # Lop off '_decim.nc (stride=xxx)' part of name
elif (s[2].find(' ') != -1):
label = '%s %s' % (s[2].split(' ')[0], s[3],) # Lop off everything after a space in the activity name
else:
label = '%s %s' % (s[2], s[3],) # Show entire Activity name & sample name
rec = {'label': label, 'data': [[s_ems, '%.2f' % s[7]], [e_ems, '%.2f' % s[6]]]}
nettows.append(rec)
return(nettows)
def getActivityParameterHistograms(self):
'''
Based on the current selected query criteria for activities, return the associated histograms of the selected
parameters as a list of hashes, one hash per parameter with pairs of binlo and bincount for flot to make bar charts.
        Organized in a somewhat complicated nested structure of hashes of hashes that permits the jQuery client to properly
color and plot the data.
'''
aphHash = {}
pUnits = {}
showAllParameterValuesFlag = getShow_All_Parameter_Values(self.kwargs)
showSigmatParameterValuesFlag = getShow_Sigmat_Parameter_Values(self.kwargs)
showStandardnameParameterValuesFlag = getShow_StandardName_Parameter_Values(self.kwargs)
for pa in models.Parameter.objects.using(self.dbname).all():
# Apply (negative) logic on whether to continue with creating histograms based on checkboxes checked in the queryUI
if not showAllParameterValuesFlag:
if not showStandardnameParameterValuesFlag:
if not showSigmatParameterValuesFlag:
continue
elif pa.standard_name != 'sea_water_sigma_t':
continue
elif not pa.standard_name:
continue
histList = {}
binwidthList = {}
platformList = {}
activityList = {}
        # Collect histograms organized by activity and platform names. The SQL execution is sequential: a query
        # is executed for each parameter, and here we organize the results by platform and activity.
for aph in self.getActivityParameterHistogramsQS().select_related().filter(
activityparameter__parameter=pa).values('activityparameter__activity__name',
'activityparameter__activity__platform__name', 'binlo', 'binhi', 'bincount').order_by(
'activityparameter__activity__platform__name', 'activityparameter__activity__name', 'binlo'):
# Save histogram data by activity name
if np.isnan(aph['binlo']) or np.isnan(aph['binhi']):
continue
try:
histList[aph['activityparameter__activity__name']].append([aph['binlo'], aph['bincount']])
except KeyError:
# First time seeing this activity name, create a list and add the first histogram point
histList[aph['activityparameter__activity__name']] = []
histList[aph['activityparameter__activity__name']].append([aph['binlo'], aph['bincount']])
binwidthList[aph['activityparameter__activity__name']] = []
binwidthList[aph['activityparameter__activity__name']] = aph['binhi'] - aph['binlo']
platformList[aph['activityparameter__activity__name']] = []
platformList[aph['activityparameter__activity__name']].append(aph['activityparameter__activity__platform__name'])
##logger.debug('pa.name = %s, aname = %s', pa.name, aph['activityparameter__activity__name'])
# Unwind the platformList to get activities by platform name
for an, pnList in platformList.iteritems():
##logger.debug('an = %s, pnList = %s', an, pnList)
for pn in pnList:
try:
activityList[pn].append(an)
except KeyError:
activityList[pn] = []
activityList[pn].append(an)
# Build the final data structure organized by platform -> activity
plHash = {}
for plat in activityList.keys():
##logger.debug('plat = %s', plat)
for an in activityList[plat]:
try:
plHash[plat][an] = {'binwidth': binwidthList[an], 'hist': histList[an]}
except KeyError:
plHash[plat] = {}
plHash[plat][an] = {'binwidth': binwidthList[an], 'hist': histList[an]}
# Assign histogram data to the hash keyed by parameter name
if plHash:
aphHash[pa.name] = plHash
pUnits[pa.name] = pa.units
# Make RGBA colors from the hex colors - needed for opacity in flot bars
rgbas = {}
for plats in self.getPlatforms().values():
for p in plats:
r,g,b = (p[2][:2], p[2][2:4], p[2][4:])
rgbas[p[0]] = 'rgba(%d, %d, %d, 0.4)' % (int(r,16), int(g,16), int(b,16))
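                # Worked example (hypothetical color): p[2] = 'ff8800' splits into
                # ('ff', '88', '00'), giving 'rgba(255, 136, 0, 0.4)'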
return {'histdata': aphHash, 'rgbacolors': rgbas, 'parameterunits': pUnits}
def getParameterPlatformDatavaluePNG(self):
'''
Called when user interface has selected just one Parameter and just one Platform, in which case
produce a depth-time section plot for overlay on the flot plot. Return a png image file name for inclusion
in the AJAX response.
'''
# Check for parameter-plot-radio button being selected, which inherently ensures that a
# single parameter name is selected for plotting. The client code will also ensure that
# extra platforms measuring the same parameter name are filtered out in the selection so
# there's no need for this server code to check for just one platform in the selection.
parameterID = None
platformName = None
logger.debug('self.kwargs = %s', self.kwargs)
if 'parameterplot' in self.kwargs:
if self.kwargs['parameterplot'][0]:
parameterID = self.kwargs['parameterplot'][0]
parameter = models.Parameter.objects.using(self.request.META['dbAlias']).get(id=parameterID)
parameterGroups = getParameterGroups(self.request.META['dbAlias'], parameter)
if self.kwargs['parameterplot'][1]:
platformName = self.kwargs['parameterplot'][1]
if not parameterID or not platformName:
# With Plot radio button, must have parameterID and platformName
return None, None, 'Problem with getting parameter-plot-radio button info'
logger.debug('Instantiating Viz.MeasuredParameter............................................')
self.mpq.buildMPQuerySet(*self.args, **self.kwargs)
if SAMPLED in parameterGroups:
# The fourth item should be for SampledParameter if that is the group of the Parameter
cp = MeasuredParameter(self.kwargs, self.request, self.qs, self.mpq.qs_sp_no_order,
self.getParameterMinMax(pid=parameterID)['plot'], self.getSampleQS(), platformName,
parameterID, parameterGroups)
else:
cp = MeasuredParameter(self.kwargs, self.request, self.qs, self.mpq.qs_mp_no_order,
self.getParameterMinMax(pid=parameterID)['plot'], self.getSampleQS(), platformName,
parameterID, parameterGroups)
return cp.renderDatavaluesForFlot()
def getParameterParameterPNG(self):
'''
If at least the X and Y radio buttons are checked produce a scatter plot for delivery back to the client
'''
plotResults = None
if (self.kwargs.has_key('parameterparameter')):
px = self.kwargs['parameterparameter'][0]
py = self.kwargs['parameterparameter'][1]
pc = self.kwargs['parameterparameter'][3]
if (px and py):
# PQuery is used here so as to combine Measured and Sampled Parameters
if not self.pq.qs_mp:
self.pq.buildPQuerySet(*self.args, **self.kwargs)
# We have enough information to generate a 2D scatter plot
##if not self.pp: # ...png always gets called before ...x3d - unless we change the key names...
pMinMax = { 'x': self.getParameterMinMax(px, percentileAggregateType='extrema')['plot'],
'y': self.getParameterMinMax(py, percentileAggregateType='extrema')['plot'],
'c': self.getParameterMinMax(pc)['plot']}
logger.debug('pMinMax = %s', pMinMax)
if not pMinMax['x'] or not pMinMax['y']:
return '', 'Selected x and y axis parameters are not in filtered selection.'
self.pp = ParameterParameter(self.request, {'x': px, 'y': py, 'c': pc}, self.mpq, self.pq, pMinMax)
try:
ppPngFile, infoText, sql = self.pp.make2DPlot()
except PPDatabaseException as e:
return None, e.message, e.sql
plotResults = ppPngFile, infoText, sql
return plotResults
def getParameterParameterX3D(self):
'''
If at least the X, Y, and Z radio buttons are checked produce an X3D response for delivery back to the client
'''
x3dDict = None
if (self.kwargs.has_key('parameterparameter')):
px = self.kwargs['parameterparameter'][0]
py = self.kwargs['parameterparameter'][1]
pz = self.kwargs['parameterparameter'][2]
pc = self.kwargs['parameterparameter'][3]
logger.debug('px = %s, py = %s, pz = %s, pc = %s', px, py, pz, pc)
if (px and py and pz):
if not self.pq.qs_mp:
self.pq.buildPQuerySet(*self.args, **self.kwargs)
# We have enough information to generate X3D XML
pMinMax = { 'x': self.getParameterMinMax(px, percentileAggregateType='extrema')['plot'],
'y': self.getParameterMinMax(py, percentileAggregateType='extrema')['plot'],
'z': self.getParameterMinMax(pz, percentileAggregateType='extrema')['plot'],
'c': self.getParameterMinMax(pc)['plot'] }
if not pMinMax['x'] or not pMinMax['y'] or not pMinMax['z']:
return '', 'Selected x, y, z, c Parameters not in filtered selection.'
logger.debug('Instantiating Viz.PropertyPropertyPlots for X3D............................................')
self.pp = ParameterParameter(self.request, {'x': px, 'y': py, 'z': pz, 'c': pc}, self.mpq, self.pq, pMinMax)
try:
x3dDict = self.pp.makeX3D()
except DatabaseError as e:
return '', e
try:
x3dDict['sql'] += ';'
except TypeError:
return '', 'Selected x, y, z, c Parameters not in filtered selection.'
return x3dDict
def getMeasuredParameterX3D(self):
'''Returns dictionary of X3D elements for rendering by X3DOM
'''
x3dDict = None
if getShow_Geo_X3D_Data(self.kwargs):
if 'parameterplot' in self.kwargs:
if self.kwargs['parameterplot'][0]:
parameterID = self.kwargs['parameterplot'][0]
parameterGroups = getParameterGroups(self.request.META['dbAlias'],
models.Parameter.objects.using(self.request.META['dbAlias']
).get(id=parameterID))
try:
count = self.mpq.count()
logger.debug('count = %s', count)
except AttributeError:
logger.debug('Calling self.mpq.buildMPQuerySet()')
self.mpq.buildMPQuerySet(*self.args, **self.kwargs)
else:
logger.debug('self.mpq.qs_mp = %s', self.mpq.qs_mp)
try:
platformName = self.kwargs['parameterplot'][1]
except IndexError as e:
logger.warn(e)
platformName = None
logger.debug('Getting data values in X3D for platformName = %s', platformName)
mpdv = MeasuredParameter(self.kwargs, self.request, self.qs, self.mpq.qs_mp,
self.getParameterMinMax()['plot'], self.getSampleQS(),
platformName, parameterID, parameterGroups)
# Default vertical exaggeration is 10x
x3dDict = mpdv.dataValuesX3D(float(self.request.GET.get('ve', 10)))
return x3dDict
def getPlatformAnimation(self):
'''
Based on the current selected query criteria for activities,
return the associated PlatformAnimation time series of X3D scene graph.
        If roll, pitch, and yaw exist as platform standard names the scene includes
        orientation angles; otherwise just the position animation is returned.
'''
orientDict = {}
if self.request.GET.get('showplatforms', False):
try:
count = self.mpq.count()
except AttributeError:
self.mpq.buildMPQuerySet(*self.args, **self.kwargs)
# Test if there are any X3D platform models in the selection
platformsHavingModels = {pr.platform for pr in models.PlatformResource.objects.using(
self.dbname).filter(resource__resourcetype__name=X3DPLATFORMMODEL,
platform__in=[a.platform for a in self.qs])}
platforms_trajectories = {ar.activity.platform for ar in models.ActivityResource.objects.using(
self.dbname).filter(resource__name='featureType', resource__value='trajectory',
activity__platform__in=[a.platform for a in self.qs])}
platforms_to_animate = platformsHavingModels & platforms_trajectories
if platforms_to_animate:
# Use qs_mp_no_parm QuerySet as it contains roll, pitch, and yaw values
mppa = PlatformAnimation(platforms_to_animate, self.kwargs,
self.request, self.qs, self.mpq.qs_mp_no_parm)
# Default vertical exaggeration is 10x and default geoorigin is empty string
orientDict = mppa.platformAnimationDataValuesForX3D(
float(self.request.GET.get('ve', 10)),
self.request.GET.get('geoorigin', ''),
scale=1000, speedup=10)
return orientDict
def getParameterPlatforms(self):
'''
        Return hash of parameter ids (keys) and the platforms (a list) that measured/sampled them
'''
ppHash = {}
for ap in models.ActivityParameter.objects.using(self.dbname).filter(activity__in=self.qs).values('parameter__id', 'activity__platform__name').distinct():
try:
ppHash[ap['parameter__id']].append(ap['activity__platform__name'])
except KeyError:
ppHash[ap['parameter__id']] = []
ppHash[ap['parameter__id']].append(ap['activity__platform__name'])
return ppHash
def getX3DTerrains(self):
'''
Query Resources to get any X3D Terrain information for this Campaign and return as a hash for the STOQS UI to use
'''
x3dtHash = {}
try:
for r in models.Resource.objects.using(self.dbname).filter(resourcetype__name='x3dterrain').all():
try:
x3dtHash[r.uristring][r.name] = r.value
except KeyError:
x3dtHash[r.uristring] = {}
x3dtHash[r.uristring][r.name] = r.value
except DatabaseError as e:
logger.warn('No resourcetype__name of x3dterrain in %s: %s', self.dbname, e)
return x3dtHash
def getX3DPlaybacks(self):
'''
Query Resources to get any X3D Playback information for the Activities remaining in the selection
'''
x3dpHash = {}
try:
for r in models.Resource.objects.using(self.dbname).filter(resourcetype__name='x3dplayback').values(
'uristring', 'name', 'value', 'activityresource__activity__name'):
ms = models.Measurement.objects.using(self.dbname).filter(instantpoint__activity__name=r['activityresource__activity__name'])
try:
x3dpHash[r['uristring']][r['name']] = r['value']
x3dpHash[r['uristring']]['startGeoCoords'] = '%s %s %s' % (ms[0].geom.y, ms[0].geom.x, -ms[0].depth)
except KeyError:
x3dpHash[r['uristring']] = {}
x3dpHash[r['uristring']][r['name']] = r['value']
x3dpHash[r['uristring']]['startGeoCoords'] = '%s %s %s' % (ms[0].geom.y, ms[0].geom.x, -ms[0].depth)
except DatabaseError as e:
logger.warn('No resourcetype__name of x3dplayback in %s: %s', self.dbname, e)
return x3dpHash
def getResources(self):
'''
Query ActivityResources for Resources remaining in Activity selection
'''
netcdfHash = {}
# Simple name/value attributes
for ar in models.ActivityResource.objects.using(self.dbname).filter(activity__in=self.qs
,resource__name__in=['title', 'summary', 'opendap_url']
).values('activity__platform__name', 'activity__name', 'activity__comment', 'resource__name', 'resource__value'):
try:
netcdfHash[ar['activity__platform__name']][ar['activity__name']][ar['resource__name']] = ar['resource__value']
netcdfHash[ar['activity__platform__name']][ar['activity__name']]['comment'] = ar['activity__comment']
except KeyError:
try:
netcdfHash[ar['activity__platform__name']][ar['activity__name']] = {}
except KeyError:
netcdfHash[ar['activity__platform__name']] = {}
netcdfHash[ar['activity__platform__name']][ar['activity__name']] = {}
netcdfHash[ar['activity__platform__name']][ar['activity__name']][ar['resource__name']] = ar['resource__value']
netcdfHash[ar['activity__platform__name']][ar['activity__name']]['comment'] = ar['activity__comment']
# Quick Look plots
qlHash = {}
for ar in models.ActivityResource.objects.using(self.dbname).filter(activity__in=self.qs, resource__resourcetype__name='quick_look').values(
'activity__platform__name', 'activity__name', 'resource__name', 'resource__uristring'):
try:
qlHash[ar['activity__platform__name']][ar['activity__name']][ar['resource__name']] = ar['resource__uristring']
except KeyError:
try:
qlHash[ar['activity__platform__name']][ar['activity__name']] = {}
except KeyError:
qlHash[ar['activity__platform__name']] = {}
qlHash[ar['activity__platform__name']][ar['activity__name']] = {}
qlHash[ar['activity__platform__name']][ar['activity__name']][ar['resource__name']] = ar['resource__uristring']
return {'netcdf': netcdfHash, 'quick_look': qlHash}
def getAttributes(self):
'''
Query for "Attributes" which are specific ResourceTypes or fields of other classes. Initially for tagged measurements
        and for finding comments about Samples, but can encompass any other way a STOQS database may be filtered or searched.
'''
measurementHash = {}
sources = models.ResourceResource.objects.using(self.dbname).filter(toresource__name=COMMANDLINE
).values_list('fromresource__resourcetype__name', 'toresource__value').distinct()
if sources:
measurementHash['commandlines'] = dict((s[0], s[1]) for s in sources)
for mpr in models.MeasuredParameterResource.objects.using(self.dbname).filter(activity__in=self.qs
,resource__name__in=[LABEL]).values( 'resource__resourcetype__name', 'resource__value',
'resource__id').distinct().order_by('resource__value'):
# Include all description resources associated with this label
descriptions = ' '.join(models.ResourceResource.objects.using(self.dbname).filter(fromresource__id=mpr['resource__id'],
toresource__name=DESCRIPTION).values_list('toresource__value', flat=True))
try:
measurementHash[mpr['resource__resourcetype__name']].append((mpr['resource__id'], mpr['resource__value'], descriptions))
except KeyError:
measurementHash[mpr['resource__resourcetype__name']] = []
measurementHash[mpr['resource__resourcetype__name']].append((mpr['resource__id'], mpr['resource__value'], descriptions))
return {'measurement': measurementHash}
#
# Methods that generate Q objects used to populate the query.
#
def _sampledparametersgroupQ(self, parameterid, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. This should
ensure that our result doesn't contain any parameter names that were not selected.
We use id for sampledparametersgroup as the name may contain special characters.
'''
q = Q()
if parameterid is None:
return q
else:
if fromTable == 'Activity':
q = Q(activityparameter__parameter__id__in=parameterid)
elif fromTable == 'Sample':
q = Q(sampledparameter__parameter__id__in=parameterid)
elif fromTable == 'ActivityParameter':
q = Q(parameter__id__in=parameterid)
elif fromTable == 'ActivityParameterHistogram':
q = Q(activityparameter__parameter__id__in=parameterid)
return q
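        # Illustrative use (hypothetical parameter ids) - the Q is ANDed into the
        # current queryset as a filter:
        #   self.qs = self.qs.filter(self._sampledparametersgroupQ([7, 42], fromTable='Activity'))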
def _measuredparametersgroupQ(self, parametername, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. This should
ensure that our result doesn't contain any parameter names that were not selected.
'''
q = Q()
if parametername is None:
return q
else:
if fromTable == 'Activity':
q = Q(activityparameter__parameter__name__in=parametername)
elif fromTable == 'Sample':
# Use sub-query to find all Samples from Activities that are in the existing Activity queryset
# Note: must do the monkey patch in __init__() so that Django's django/db/models/sql/query.py
# statement "sql, params = self.get_compiler(DEFAULT_DB_ALIAS).as_sql()" uses the right connection.
# This is not a Django bug according to source code comment at:
# https://github.com/django/django/blob/master/django/db/models/sql/query.py
q = Q(instantpoint__activity__in=self.qs)
elif fromTable == 'ActivityParameter':
# Use sub-query to restrict ActivityParameters to those that are in the list of Activities in the selection
q = Q(activity__in=self.qs)
elif fromTable == 'ActivityParameterHistogram':
# Use sub-query to find all ActivityParameterHistogram from Activities that are in the existing Activity queryset
q = Q(activityparameter__activity__in=self.qs)
return q
def _parameterstandardnameQ(self, parameterstandardname, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. This should
ensure that our result doesn't contain any parameter standard_names that were not selected.
'''
q = Q()
if parameterstandardname is None:
return q
else:
if fromTable == 'Activity':
q = Q(activityparameter__parameter__standard_name__in=parameterstandardname)
elif fromTable == 'Sample':
# Use sub-query to find all Samples from Activities that are in the existing Activity queryset
q = Q(instantpoint__activity__in=self.qs)
elif fromTable == 'ActivityParameter':
q = Q(activity__in=self.qs)
elif fromTable == 'ActivityParameterHistogram':
# Use sub-query to find all ActivityParameterHistogram from Activities that are in the existing Activity queryset
q = Q(activityparameter__activity__in=self.qs)
return q
def _platformsQ(self, platforms, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. This will ensure that we
only generate the other values/sets for platforms that were selected.
'''
q = Q()
if platforms is None:
return q
else:
if fromTable == 'Activity':
q = Q(platform__name__in=platforms)
elif fromTable == 'Sample':
# Use sub-query to find all Samples from Activities that are in the existing Activity queryset
q = Q(instantpoint__activity__in=self.qs)
elif fromTable == 'ActivityParameter':
q = Q(activity__in=self.qs)
elif fromTable == 'ActivityParameterHistogram':
# Use sub-query to find all ActivityParameterHistogram from Activities that are in the existing Activity queryset
q = Q(activityparameter__activity__in=self.qs)
return q
def _timeQ(self, times, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. This ensures that we limit
things down based on the time range selected by the user.
'''
q = Q()
if not times:
return q
if times[0] is not None:
if fromTable == 'Activity':
q = Q(enddate__gte=times[0])
elif fromTable == 'Sample':
q = Q(instantpoint__timevalue__gte=times[0])
elif fromTable == 'ActivityParameter':
q = Q(activity__enddate__gte=times[0])
elif fromTable == 'ActivityParameterHistogram':
q = Q(activityparameter__activity__enddate__gte=times[0])
if times[1] is not None:
if fromTable == 'Activity':
q = q & Q(startdate__lte=times[1])
elif fromTable == 'Sample':
q = q & Q(instantpoint__timevalue__lte=times[1])
elif fromTable == 'ActivityParameter':
q = q & Q(activity__startdate__lte=times[1])
elif fromTable == 'ActivityParameterHistogram':
q = q & Q(activityparameter__activity__startdate__lte=times[1])
return q
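        # Illustrative result (hypothetical datetimes dt1, dt2): for fromTable='Activity'
        # and times = (dt1, dt2) this evaluates to Q(enddate__gte=dt1) & Q(startdate__lte=dt2),
        # i.e. any Activity whose time range overlaps the selected window.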
def _depthQ(self, depth, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. Once again, we want
to make sure that we only generate the "leftover" components based on the selected depth
range.
'''
q = Q()
if not depth:
return q
if depth[0] is not None:
if fromTable == 'Activity':
q = Q(maxdepth__gte=depth[0])
elif fromTable == 'Sample':
q = Q(depth__gte=depth[0])
elif fromTable == 'ActivityParameter':
q = Q(activity__maxdepth__gte=depth[0])
elif fromTable == 'ActivityParameterHistogram':
q = Q(activityparameter__activity__maxdepth__gte=depth[0])
if depth[1] is not None:
if fromTable == 'Activity':
q = q & Q(mindepth__lte=depth[1])
elif fromTable == 'Sample':
q = q & Q(depth__lte=depth[1])
elif fromTable == 'ActivityParameter':
q = q & Q(activity__mindepth__lte=depth[1])
elif fromTable == 'ActivityParameterHistogram':
q = q & Q(activityparameter__activity__mindepth__lte=depth[1])
return q
def _mplabelsQ(self, resourceids, fromTable='Activity'):
'''
Build a Q object to be added to the current queryset as a filter. This will ensure that we
only generate the other values/sets for attributes (initially resources that have names of 'label'
that are MeasuredParameter labels) that were selected.
'''
q = Q()
if not resourceids:
return q
else:
if fromTable == 'Activity':
q = Q(id__in=models.MeasuredParameterResource.objects.using(self.dbname).filter(
resource__id__in=resourceids).values_list('activity__id').distinct())
elif fromTable == 'ActivityParameter':
q = Q(activity__id__in=models.MeasuredParameterResource.objects.using(self.dbname).filter(
resource__id__in=resourceids).values_list('activity__id').distinct())
return q
def _trajectoryQ(self):
'''
Return Q object that is True if the activity is of featureType trajectory
'''
# Restrict selection to Activities that are trajectories. Can have pre CF-1.6 UCDD and CF-1.6 and later metadata.
udcc_q1 = Q(activityresource__resource__name__iexact='thredds_data_type') & Q(activityresource__resource__value__iexact='Trajectory')
udcc_q2 = Q(activityresource__resource__name__iexact='cdm_data_type') & Q(activityresource__resource__value__iexact='trajectory')
udcc_q3 = Q(activityresource__resource__name__iexact='CF%3afeatureType') & Q(activityresource__resource__value__iexact='trajectory')
udcc_q4 = Q(activityresource__resource__name__iexact='CF_featureType') & Q(activityresource__resource__value__iexact='trajectory')
cf16_q = Q(activityresource__resource__name__iexact='featureType') & Q(activityresource__resource__value__iexact='trajectory')
q = (udcc_q1 | udcc_q2 | udcc_q3 | udcc_q4 | cf16_q)
return q
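        # Sketch of intended use - restrict an Activity queryset to trajectories:
        #   trajectory_qs = models.Activity.objects.using(self.dbname).filter(self._trajectoryQ())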
def _timeSeriesQ(self):
'''
Return Q object that is True if the activity is of featureType timeSeries
'''
        # Restrict selection to Activities that are timeSeries. Can have pre CF-1.6 UCDD and CF-1.6 and later metadata.
udcc_q1 = Q(activityresource__resource__name__iexact='thredds_data_type') & Q(activityresource__resource__value__iexact='station')
udcc_q2 = Q(activityresource__resource__name__iexact='cdm_data_type') & Q(activityresource__resource__value__iexact='station')
cf16_q = Q(activityresource__resource__name__iexact='featureType') & Q(activityresource__resource__value__iexact='timeSeries')
q = (udcc_q1 | udcc_q2 | cf16_q)
return q
def _timeSeriesProfileQ(self):
'''
        Return Q object that is True if the activity is of featureType timeSeriesProfile
'''
        # Restrict selection to Activities that are timeSeriesProfiles. Can have pre CF-1.6 UCDD and CF-1.6 and later metadata.
udcc_q1 = Q(activityresource__resource__name__iexact='thredds_data_type') & Q(activityresource__resource__value__iexact='station')
udcc_q2 = Q(activityresource__resource__name__iexact='cdm_data_type') & Q(activityresource__resource__value__iexact='station')
cf16_q = Q(activityresource__resource__name__iexact='featureType') & Q(activityresource__resource__value__iexact='timeSeriesProfile')
q = (udcc_q1 | udcc_q2 | cf16_q)
return q
def _trajectoryProfileQ(self):
'''
        Return Q object that is True if the activity is of featureType trajectoryProfile
'''
# Restrict selection to Activities that are trajectoryProfiles - a featureType new in CF-1.6
cf16_q = Q(activityresource__resource__name__iexact='featureType') & Q(activityresource__resource__value__iexact='trajectoryProfile')
q = (cf16_q)
return q
#
# Methods to get the query used based on the current Q object.
#
def getSQLWhere(self):
'''
This method will generate a pseudo-query, and then normalize it to a standard SQL query. While for
PostgreSQL this is usually the actual query, we might need to massage it a bit to handle quoting
issues and such. The string representation of the queryset's query attribute gives us the query.
This is really useful when we want to generate a new mapfile based on the current query result. We just want
the WHERE clause of the query, since that's where the predicate exists.
'''
querystring = str(self.qs.query)
return querystring
def getActivityGeoQuery(self, Q_object = None, pointFlag=False):
'''
        This method generates a string that can be put into a Mapserver mapfile DATA statement.
It is for returning Activities. If @param pointFlag is True then postgresifySQL() will
deliver the mappoint field as geom, otherwise it will deliver maptrack (trajectory) as geom.
'''
qs = self.qs
# Add any more filters (Q objects) if specified
if Q_object:
qs = qs.filter(Q_object)
# Query for mapserver
geo_query = 'geom from (%s) as subquery using unique gid using srid=4326' % postgresifySQL(qs.query, pointFlag).rstrip()
return geo_query
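        # The returned string plugs into a mapfile LAYER, e.g. (subquery SQL elided,
        # table name hypothetical):
        #   DATA "geom from (SELECT ... FROM stoqs_activity ...) as subquery using unique gid using srid=4326"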
def getSampleGeoQuery(self, Q_object = None):
'''
        This method generates a string that can be put into a Mapserver mapfile DATA statement.
It is for returning Samples.
'''
qs = self.sample_qs
if not qs:
return ''
# Add any more filters (Q objects) if specified
if Q_object:
qs = self.sample_qs.using(self.dbname).filter(Q_object)
# Query for mapserver
geo_query = 'geom from (%s) as subquery using unique gid using srid=4326' % postgresifySQL(qs.query, sampleFlag=True)
logger.debug('geo_query = %s', geo_query)
return geo_query
def getSampleExtent(self, geoqueryset, srid=4326):
"""
Accepts a GeoQuerySet and SRID.
Returns the extent as a GEOS object in the Google Maps projection.
The result can be directly passed out for direct use in OpenLayers.
"""
area = geoqueryset.area()
extent = fromstr('MULTIPOINT (%s %s, %s %s)' % geoqueryset.extent(), srid=srid)
ul = extent[0]
lr = extent[1]
dist = ul.distance(lr)
# if the points are all in one location then expand the extent so openlayers
# will zoom to something that is visible
if not dist:
ul.x = ul.x-0.15
ul.y = ul.y+0.15
lr.x = lr.x+0.15
lr.y = lr.y-0.15
extent = MultiPoint(ul,lr)
extent.srid = srid
extent.transform(self.spherical_mercator_srid)
return extent
def getExtent(self, srid=4326, outputSRID=spherical_mercator_srid):
'''
Return GEOSGeometry extent of all the geometry contained in the Activity and Sample geoquerysets.
        The result can be passed out for direct use in OpenLayers.
'''
extent = None
# Check all geometry types encountered in Activity GeoQuerySet in priority order
extentList = []
        for geom_field in ('maptrack', 'mappoint', 'plannedtrack'):
try:
extentList.append(self.qs.extent(field_name=geom_field))
except DatabaseError:
logger.warn('Database %s does not have field %s', self.dbname, geom_field)
except TypeError:
pass
##logger.debug('Field %s is Null in Activity GeoQuerySet: %s', geom_field, str(self.qs) )
# Append the Sample geometries
try:
sqs = self.getSampleQS()
extentList.append(sqs.extent(field_name='geom'))
except:
logger.debug('Could not get an extent for Sample GeoQuerySet')
# Take the union of all geometry types found in Activities and Samples
logger.debug("Collected %d geometry extents from Activities and Samples", len(extentList))
if extentList:
logger.debug('extentList = %s', extentList)
# Initialize geom_union with first not None extent
for index, ext in enumerate(extentList):
if ext is not None:
geom_union = fromstr('LINESTRING (%s %s, %s %s)' % ext, srid=srid)
break
# Union additional extents
            for extent in extentList[index + 1:]:
if extent is not None:
if extent[0] == extent[2] and extent[1] == extent[3]:
logger.debug('Unioning extent = %s as a POINT', extent)
geom_union = geom_union.union(fromstr('POINT (%s %s)' % extent[:2], srid=srid))
else:
logger.debug('Unioning extent = %s as a LINESTRING', extent)
geom_union = geom_union.union(fromstr('LINESTRING (%s %s, %s %s)' % extent, srid=srid))
# Aggressive try/excepts done here for better reporting on the production servers
try:
logger.debug('Final geom_union = %s', geom_union)
except UnboundLocalError:
logger.exception('geom_union could not be set from extentList = %s', extentList)
return ([], None, None)
try:
geomstr = 'LINESTRING (%s %s, %s %s)' % geom_union.extent
except TypeError:
logger.exception('Tried to get extent for self.qs.query = %s, but failed. Check the database loader and make sure a geometry type (maptrack or mappoint) is assigned for each activity.', str(self.qs.query))
except ValueError:
logger.exception('Tried to get extent for self.qs.query = %s, but failed. Check the database loader and make sure a geometry type (maptrack or mappoint) is assigned for each activity.', str(self.qs.query))
else:
logger.debug('geomstr = %s', geomstr)
try:
extent = fromstr(geomstr, srid=srid)
except:
logger.exception('Could not get extent for geomstr = %s, srid = %d', geomstr, srid)
# Compute midpoint of extent for use in GeoViewpoint for Oculus Rift viewpoint setting
lon_midpoint = (extent[0][0] + extent[1][0]) / 2.0
lat_midpoint = (extent[0][1] + extent[1][1]) / 2.0
try:
extent.transform(outputSRID)
except:
                    logger.exception('Cannot transform to %s for geomstr = %s, srid = %d', outputSRID, geomstr, srid)
return (extent, lon_midpoint, lat_midpoint)
| josephmfaulkner/stoqs | stoqs/utils/STOQSQManager.py | Python | gpl-3.0 | 105,831 |
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
datatypes_repository_name = 'blast_datatypes_0120'
datatypes_repository_description = 'Galaxy applicable datatypes for BLAST'
datatypes_repository_long_description = 'Galaxy datatypes for the BLAST top hit descriptions tool'
tool_repository_name = 'blastxml_to_top_descr_0120'
tool_repository_description = 'BLAST top hit descriptions'
tool_repository_long_description = 'Make a table from BLAST XML'
'''
Tool shed side:
1) Create and populate blast_datatypes_0120.
1a) Check for appropriate strings.
2) Create and populate blastxml_to_top_descr_0120.
2a) Check for appropriate strings.
3) Upload repository_dependencies.xml to blastxml_to_top_descr_0120 that defines a relationship to blast_datatypes_0120.
3a) Check for appropriate strings.
'''
base_datatypes_count = 0
repository_datatypes_count = 0
class TestRepositoryMultipleOwners( ShedTwillTestCase ):
def test_0000_initiate_users( self ):
"""Create necessary user accounts and login as an admin user."""
"""
Create all the user accounts that are needed for this test script to run independently of other tests.
Previously created accounts will not be re-created.
"""
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = self.test_db_util.get_user( common.test_user_1_email )
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
test_user_1_private_role = self.test_db_util.get_private_role( test_user_1 )
self.logout()
self.login( email=common.test_user_2_email, username=common.test_user_2_name )
        test_user_2 = self.test_db_util.get_user( common.test_user_2_email )
assert test_user_2 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_2_email
test_user_2_private_role = self.test_db_util.get_private_role( test_user_2 )
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = self.test_db_util.get_user( common.admin_email )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
admin_user_private_role = self.test_db_util.get_private_role( admin_user )
def test_0005_create_datatypes_repository( self ):
"""Create and populate the blast_datatypes_0120 repository"""
"""
We are at step 1.
Create and populate blast_datatypes.
"""
category = self.create_category( name='Test 0120', description='Description of test 0120' )
self.logout()
self.login( email=common.test_user_2_email, username=common.test_user_2_name )
strings_displayed = [ 'Repository %s' % "'%s'" % datatypes_repository_name,
'Repository %s has been created' % "<b>%s</b>" % datatypes_repository_name ]
repository = self.get_or_create_repository( name=datatypes_repository_name,
description=datatypes_repository_description,
long_description=datatypes_repository_long_description,
owner=common.test_user_2_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=strings_displayed )
self.upload_file( repository,
filename='blast/blast_datatypes.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded blast_datatypes tarball.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0010_verify_datatypes_repository( self ):
'''Verify the blast_datatypes_0120 repository.'''
'''
We are at step 1a.
Check for appropriate strings, most importantly BlastXml, BlastNucDb, and BlastProtDb,
the datatypes that are defined in datatypes_conf.xml.
'''
global repository_datatypes_count
repository = self.test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_2_name )
strings_displayed = [ 'BlastXml', 'BlastNucDb', 'BlastProtDb', 'application/xml', 'text/html', 'blastxml', 'blastdbn', 'blastdbp']
self.display_manage_repository_page( repository, strings_displayed=strings_displayed )
repository_datatypes_count = int( self.get_repository_datatypes_count( repository ) )
def test_0015_create_tool_repository( self ):
"""Create and populate the blastxml_to_top_descr_0120 repository"""
"""
We are at step 2.
Create and populate blastxml_to_top_descr_0120.
"""
category = self.create_category( name='Test 0120', description='Description of test 0120' )
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
strings_displayed = [ 'Repository %s' % "'%s'" % tool_repository_name,
'Repository %s has been created' % "<b>%s</b>" % tool_repository_name ]
repository = self.get_or_create_repository( name=tool_repository_name,
description=tool_repository_description,
long_description=tool_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=strings_displayed )
self.upload_file( repository,
filename='blast/blastxml_to_top_descr.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded blastxml_to_top_descr tarball.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0020_verify_tool_repository( self ):
'''Verify the blastxml_to_top_descr_0120 repository.'''
'''
We are at step 2a.
Check for appropriate strings, such as tool name, description, and version.
'''
repository = self.test_db_util.get_repository_by_name_and_owner( tool_repository_name, common.test_user_1_name )
strings_displayed = [ 'blastxml_to_top_descr_0120', 'BLAST top hit descriptions', 'Make a table from BLAST XML' ]
strings_displayed.extend( [ '0.0.1', 'Valid tools'] )
self.display_manage_repository_page( repository, strings_displayed=strings_displayed )
def test_0025_create_repository_dependency( self ):
'''Create a repository dependency on blast_datatypes_0120.'''
'''
We are at step 3.
Create a simple repository dependency for blastxml_to_top_descr_0120 that defines a dependency on blast_datatypes_0120.
'''
datatypes_repository = self.test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_2_name )
tool_repository = self.test_db_util.get_repository_by_name_and_owner( tool_repository_name, common.test_user_1_name )
dependency_xml_path = self.generate_temp_path( 'test_0120', additional_paths=[ 'dependencies' ] )
datatypes_tuple = ( self.url, datatypes_repository.name, datatypes_repository.user.username, self.get_repository_tip( datatypes_repository ) )
self.create_repository_dependency( repository=tool_repository, repository_tuples=[ datatypes_tuple ], filepath=dependency_xml_path )
def test_0040_verify_repository_dependency( self ):
'''Verify the created repository dependency.'''
'''
We are at step 3a.
Check the newly created repository dependency to ensure that it was defined and displays correctly.
'''
datatypes_repository = self.test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_2_name )
tool_repository = self.test_db_util.get_repository_by_name_and_owner( tool_repository_name, common.test_user_1_name )
self.check_repository_dependency( tool_repository, datatypes_repository )
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/test/tool_shed/functional/test_0120_simple_repository_dependency_multiple_owners.py | Python | gpl-3.0 | 8,820 |
import json
import pprint
from a2qt import QtWidgets
from a2widget.key_value_table import KeyValueTable
from a2widget.a2text_field import A2CodeField
_DEMO_DATA = {
'Name': 'Some Body',
'Surname': 'Body',
'Street. Nr': 'Thingstreet 8',
'Street': 'Thingstreet',
'Nr': '8',
'PLZ': '12354',
'City': 'Frankfurt am Main',
'Phone+': '+1232222222',
'Phone': '2222222',
'Country': 'Germany',
}
class Demo(QtWidgets.QMainWindow):
def __init__(self):
super(Demo, self).__init__()
w = QtWidgets.QWidget(self)
self.setCentralWidget(w)
lyt = QtWidgets.QVBoxLayout(w)
self.key_value_table = KeyValueTable(self)
self.key_value_table.changed.connect(self.table_to_code)
lyt.addWidget(self.key_value_table)
btn = QtWidgets.QPushButton('GET DATA')
btn.clicked.connect(self.get_data)
lyt.addWidget(btn)
self.text_field = A2CodeField(self)
self.text_field.text_changed.connect(self.code_to_table)
lyt.addWidget(self.text_field)
btn = QtWidgets.QPushButton('SET DATA')
btn.clicked.connect(self.set_data)
lyt.addWidget(btn)
self.text_field.setText(json.dumps(_DEMO_DATA, indent=2))
self.set_data()
def table_to_code(self):
data = self.key_value_table.get_data()
self.text_field.setText(json.dumps(data, indent=2))
def code_to_table(self):
data = json.loads(self.text_field.text())
self.key_value_table.set_silent(data)
def get_data(self):
data = self.key_value_table.get_data()
print(data)
pprint.pprint(data, sort_dicts=False)
def set_data(self):
data = json.loads(self.text_field.text())
self.key_value_table.set_data(data)
def show():
app = QtWidgets.QApplication([])
win = Demo()
win.show()
app.exec()
if __name__ == '__main__':
show()
| ewerybody/a2 | ui/a2widget/demo/key_value_table_demo.py | Python | gpl-3.0 | 1,931 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@date: 2017-04-06
@author: Heysion Yuan
@copyright: 2017, Heysion Yuan <heysions@gmail.com>
@license: GPLv3
'''
from dab.webui import WebBase
from dab.core.db.models import Task
class TaskIndex(WebBase):
def get(self):
dataset = self.get_task_top_all()
self.render("taskindex.html",tasklist=dataset)
# task_items = [
# {"id":1,"name":"deepin-auto-build","createtime":"2017","state":"success","resultinfo":"info"},
# {"id":2,"name":"deepin-auto-build","createtime":"2017","state":"success","resultinfo":"info"}
# ]
# self.render("task.html", tasklist=task_items)
# pass
def get_task_top_all(self):
        dataset = Task.select(Task.id, Task.src_name,
                              Task.create_time, Task.state,
                              Task.owner_name)
        datalist = []
        if dataset:
            for data in dataset:
                datalist.append({"id": data.id, "name": data.src_name,
                                 "createtime": data.create_time, "state": data.state,
                                 "resultinfo": data.owner_name})
        return datalist
class TaskInfo(WebBase):
pass
class TaskNew(WebBase):
def get(self):
self.render("tasknew.html")
pass
def post(self):
req_data = { k: self.get_argument(k) for k in self.request.arguments }
if not ("arches" in req_data.keys()):
self.render("404.html")
if not ("name" in req_data and req_data["name"] is not None) :
self.render("404.html")
self.save_new_task(req_data)
self.render("/taskindex")
def save_new_task(self,data):
new_task = Task.select(Task.name).where(Task.name==data["name"])
if not new_task :
new_task = Task.create(name=data["name"],
suite=data["suite"],
codename=data["codename"],
architectures=data["arches"],
workdir=data["workbase"],
description=data["description"])
new_task.save()
else:
return None
return new_task
| heysion/deepin-auto-build | dab/webui/taskctrl.py | Python | gpl-3.0 | 2,297 |
#
# Copyright (C) 2003-2022 Sébastien Helleu <flashcode@flashtux.org>
#
# This file is part of WeeChat.org.
#
# WeeChat.org is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# WeeChat.org is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WeeChat.org. If not, see <https://www.gnu.org/licenses/>.
#
"""Some useful views."""
from django.views.generic import TemplateView
class TextTemplateView(TemplateView):
"""View for a plain text file."""
def render_to_response(self, context, **response_kwargs):
response_kwargs['content_type'] = 'text/plain'
return super().render_to_response(context, **response_kwargs)
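# A minimal usage sketch (hypothetical URLconf and template name):
#
#   from django.urls import path
#   from weechat.common.views import TextTemplateView
#
#   urlpatterns = [
#       path('robots.txt',
#            TextTemplateView.as_view(template_name='robots.txt')),
#   ]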
| weechat/weechat.org | weechat/common/views.py | Python | gpl-3.0 | 1,083 |
# coding=utf-8
# Author: Dennis Lutter <lad1337@gmail.com>
# Author: Jonathon Saine <thezoggy@gmail.com>
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function, unicode_literals
# Stdlib Imports
import abc
import datetime
import io
import os
import re
import time
import traceback
# Third Party Imports
import six
# noinspection PyUnresolvedReferences
from six.moves import urllib
from tornado.web import RequestHandler
# First Party Imports
import sickbeard
import sickchill
from sickbeard import classes, db, helpers, image_cache, logger, network_timezones, sbdatetime, search_queue, ui
from sickbeard.common import (ARCHIVED, DOWNLOADED, FAILED, IGNORED, Overview, Quality, SKIPPED, SNATCHED, SNATCHED_PROPER, statusStrings, UNAIRED, UNKNOWN,
WANTED)
from sickbeard.postProcessor import PROCESS_METHODS
from sickbeard.versionChecker import CheckVersion
from sickchill.helper.common import dateFormat, dateTimeFormat, pretty_file_size, sanitize_filename, timeFormat, try_int
from sickchill.helper.encoding import ek
from sickchill.helper.exceptions import CantUpdateShowException, ex, ShowDirectoryNotFoundException
from sickchill.helper.quality import get_quality_string
from sickchill.media.ShowBanner import ShowBanner
from sickchill.media.ShowFanArt import ShowFanArt
from sickchill.media.ShowNetworkLogo import ShowNetworkLogo
from sickchill.media.ShowPoster import ShowPoster
from sickchill.show.ComingEpisodes import ComingEpisodes
from sickchill.show.History import History
from sickchill.show.Show import Show
from sickchill.system.Restart import Restart
from sickchill.system.Shutdown import Shutdown
try:
import json
except ImportError:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import simplejson as json
indexer_ids = ["indexerid", "tvdbid"]
RESULT_SUCCESS = 10 # only use inside the run methods
RESULT_FAILURE = 20 # only use inside the run methods
RESULT_TIMEOUT = 30 # not used yet :(
RESULT_ERROR = 40 # only use outside of the run methods !
RESULT_FATAL = 50 # only use in Api.default() ! this is the "we encountered an internal error" error
RESULT_DENIED = 60 # only use in Api.default() ! this is the access denied error
result_type_map = {
RESULT_SUCCESS: "success",
RESULT_FAILURE: "failure",
RESULT_TIMEOUT: "timeout",
RESULT_ERROR: "error",
RESULT_FATAL: "fatal",
RESULT_DENIED: "denied",
}
# basically everything except RESULT_SUCCESS / success is bad
# noinspection PyAbstractClass
class ApiHandler(RequestHandler):
""" api class that returns json results """
version = 5 # use an int since float-point is unpredictable
def __init__(self, *args, **kwargs):
super(ApiHandler, self).__init__(*args, **kwargs)
# def set_default_headers(self):
# self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
def get(self, *args, **kwargs):
kwargs = self.request.arguments
# noinspection PyCompatibility
for arg, value in six.iteritems(kwargs):
if len(value) == 1:
kwargs[arg] = value[0]
args = args[1:]
# set the output callback
# default json
output_callback_dict = {
'default': self._out_as_json,
'image': self._out_as_image,
}
access_msg = "API :: " + self.request.remote_ip + " - gave correct API KEY. ACCESS GRANTED"
logger.log(access_msg, logger.DEBUG)
# set the original call_dispatcher as the local _call_dispatcher
_call_dispatcher = self.call_dispatcher
# if profile was set wrap "_call_dispatcher" in the profile function
if 'profile' in kwargs:
from profilehooks import profile
_call_dispatcher = profile(_call_dispatcher, immediate=True)
del kwargs["profile"]
try:
out_dict = _call_dispatcher(args, kwargs)
except Exception as e: # real internal error oohhh nooo :(
logger.log("API :: " + ex(e), logger.ERROR)
error_data = {
"error_msg": ex(e),
"args": args,
"kwargs": kwargs
}
out_dict = _responds(RESULT_FATAL, error_data,
"SickChill encountered an internal error! Please report to the Devs")
if 'outputType' in out_dict:
output_callback = output_callback_dict[out_dict['outputType']]
else:
output_callback = output_callback_dict['default']
# noinspection PyBroadException
try:
self.finish(output_callback(out_dict))
except Exception:
pass
def _out_as_image(self, _dict):
self.set_header('Content-Type'.encode('utf-8'), _dict['image'].get_media_type())
return _dict['image'].get_media()
def _out_as_json(self, _dict):
self.set_header("Content-Type".encode('utf-8'), "application/json;charset=UTF-8")
try:
out = json.dumps(_dict, ensure_ascii=False, sort_keys=True)
callback = self.get_query_argument('callback', None) or self.get_query_argument('jsonp', None)
if callback:
out = callback + '(' + out + ');' # wrap with JSONP call if requested
except Exception as e: # if we fail to generate the output fake an error
logger.log("API :: " + traceback.format_exc(), logger.DEBUG)
out = '{{"result": "{0}", "message": "error while composing output: {1}"}}'.format(result_type_map[RESULT_ERROR], ex(e))
return out
def call_dispatcher(self, args, kwargs): # pylint:disable=too-many-branches
""" calls the appropriate CMD class
looks for a cmd in args and kwargs
or calls the TVDBShorthandWrapper when the first args element is a number
or returns an error that there is no such cmd
"""
logger.log("API :: all args: '" + str(args) + "'", logger.DEBUG)
logger.log("API :: all kwargs: '" + str(kwargs) + "'", logger.DEBUG)
commands = None
if args:
commands, args = args[0], args[1:]
commands = kwargs.pop("cmd", commands)
out_dict = {}
if commands:
commands = commands.split("|")
multi_commands = len(commands) > 1
for cmd in commands:
cur_args, cur_kwargs = self.filter_params(cmd, args, kwargs)
if len(cmd.split("_")) > 1:
cmd, cmd_index = cmd.split("_")
else:
cmd_index = None
logger.log("API :: " + cmd + ": cur_kwargs " + str(cur_kwargs), logger.DEBUG)
if not (cmd in ('show.getbanner', 'show.getfanart', 'show.getnetworklogo', 'show.getposter') and
multi_commands): # skip these cmd while chaining
try:
if cmd in function_mapper:
func = function_mapper.get(cmd) # map function
to_call = func(cur_args, cur_kwargs)
to_call.rh = self
cur_out_dict = to_call.run() # call function and get response
elif _is_int(cmd):
to_call = TVDBShorthandWrapper(cur_args, cur_kwargs, cmd)
to_call.rh = self
cur_out_dict = to_call.run()
else:
cur_out_dict = _responds(RESULT_ERROR, "No such cmd: '" + cmd + "'")
except ApiError as error: # Api errors that we raised, they are harmless
cur_out_dict = _responds(RESULT_ERROR, msg=ex(error))
else: # if someone chained one of the forbidden commands they will get an error for this one cmd
cur_out_dict = _responds(RESULT_ERROR, msg="The cmd '" + cmd + "' is not supported while chaining")
if multi_commands:
# note: if duplicate commands are issued and one has an index defined it will override
# all others or the other way around, depending on the command order
# THIS IS NOT A BUG!
if cmd_index: # do we need an index dict for this cmd ?
if cmd not in out_dict:
out_dict[cmd] = {}
out_dict[cmd][cmd_index] = cur_out_dict
else:
out_dict[cmd] = cur_out_dict
else:
out_dict = cur_out_dict
if multi_commands: # if we had multiple commands we have to wrap it in a response dict
out_dict = _responds(RESULT_SUCCESS, out_dict)
else: # index / no cmd given
out_dict = CMDSickBeard(args, kwargs).run()
return out_dict
@staticmethod
def filter_params(cmd, args, kwargs):
""" return only params kwargs that are for cmd
and rename them to a clean version (remove "<cmd>_")
args are shared across all commands
all args and kwargs are lowered
cmd are separated by "|" e.g. &cmd=shows|future
kwargs are name-spaced with "." e.g. show.indexerid=101501
if a kwarg has no namespace asking it anyways (global)
full e.g.
/api?apikey=1234&cmd=show.seasonlist_asd|show.seasonlist_2&show.seasonlist_asd.indexerid=101501&show.seasonlist_2.indexerid=79488&sort=asc
two calls of show.seasonlist
one has the index "asd" the other one "2"
the "indexerid" kwargs / params have the indexed cmd as a namespace
and the kwarg / param "sort" is a used as a global
"""
cur_args = []
for arg in args:
cur_args.append(arg.lower())
cur_args = tuple(cur_args)
cur_kwargs = {}
for kwarg in kwargs:
if kwarg.find(cmd + ".") == 0:
clean_key = kwarg.rpartition(".")[2]
cur_kwargs[clean_key] = kwargs[kwarg].lower()
elif "." not in kwarg: # the kwarg was not name-spaced therefore a "global"
cur_kwargs[kwarg] = kwargs[kwarg]
return cur_args, cur_kwargs
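        # Worked example for the URL in the docstring above:
        #   filter_params('show.seasonlist_asd', (), kwargs)
        # yields cur_kwargs == {'indexerid': '101501', 'sort': 'asc'} - the name-spaced
        # kwarg loses its 'show.seasonlist_asd.' prefix, while 'sort' (no namespace)
        # passes through as a global.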
# noinspection PyAbstractClass
class ApiCall(ApiHandler):
_help = {"desc": "This command is not documented. Please report this to the developers."}
# noinspection PyMissingConstructor
def __init__(self, args, kwargs):
# TODO: Find out why this buggers up RequestHandler init if called
# super(ApiCall, self).__init__(args, kwargs)
self.rh = None
self.indexer = 1
self._missing = []
self._requiredParams = {}
self._optionalParams = {}
self.check_params(args, kwargs)
def run(self):
raise NotImplementedError()
def return_help(self):
for paramDict, paramType in [(self._requiredParams, "requiredParameters"),
(self._optionalParams, "optionalParameters")]:
if paramType in self._help:
for paramName in paramDict:
if paramName not in self._help[paramType]:
# noinspection PyUnresolvedReferences
self._help[paramType][paramName] = {}
if paramDict[paramName]["allowedValues"]:
# noinspection PyUnresolvedReferences
self._help[paramType][paramName]["allowedValues"] = paramDict[paramName]["allowedValues"]
else:
# noinspection PyUnresolvedReferences
self._help[paramType][paramName]["allowedValues"] = "see desc"
# noinspection PyUnresolvedReferences
self._help[paramType][paramName]["defaultValue"] = paramDict[paramName]["defaultValue"]
# noinspection PyUnresolvedReferences
self._help[paramType][paramName]["type"] = paramDict[paramName]["type"]
elif paramDict:
for paramName in paramDict:
self._help[paramType] = {}
# noinspection PyUnresolvedReferences
self._help[paramType][paramName] = paramDict[paramName]
else:
self._help[paramType] = {}
msg = "No description available"
if "desc" in self._help:
msg = self._help["desc"]
return _responds(RESULT_SUCCESS, self._help, msg)
def return_missing(self):
if len(self._missing) == 1:
msg = "The required parameter: '" + self._missing[0] + "' was not set"
else:
msg = "The required parameters: '" + "','".join(self._missing) + "' where not set"
return _responds(RESULT_ERROR, msg=msg)
def check_params(self, args, kwargs, key=None, default=None, required=None, arg_type=None, allowed_values=None):
""" function to check passed params for the shorthand wrapper
and to detect missing/required params
"""
# auto-select indexer
if key in indexer_ids:
if "tvdbid" in kwargs:
key = "tvdbid"
self.indexer = indexer_ids.index(key)
if key:
missing = True
org_default = default
if arg_type == "bool":
allowed_values = [0, 1]
if args:
default = args[0]
missing = False
args = args[1:]
if kwargs.get(key):
default = kwargs.get(key)
missing = False
key_value = {
"allowedValues": allowed_values,
"defaultValue": org_default,
"type": arg_type
}
if required:
self._requiredParams[key] = key_value
if missing and key not in self._missing:
self._missing.append(key)
else:
self._optionalParams[key] = key_value
if default:
default = self._check_param_type(default, key, arg_type)
self._check_param_value(default, key, allowed_values)
if self._missing:
setattr(self, "run", self.return_missing)
if 'help' in kwargs:
setattr(self, "run", self.return_help)
return default, args
@staticmethod
def _check_param_type(value, name, arg_type):
""" checks if value can be converted / parsed to arg_type
will raise an error on failure
or will convert it to arg_type and return new converted value
can check for:
- int: will be converted into int
- bool: will be converted to False / True
- list: will always return a list
- string: will do nothing for now
- ignore: will ignore it, just like "string"
"""
error = False
if arg_type == "int":
if _is_int(value):
value = int(value)
else:
error = True
elif arg_type == "bool":
if value in ("0", "1"):
value = bool(int(value))
elif value in ("true", "True", "TRUE"):
value = True
elif value in ("false", "False", "FALSE"):
value = False
elif value not in (True, False):
error = True
elif arg_type == "list":
value = value.split("|")
elif arg_type == "string":
pass
elif arg_type == "ignore":
pass
else:
logger.log('API :: Invalid param type: "{0}" can not be checked. Ignoring it.'.format(str(arg_type)), logger.ERROR)
if error:
# this is a real ApiError !!
raise ApiError('param "{0}" with given value "{1}" could not be parsed into "{2}"'.format(str(name), str(value), str(arg_type)))
return value
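        # Illustrative conversions (hypothetical param names):
        #   _check_param_type('1', 'season', 'int') -> 1
        #   _check_param_type('true', 'paused', 'bool') -> True
        #   _check_param_type('a|b', 'tags', 'list') -> ['a', 'b']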
@staticmethod
def _check_param_value(value, name, allowed_values):
""" will check if value (or all values in it ) are in allowed values
will raise an exception if value is "out of range"
if bool(allowed_value) is False a check is not performed and all values are excepted
"""
if allowed_values:
error = False
if isinstance(value, list):
for item in value:
if item not in allowed_values:
error = True
else:
if value not in allowed_values:
error = True
if error:
# this is effectively an ApiError; raising is the only way to abort here
raise ApiError("param: '" + str(name) + "' with given value: '" + str(
value) + "' is out of allowed range '" + str(allowed_values) + "'")
# noinspection PyAbstractClass
class TVDBShorthandWrapper(ApiCall):
_help = {"desc": "This is an internal function wrapper. Call the help command directly for more information."}
def __init__(self, args, kwargs, sid):
super(TVDBShorthandWrapper, self).__init__(args, kwargs)
self.origArgs = args
self.kwargs = kwargs
self.sid = sid
self.s, args = self.check_params(args, kwargs, "s", None, False, "ignore", [])
self.e, args = self.check_params(args, kwargs, "e", None, False, "ignore", [])
self.args = args
def run(self):
""" internal function wrapper """
args = (self.sid,) + self.origArgs
if self.e:
return CMDEpisode(args, self.kwargs).run()
elif self.s:
return CMDShowSeasons(args, self.kwargs).run()
else:
return CMDShow(args, self.kwargs).run()
# ###############################
# helper functions #
# ###############################
def _is_int(data):
try:
int(data)
except (TypeError, ValueError, OverflowError):
return False
else:
return True
def _rename_element(dict_obj, old_key, new_key):
try:
dict_obj[new_key] = dict_obj[old_key]
del dict_obj[old_key]
except (KeyError, ValueError, TypeError, NameError):  # KeyError covers a missing old_key
pass
return dict_obj
def _responds(result_type, data=None, msg=""):
"""
result is a string of a given "type" (success/failure/timeout/error)
message is a human-readable string and may be empty
data is either a dict or an array and may be empty
"""
return {
"result": result_type_map[result_type],
"message": msg,
"data": {} if not data else data
}
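# Illustrative response shape (not part of the original source), assuming
# result_type_map maps RESULT_SUCCESS to the string "success":
#   _responds(RESULT_SUCCESS, {"pid": 1234}, "Pong")
#   -> {"result": "success", "message": "Pong", "data": {"pid": 1234}}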
def _get_status_strings(s):
return statusStrings[s]
def _ordinal_to_datetime_form(ordinal):
# workaround for episodes with no air date
if int(ordinal) != 1:
date = datetime.date.fromordinal(ordinal)
else:
return ""
return date.strftime(dateTimeFormat)
def _ordinal_to_date_form(ordinal):
if int(ordinal) != 1:
date = datetime.date.fromordinal(ordinal)
else:
return ""
return date.strftime(dateFormat)
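# Illustrative conversion (not part of the original source), assuming
# dateFormat is "%Y-%m-%d": datetime.date(2020, 1, 1).toordinal() == 737425,
# so _ordinal_to_date_form(737425) -> "2020-01-01", while the sentinel
# ordinal 1 (an episode with no air date) -> "".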
def _history_date_to_datetime_form(time_string):
date = datetime.datetime.strptime(time_string, History.date_format)
return date.strftime(dateTimeFormat)
QUALITY_MAP = {
Quality.SDTV: 'sdtv',
'sdtv': Quality.SDTV,
Quality.SDDVD: 'sddvd',
'sddvd': Quality.SDDVD,
Quality.HDTV: 'hdtv',
'hdtv': Quality.HDTV,
Quality.RAWHDTV: 'rawhdtv',
'rawhdtv': Quality.RAWHDTV,
Quality.FULLHDTV: 'fullhdtv',
'fullhdtv': Quality.FULLHDTV,
Quality.HDWEBDL: 'hdwebdl',
'hdwebdl': Quality.HDWEBDL,
Quality.FULLHDWEBDL: 'fullhdwebdl',
'fullhdwebdl': Quality.FULLHDWEBDL,
Quality.HDBLURAY: 'hdbluray',
'hdbluray': Quality.HDBLURAY,
Quality.FULLHDBLURAY: 'fullhdbluray',
'fullhdbluray': Quality.FULLHDBLURAY,
Quality.UHD_4K_TV: 'uhd4ktv',
'uhd4ktv': Quality.UHD_4K_TV,
Quality.UHD_4K_BLURAY: 'uhd4kbluray',
'uhd4kbluray': Quality.UHD_4K_BLURAY,
Quality.UHD_4K_WEBDL: 'uhd4kwebdl',
'uhd4kwebdl': Quality.UHD_4K_WEBDL,
Quality.UHD_8K_TV: 'uhd8ktv',
'uhd8ktv': Quality.UHD_8K_TV,
Quality.UHD_8K_BLURAY: 'uhd8kbluray',
'uhd8kbluray': Quality.UHD_8K_BLURAY,
Quality.UHD_8K_WEBDL: 'uhd8kwebdl',
'uhd8kwebdl': Quality.UHD_8K_WEBDL,
Quality.UNKNOWN: 'unknown',
'unknown': Quality.UNKNOWN
}
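# Note: QUALITY_MAP is intentionally bidirectional. Quality constants map to
# their API string names (used when encoding responses in _map_quality) and
# the same strings map back to the constants (used when decoding user input,
# e.g. in CMDSickBeardSetDefaults).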
ALLOWED_QUALITY_LIST = [
"sdtv", "sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray",
"uhd4ktv", "uhd4kbluray", "uhd4kwebdl", "uhd8ktv", "uhd8kbluray", "uhd8kwebdl", "unknown"
]
PREFERRED_QUALITY_LIST = [
"sdtv", "sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray",
"uhd4ktv", "uhd4kbluray", "uhd4kwebdl", "uhd8ktv", "uhd8kbluray", "uhd8kwebdl"
]
def _map_quality(show_quality):
any_qualities = []
best_qualities = []
i_quality_id, a_quality_id = Quality.splitQuality(int(show_quality))
for quality in i_quality_id:
# conditional expression so QUALITY_MAP is never indexed with None
any_qualities.append(QUALITY_MAP[quality] if quality is not None else "N/A")
for quality in a_quality_id:
best_qualities.append(QUALITY_MAP[quality] if quality is not None else "N/A")
return any_qualities, best_qualities
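# Illustrative result (not part of the original source): if
# Quality.splitQuality(...) yields ([Quality.SDTV], [Quality.HDTV]), then
# _map_quality(show_quality) -> (['sdtv'], ['hdtv']).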
def _get_root_dirs():
if sickbeard.ROOT_DIRS == "":
return {}
root_dir = {}
root_dirs = sickbeard.ROOT_DIRS.split('|')
default_index = int(root_dirs[0])
root_dir["default_index"] = default_index
# remove the default_index value from the list (this fixes the offset)
root_dirs.pop(0)
if default_index >= len(root_dirs):
return {}
# clean up the list - replace %xx escapes by their single-character equivalent
root_dirs = [urllib.parse.unquote_plus(x) for x in root_dirs]
default_dir = root_dirs[default_index]
dir_list = []
for root_dir in root_dirs:
valid = 1
# noinspection PyBroadException
try:
ek(os.listdir, root_dir)
except Exception:
valid = 0
default = 0
if root_dir == default_dir:
default = 1
cur_dir = {
'valid': valid,
'location': root_dir,
'default': default
}
dir_list.append(cur_dir)
return dir_list
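# Illustrative layout (not part of the original source): sickbeard.ROOT_DIRS
# is a pipe-joined string whose first element is the default index, e.g.
# "1|/media/tv|/media/anime" -> default_index == 1, default dir
# "/media/anime", and _get_root_dirs() returns one
# {'valid': ..., 'location': ..., 'default': ...} dict per directory.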
class ApiError(Exception):
"""
Generic API error
"""
class IntParseError(Exception):
"""
A value could not be parsed into an int, but should be parseable as an int
"""
# -------------------------------------------------------------------------------------#
# noinspection PyAbstractClass
class CMDHelp(ApiCall):
_help = {
"desc": "Get help about a given command",
"optionalParameters": {
"subject": {"desc": "The name of the command to get the help of"},
}
}
def __init__(self, args, kwargs):
super(CMDHelp, self).__init__(args, kwargs)
self.subject, args = self.check_params(args, kwargs, "subject", "help", False, "string", function_mapper.keys())
def run(self):
""" Get help about a given command """
if self.subject in function_mapper:
out = _responds(RESULT_SUCCESS, function_mapper.get(self.subject)((), {"help": 1}).run())
else:
out = _responds(RESULT_FAILURE, msg="No such cmd")
return out
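# Illustrative request (not part of the original source; the URL prefix
# depends on deployment, but a typical SickChill API call looks like):
#   GET /api/<apikey>/?cmd=help&subject=show
# which invokes CMDShow with {"help": 1} and returns its parameter docs.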
# noinspection PyAbstractClass
class CMDComingEpisodes(ApiCall):
_help = {
"desc": "Get the coming episodes",
"optionalParameters": {
"sort": {"desc": "Change the sort order"},
"type": {"desc": "One or more categories of coming episodes, separated by |"},
"paused": {
"desc": "0 to exclude paused shows, 1 to include them, or omitted to use SickChill default value"
},
}
}
def __init__(self, args, kwargs):
super(CMDComingEpisodes, self).__init__(args, kwargs)
self.sort, args = self.check_params(args, kwargs, "sort", "date", False, "string", ComingEpisodes.sorts.keys())
self.type, args = self.check_params(args, kwargs, "type", '|'.join(ComingEpisodes.categories), False, "list",
ComingEpisodes.categories)
self.paused, args = self.check_params(args, kwargs, "paused", bool(sickbeard.COMING_EPS_DISPLAY_PAUSED), False,
"bool", [])
def run(self):
""" Get the coming episodes """
grouped_coming_episodes = ComingEpisodes.get_coming_episodes(self.type, self.sort, True, self.paused)
data = {section: [] for section in grouped_coming_episodes.keys()}
# noinspection PyCompatibility
for section, coming_episodes in six.iteritems(grouped_coming_episodes):
for coming_episode in coming_episodes:
data[section].append({
'airdate': coming_episode[b'airdate'],
'airs': coming_episode[b'airs'],
'ep_name': coming_episode[b'name'],
'ep_plot': coming_episode[b'description'],
'episode': coming_episode[b'episode'],
'indexerid': coming_episode[b'indexer_id'],
'network': coming_episode[b'network'],
'paused': coming_episode[b'paused'],
'quality': coming_episode[b'quality'],
'season': coming_episode[b'season'],
'show_name': coming_episode[b'show_name'],
'show_status': coming_episode[b'status'],
'tvdbid': coming_episode[b'tvdbid'],
'weekday': coming_episode[b'weekday']
})
return _responds(RESULT_SUCCESS, data)
# noinspection PyAbstractClass
class CMDEpisode(ApiCall):
_help = {
"desc": "Get detailed information about an episode",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
"season": {"desc": "The season number"},
"episode": {"desc": "The episode number"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
"full_path": {
"desc": "Return the full absolute show location (if valid, and True), or the relative show location"
},
}
}
def __init__(self, args, kwargs):
super(CMDEpisode, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
self.s, args = self.check_params(args, kwargs, "season", None, True, "int", [])
self.e, args = self.check_params(args, kwargs, "episode", None, True, "int", [])
self.fullPath, args = self.check_params(args, kwargs, "full_path", False, False, "bool", [])
def run(self):
""" Get detailed information about an episode """
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if not show_obj:
return _responds(RESULT_FAILURE, msg="Show not found")
main_db_con = db.DBConnection(row_type="dict")
# noinspection PyPep8
sql_results = main_db_con.select(
"SELECT name, description, airdate, status, location, file_size, release_name, subtitles FROM tv_episodes WHERE showid = ? AND episode = ? AND season = ?",
[self.indexerid, self.e, self.s])
if len(sql_results) != 1:
raise ApiError("Episode not found")
episode = sql_results[0]
# handle path options
# absolute vs relative vs broken
show_path = None
try:
show_path = show_obj.location
except ShowDirectoryNotFoundException:
pass
if not show_path: # show dir is broken ... episode path will be empty
episode[b"location"] = ""
elif not self.fullPath:
# slice by length because lstrip() would remove too much
show_path_length = len(show_path) + 1  # +1 strips the trailing path separator
episode[b"location"] = episode[b"location"][show_path_length:]
# convert stuff to human form
if try_int(episode[b'airdate'], 1) > 693595: # 1900
episode[b'airdate'] = sbdatetime.sbdatetime.sbfdate(sbdatetime.sbdatetime.convert_to_setting(
network_timezones.parse_date_time(int(episode[b'airdate']), show_obj.airs, show_obj.network)), d_preset=dateFormat)
else:
episode[b'airdate'] = 'Never'
status, quality = Quality.splitCompositeStatus(int(episode[b"status"]))
episode[b"status"] = _get_status_strings(status)
episode[b"quality"] = get_quality_string(quality)
episode[b"file_size_human"] = pretty_file_size(episode[b"file_size"])
return _responds(RESULT_SUCCESS, episode)
# noinspection PyAbstractClass
class CMDEpisodeSearch(ApiCall):
_help = {
"desc": "Search for an episode. The response might take some time.",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
"season": {"desc": "The season number"},
"episode": {"desc": "The episode number"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
}
}
def __init__(self, args, kwargs):
super(CMDEpisodeSearch, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
self.s, args = self.check_params(args, kwargs, "season", None, True, "int", [])
self.e, args = self.check_params(args, kwargs, "episode", None, True, "int", [])
def run(self):
""" Search for an episode """
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if not show_obj:
return _responds(RESULT_FAILURE, msg="Show not found")
# retrieve the episode object and fail if we can't get one
ep_obj = show_obj.getEpisode(self.s, self.e)
if isinstance(ep_obj, str):
return _responds(RESULT_FAILURE, msg="Episode not found")
# make a queue item for it and put it on the queue
ep_queue_item = search_queue.ManualSearchQueueItem(show_obj, ep_obj)
sickbeard.searchQueueScheduler.action.add_item(ep_queue_item) # @UndefinedVariable
# wait until the queue item tells us whether it worked or not
while ep_queue_item.success is None: # @UndefinedVariable
time.sleep(1)
# return the correct json value
if ep_queue_item.success:
status, quality = Quality.splitCompositeStatus(ep_obj.status) # @UnusedVariable
# TODO: split quality and status?
return _responds(RESULT_SUCCESS, {"quality": get_quality_string(quality)},
"Snatched (" + get_quality_string(quality) + ")")
return _responds(RESULT_FAILURE, msg='Unable to find episode')
class AbstractStartScheduler(ApiCall):
@property
@abc.abstractmethod
def scheduler(self):
raise NotImplementedError
@property
@abc.abstractmethod
def scheduler_class_str(self):
raise NotImplementedError
def run(self):
error_str = 'Start scheduler failed'
if not isinstance(self.scheduler, sickbeard.scheduler.Scheduler):
error_str = '{0}: {1} is not initialized as a static variable'.format(error_str, self.scheduler_class_str)
return _responds(RESULT_FAILURE, msg=error_str)
if not self.scheduler.enable:
error_str = '{0}: {1} is not enabled'.format(error_str, self.scheduler_class_str)
return _responds(RESULT_FAILURE, msg=error_str)
if not hasattr(self.scheduler.action, 'amActive'):
error_str = '{0}: {1} is not a valid action'.format(error_str, self.scheduler.action)
return _responds(RESULT_FAILURE, msg=error_str)
time_remain = self.scheduler.timeLeft()
# Force the search to start in order to skip the search interval check
if self.scheduler.forceRun():
cycle_time = self.scheduler.cycleTime
next_run = datetime.datetime.now() + cycle_time
result_str = 'Force run successful: {0} search underway. Time Remaining: {1}. ' \
'Next Run: {2}'.format(self.scheduler_class_str, time_remain, next_run)
return _responds(RESULT_SUCCESS, msg=result_str)
else:
# Scheduler is currently active
error_str = '{0}: {1} search underway. Time remaining: {2}.'.format(
error_str, self.scheduler_class_str, time_remain)
return _responds(RESULT_FAILURE, msg=error_str)
class CMDFullSubtitleSearch(AbstractStartScheduler):
def data_received(self, chunk):
pass
_help = {"desc": "Force a subtitle search for all shows."}
@property
def scheduler(self):
return sickbeard.subtitlesFinderScheduler
@property
def scheduler_class_str(self):
return 'sickbeard.subtitlesFinderScheduler'
class CMDProperSearch(AbstractStartScheduler):
def data_received(self, chunk):
pass
_help = {"desc": "Force a proper search for all shows."}
@property
def scheduler(self):
return sickbeard.properFinderScheduler
@property
def scheduler_class_str(self):
return 'sickbeard.properFinderScheduler'
class CMDDailySearch(AbstractStartScheduler):
def data_received(self, chunk):
pass
_help = {"desc": "Force a daily search for all shows."}
@property
def scheduler(self):
return sickbeard.dailySearchScheduler
@property
def scheduler_class_str(self):
return 'sickbeard.dailySearchScheduler'
# noinspection PyAbstractClass
class CMDEpisodeSetStatus(ApiCall):
_help = {
"desc": "Set the status of an episode or a season (when no episode is provided)",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
"season": {"desc": "The season number"},
"status": {"desc": "The status of the episode or season"}
},
"optionalParameters": {
"episode": {"desc": "The episode number"},
"force": {"desc": "True to replace existing downloaded episode or season, False otherwise"},
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
}
}
def __init__(self, args, kwargs):
super(CMDEpisodeSetStatus, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
self.s, args = self.check_params(args, kwargs, "season", None, True, "int", [])
self.status, args = self.check_params(args, kwargs, "status", None, True, "string",
["wanted", "skipped", "ignored", "failed"])
self.e, args = self.check_params(args, kwargs, "episode", None, False, "int", [])
self.force, args = self.check_params(args, kwargs, "force", False, False, "bool", [])
def run(self):
""" Set the status of an episode or a season (when no episode is provided) """
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if not show_obj:
return _responds(RESULT_FAILURE, msg="Show not found")
# convert the string status to a int
for status in statusStrings:
if str(statusStrings[status]).lower() == str(self.status).lower():
self.status = status
break
else: # the for loop completed without a break, so nothing matched
# allowed_values contains at least one item that could not be matched against the internal status strings
raise ApiError("The status string could not be matched to a status. Report to Devs!")
if self.e:
ep_obj = show_obj.getEpisode(self.s, self.e)
if not ep_obj:
return _responds(RESULT_FAILURE, msg="Episode not found")
ep_list = [ep_obj]
else:
# get all episode numbers from self, season
ep_list = show_obj.getAllEpisodes(season=self.s)
def _ep_result(result_code, ep, msg=""):
return {
'season': ep.season, 'episode': ep.episode, 'status': _get_status_strings(ep.status),
'result': result_type_map[result_code], 'message': msg
}
ep_results = []
failure = False
start_backlog = False
segments = {}
sql_l = []
for ep_obj in ep_list:
with ep_obj.lock:
if self.status == WANTED:
# figure out what episodes are wanted so we can backlog them
if ep_obj.season in segments:
segments[ep_obj.season].append(ep_obj)
else:
segments[ep_obj.season] = [ep_obj]
# don't let them mess up UN-AIRED episodes
if ep_obj.status == UNAIRED:
# noinspection PyPep8
if self.e is not None: # setting the status of an un-aired episode only counts as a failure when that episode was requested directly; it is ignored on a season request
ep_results.append(
_ep_result(RESULT_FAILURE, ep_obj, "Refusing to change status because it is UN-AIRED"))
failure = True
continue
if self.status == FAILED and not sickbeard.USE_FAILED_DOWNLOADS:
ep_results.append(_ep_result(RESULT_FAILURE, ep_obj, "Refusing to change status to FAILED because failed download handling is disabled"))
failure = True
continue
# allow the user to force setting the status for an already downloaded episode
if ep_obj.status in Quality.DOWNLOADED + Quality.ARCHIVED and not self.force:
ep_results.append(_ep_result(RESULT_FAILURE, ep_obj, "Refusing to change status because it is already marked as DOWNLOADED"))
failure = True
continue
ep_obj.status = self.status
sql_l.append(ep_obj.get_sql())
if self.status == WANTED:
start_backlog = True
ep_results.append(_ep_result(RESULT_SUCCESS, ep_obj))
if sql_l:
main_db_con = db.DBConnection()
main_db_con.mass_action(sql_l)
extra_msg = ""
if start_backlog:
# noinspection PyCompatibility
for season, segment in six.iteritems(segments):
cur_backlog_queue_item = search_queue.BacklogQueueItem(show_obj, segment)
sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item) # @UndefinedVariable
logger.log("API :: Starting backlog for " + show_obj.name + " season " + str(
season) + " because some episodes were set to WANTED")
extra_msg = " Backlog started"
if failure:
return _responds(RESULT_FAILURE, ep_results, 'Failed to set all or some status. Check data.' + extra_msg)
else:
return _responds(RESULT_SUCCESS, msg='All status set successfully.' + extra_msg)
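# Illustrative request (not part of the original source; assumes this class
# is registered in function_mapper under "episode.setstatus"):
#   ?cmd=episode.setstatus&indexerid=12345&season=2&status=wanted
# marks every episode of season 2 as WANTED and queues a backlog search for
# the affected segments.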
# noinspection PyAbstractClass
class CMDSubtitleSearch(ApiCall):
_help = {
"desc": "Search for an episode subtitles. The response might take some time.",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
"season": {"desc": "The season number"},
"episode": {"desc": "The episode number"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
}
}
def __init__(self, args, kwargs):
super(CMDSubtitleSearch, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
self.s, args = self.check_params(args, kwargs, "season", None, True, "int", [])
self.e, args = self.check_params(args, kwargs, "episode", None, True, "int", [])
def run(self):
""" Search for an episode subtitles """
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if not show_obj:
return _responds(RESULT_FAILURE, msg="Show not found")
# retrieve the episode object and fail if we can't get one
ep_obj = show_obj.getEpisode(self.s, self.e)
if isinstance(ep_obj, str):
return _responds(RESULT_FAILURE, msg="Episode not found")
# noinspection PyBroadException
try:
new_subtitles = ep_obj.download_subtitles()
except Exception:
return _responds(RESULT_FAILURE, msg='Unable to find subtitles')
if new_subtitles:
new_languages = [sickbeard.subtitles.name_from_code(code) for code in new_subtitles]
status = 'New subtitles downloaded: {0}'.format(', '.join(new_languages))
response = _responds(RESULT_SUCCESS, msg='New subtitles found')
else:
status = 'No subtitles downloaded'
response = _responds(RESULT_FAILURE, msg='Unable to find subtitles')
ui.notifications.message('Subtitles Search', status)
return response
# noinspection PyAbstractClass
class CMDExceptions(ApiCall):
_help = {
"desc": "Get the scene exceptions for all or a given show",
"optionalParameters": {
"indexerid": {"desc": "Unique ID of a show"},
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
}
}
def __init__(self, args, kwargs):
super(CMDExceptions, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, False, "int", [])
self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, False, "int", [])
def run(self):
""" Get the scene exceptions for all or a given show """
cache_db_con = db.DBConnection('cache.db', row_type='dict')
if self.indexerid is None:
sql_results = cache_db_con.select("SELECT show_name, indexer_id AS 'indexerid' FROM scene_exceptions")
scene_exceptions = {}
for row in sql_results:
indexerid = row[b"indexerid"]
if indexerid not in scene_exceptions:
scene_exceptions[indexerid] = []
scene_exceptions[indexerid].append(row[b"show_name"])
else:
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if not show_obj:
return _responds(RESULT_FAILURE, msg="Show not found")
sql_results = cache_db_con.select(
"SELECT show_name, indexer_id AS 'indexerid' FROM scene_exceptions WHERE indexer_id = ?",
[self.indexerid])
scene_exceptions = []
for row in sql_results:
scene_exceptions.append(row[b"show_name"])
return _responds(RESULT_SUCCESS, scene_exceptions)
# noinspection PyAbstractClass
class CMDHistory(ApiCall):
_help = {
"desc": "Get the downloaded and/or snatched history",
"optionalParameters": {
"limit": {"desc": "The maximum number of results to return"},
"type": {"desc": "Only get some entries. No value will returns every type"},
}
}
def __init__(self, args, kwargs):
super(CMDHistory, self).__init__(args, kwargs)
self.limit, args = self.check_params(args, kwargs, "limit", 100, False, "int", [])
self.type, args = self.check_params(args, kwargs, "type", None, False, "string", ["downloaded", "snatched"])
self.type = self.type.lower() if isinstance(self.type, str) else ''
def run(self):
""" Get the downloaded and/or snatched history """
data = History().get(self.limit, self.type)
results = []
for row in data:
status, quality = Quality.splitCompositeStatus(int(row[b"action"]))
status = _get_status_strings(status)
if self.type and not status.lower() == self.type:
continue
row[b"status"] = status
row[b"quality"] = get_quality_string(quality)
row[b"date"] = _history_date_to_datetime_form(str(row[b"date"]))
del row[b"action"]
_rename_element(row, "show_id", "indexerid")
row[b"resource_path"] = ek(os.path.dirname, row[b"resource"])
row[b"resource"] = ek(os.path.basename, row[b"resource"])
# Add tvdbid for backward compatibility
row[b'tvdbid'] = row[b'indexerid']
results.append(row)
return _responds(RESULT_SUCCESS, results)
# noinspection PyAbstractClass
class CMDHistoryClear(ApiCall):
_help = {"desc": "Clear the entire history"}
def __init__(self, args, kwargs):
super(CMDHistoryClear, self).__init__(args, kwargs)
def run(self):
""" Clear the entire history """
History().clear()
return _responds(RESULT_SUCCESS, msg="History cleared")
# noinspection PyAbstractClass
class CMDHistoryTrim(ApiCall):
_help = {"desc": "Trim history entries older than 30 days"}
def __init__(self, args, kwargs):
super(CMDHistoryTrim, self).__init__(args, kwargs)
def run(self):
""" Trim history entries older than 30 days """
History().trim()
return _responds(RESULT_SUCCESS, msg='Removed history entries older than 30 days')
# noinspection PyAbstractClass
class CMDFailed(ApiCall):
_help = {
"desc": "Get the failed downloads",
"optionalParameters": {
"limit": {"desc": "The maximum number of results to return"},
}
}
def __init__(self, args, kwargs):
super(CMDFailed, self).__init__(args, kwargs)
self.limit, args = self.check_params(args, kwargs, "limit", 100, False, "int", [])
def run(self):
""" Get the failed downloads """
failed_db_con = db.DBConnection('failed.db', row_type="dict")
u_limit = min(int(self.limit), 100)
if u_limit == 0:
sql_results = failed_db_con.select("SELECT * FROM failed")
else:
sql_results = failed_db_con.select("SELECT * FROM failed LIMIT ?", [u_limit])
return _responds(RESULT_SUCCESS, sql_results)
# noinspection PyAbstractClass
class CMDBacklog(ApiCall):
_help = {"desc": "Get the backlogged episodes"}
def __init__(self, args, kwargs):
super(CMDBacklog, self).__init__(args, kwargs)
def run(self):
""" Get the backlogged episodes """
shows = []
main_db_con = db.DBConnection(row_type="dict")
for curShow in sickbeard.showList:
show_eps = []
# noinspection PyPep8
sql_results = main_db_con.select(
"SELECT tv_episodes.*, tv_shows.paused FROM tv_episodes INNER JOIN tv_shows ON tv_episodes.showid = tv_shows.indexer_id WHERE showid = ? AND paused = 0 ORDER BY season DESC, episode DESC",
[curShow.indexerid])
for curResult in sql_results:
cur_ep_cat = curShow.getOverview(curResult[b"status"])
if cur_ep_cat and cur_ep_cat in (Overview.WANTED, Overview.QUAL):
show_eps.append(curResult)
if show_eps:
shows.append({
"indexerid": curShow.indexerid,
"show_name": curShow.name,
"status": curShow.status,
"episodes": show_eps
})
return _responds(RESULT_SUCCESS, shows)
# noinspection PyAbstractClass
class CMDLogs(ApiCall):
_help = {
"desc": "Get the logs",
"optionalParameters": {
"min_level": {
"desc":
"The minimum level classification of log entries to return. "
"Each level inherits its above levels: debug < info < warning < error"
},
}
}
def __init__(self, args, kwargs):
super(CMDLogs, self).__init__(args, kwargs)
self.min_level, args = self.check_params(args, kwargs, "min_level", "error", False, "string",
["error", "warning", "info", "debug"])
def run(self):
""" Get the logs """
# 10 = Debug / 20 = Info / 30 = Warning / 40 = Error
min_level = logger.LOGGING_LEVELS[str(self.min_level).upper()]
data = []
if ek(os.path.isfile, logger.log_file):
with io.open(logger.log_file, 'r', encoding='utf-8') as f:
data = f.readlines()
regex = r"^(\d\d\d\d)\-(\d\d)\-(\d\d)\s*(\d\d)\:(\d\d):(\d\d)\s*([A-Z]+)\s*(.+?)\s*\:\:\s*(.*)$"
final_data = []
num_lines = 0
last_line = False
num_to_show = min(50, len(data))
for x in reversed(data):
match = re.match(regex, x)
if match:
level = match.group(7)
if level not in logger.LOGGING_LEVELS:
last_line = False
continue
if logger.LOGGING_LEVELS[level] >= min_level:
last_line = True
final_data.append(x.rstrip("\n"))
else:
last_line = False
continue
elif last_line:
final_data.append("AA" + x)
num_lines += 1
if num_lines >= num_to_show:
break
return _responds(RESULT_SUCCESS, final_data)
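# Illustrative log line matched by the regex above (hypothetical content):
#   2020-01-01 12:34:56 ERROR    SEARCHQUEUE :: Something went wrong
# groups 1-6 capture the timestamp, group 7 the level ("ERROR"), group 8 the
# thread name ("SEARCHQUEUE") and group 9 the message text.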
# noinspection PyAbstractClass
class CMDLogsClear(ApiCall):
_help = {
"desc": "Clear the logs",
"optionalParameters": {
"level": {"desc": "The level of logs to clear"},
},
}
def __init__(self, args, kwargs):
super(CMDLogsClear, self).__init__(args, kwargs)
self.level, args = self.check_params(args, kwargs, "level", "warning", False, "string", ["warning", "error"])
def run(self):
""" Clear the logs """
if self.level == "error":
msg = "Error logs cleared"
classes.ErrorViewer.clear()
elif self.level == "warning":
msg = "Warning logs cleared"
classes.WarningViewer.clear()
else:
return _responds(RESULT_FAILURE, msg="Unknown log level: {0}".format(self.level))
return _responds(RESULT_SUCCESS, msg=msg)
# noinspection PyAbstractClass
class CMDPostProcess(ApiCall):
_help = {
"desc": "Manually post-process the files in the download folder",
"optionalParameters": {
"path": {"desc": "The path to the folder to post-process"},
"force_replace": {"desc": "Force already post-processed files to be post-processed again"},
"force_next": {"desc": "Waits for the current processing queue item to finish and returns result of this request"},
"return_data": {"desc": "Returns the result of the post-process"},
"process_method": {"desc": "How should valid post-processed files be handled"},
"is_priority": {"desc": "Replace the file even if it exists in a higher quality"},
"failed": {"desc": "Mark download as failed"},
"delete": {"desc": "Delete processed files and folders"},
"type": {"desc": "The type of post-process being requested"},
}
}
def __init__(self, args, kwargs):
super(CMDPostProcess, self).__init__(args, kwargs)
self.path, args = self.check_params(args, kwargs, "path", None, False, "string", [])
self.force_replace, args = self.check_params(args, kwargs, "force_replace", False, False, "bool", [])
self.force_next, args = self.check_params(args, kwargs, "force_next", False, False, "bool", [])
self.return_data, args = self.check_params(args, kwargs, "return_data", False, False, "bool", [])
self.process_method, args = self.check_params(args, kwargs, "process_method", False, False, "string",
PROCESS_METHODS)
self.is_priority, args = self.check_params(args, kwargs, "is_priority", False, False, "bool", [])
self.failed, args = self.check_params(args, kwargs, "failed", False, False, "bool", [])
self.delete, args = self.check_params(args, kwargs, "delete", False, False, "bool", [])
self.type, args = self.check_params(args, kwargs, "type", "auto", None, "string", ["auto", "manual"])
def run(self):
""" Manually post-process the files in the download folder """
if not self.path and not sickbeard.TV_DOWNLOAD_DIR:
return _responds(RESULT_FAILURE, msg="You need to provide a path or set TV Download Dir")
if not self.path:
self.path = sickbeard.TV_DOWNLOAD_DIR
if not self.type:
self.type = 'manual'
data = sickbeard.postProcessorTaskScheduler.action.add_item(
self.path, method=self.process_method, force=self.force_replace,
is_priority=self.is_priority, failed=self.failed, delete=self.delete,
mode=self.type, force_next=self.force_next
)
if not self.return_data:
data = ""
return _responds(RESULT_SUCCESS, data=data, msg="Started post-process for {0}".format(self.path))
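# Illustrative request (not part of the original source; assumes this class
# is registered in function_mapper under "postprocess"):
#   ?cmd=postprocess&path=/downloads/complete&return_data=1&delete=1
# queues a post-processing task for the given folder; without return_data=1
# the "data" field of the response is left empty.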
# noinspection PyAbstractClass
class CMDSickBeard(ApiCall):
_help = {"desc": "Get miscellaneous information about SickChill"}
def __init__(self, args, kwargs):
super(CMDSickBeard, self).__init__(args, kwargs)
def run(self):
""" dGet miscellaneous information about SickChill """
data = {
"sr_version": sickbeard.BRANCH, "api_version": self.version,
"api_commands": sorted(function_mapper.keys())
}
return _responds(RESULT_SUCCESS, data)
# noinspection PyAbstractClass
class CMDSickBeardAddRootDir(ApiCall):
_help = {
"desc": "Add a new root (parent) directory to SickChill",
"requiredParameters": {
"location": {"desc": "The full path to the new root (parent) directory"},
},
"optionalParameters": {
"default": {"desc": "Make this new location the default root (parent) directory"},
}
}
def __init__(self, args, kwargs):
super(CMDSickBeardAddRootDir, self).__init__(args, kwargs)
self.location, args = self.check_params(args, kwargs, "location", None, True, "string", [])
self.default, args = self.check_params(args, kwargs, "default", False, False, "bool", [])
def run(self):
""" Add a new root (parent) directory to SickChill """
self.location = urllib.parse.unquote_plus(self.location)
location_matched = 0
index = 0
# disallow adding/setting an invalid dir
if not ek(os.path.isdir, self.location):
return _responds(RESULT_FAILURE, msg="Location is invalid")
root_dirs = []
if sickbeard.ROOT_DIRS == "":
self.default = 1
else:
root_dirs = sickbeard.ROOT_DIRS.split('|')
index = int(sickbeard.ROOT_DIRS.split('|')[0])
root_dirs.pop(0)
# clean up the list - replace %xx escapes by their single-character equivalent
root_dirs = [urllib.parse.unquote_plus(x) for x in root_dirs]
for x in root_dirs:
if x == self.location:
location_matched = 1
if self.default == 1:
index = root_dirs.index(self.location)
break
if location_matched == 0:
if self.default == 1:
root_dirs.insert(0, self.location)
else:
root_dirs.append(self.location)
root_dirs_new = [urllib.parse.unquote_plus(x) for x in root_dirs]
root_dirs_new.insert(0, index)
# noinspection PyCompatibility
root_dirs_new = '|'.join(six.text_type(x) for x in root_dirs_new)
sickbeard.ROOT_DIRS = root_dirs_new
return _responds(RESULT_SUCCESS, _get_root_dirs(), msg="Root directories updated")
# noinspection PyAbstractClass
class CMDSickBeardCheckVersion(ApiCall):
_help = {"desc": "Check if a new version of SickChill is available"}
def __init__(self, args, kwargs):
super(CMDSickBeardCheckVersion, self).__init__(args, kwargs)
def run(self):
check_version = CheckVersion()
needs_update = check_version.check_for_new_version()
data = {
"current_version": {
"branch": check_version.get_branch(),
"commit": check_version.updater.get_cur_commit_hash(),
"version": check_version.updater.get_cur_version(),
},
"latest_version": {
"branch": check_version.get_branch(),
"commit": check_version.updater.get_newest_commit_hash(),
"version": check_version.updater.get_newest_version(),
},
"commits_offset": check_version.updater.get_num_commits_behind(),
"needs_update": needs_update,
}
return _responds(RESULT_SUCCESS, data)
# noinspection PyAbstractClass
class CMDSickBeardCheckScheduler(ApiCall):
_help = {"desc": "Get information about the scheduler"}
def __init__(self, args, kwargs):
super(CMDSickBeardCheckScheduler, self).__init__(args, kwargs)
def run(self):
""" Get information about the scheduler """
main_db_con = db.DBConnection(row_type="dict")
sql_results = main_db_con.select("SELECT last_backlog FROM info")
backlog_paused = sickbeard.searchQueueScheduler.action.is_backlog_paused() # @UndefinedVariable
backlog_running = sickbeard.searchQueueScheduler.action.is_backlog_in_progress() # @UndefinedVariable
next_backlog = sickbeard.backlogSearchScheduler.nextRun().strftime(dateFormat).decode(sickbeard.SYS_ENCODING)
data = {
"backlog_is_paused": int(backlog_paused), "backlog_is_running": int(backlog_running),
"last_backlog": _ordinal_to_date_form(sql_results[0][b"last_backlog"]),
"next_backlog": next_backlog
}
return _responds(RESULT_SUCCESS, data)
# noinspection PyAbstractClass
class CMDSickBeardDeleteRootDir(ApiCall):
_help = {
"desc": "Delete a root (parent) directory from SickChill",
"requiredParameters": {
"location": {"desc": "The full path to the root (parent) directory to remove"},
}
}
def __init__(self, args, kwargs):
super(CMDSickBeardDeleteRootDir, self).__init__(args, kwargs)
self.location, args = self.check_params(args, kwargs, "location", None, True, "string", [])
def run(self):
""" Delete a root (parent) directory from SickChill """
if sickbeard.ROOT_DIRS == "":
return _responds(RESULT_FAILURE, _get_root_dirs(), msg="No root directories detected")
new_index = 0
root_dirs_new = []
root_dirs = sickbeard.ROOT_DIRS.split('|')
index = int(root_dirs[0])
root_dirs.pop(0)
# clean up the list - replace %xx escapes by their single-character equivalent
root_dirs = [urllib.parse.unquote_plus(x) for x in root_dirs]
old_root_dir = root_dirs[index]
for curRootDir in root_dirs:
if not curRootDir == self.location:
root_dirs_new.append(curRootDir)
else:
new_index = 0
for curIndex, curNewRootDir in enumerate(root_dirs_new):
if curNewRootDir == old_root_dir:
new_index = curIndex
break
root_dirs_new = [urllib.parse.unquote_plus(x) for x in root_dirs_new]
if len(root_dirs_new) > 0:
root_dirs_new.insert(0, new_index)
# noinspection PyCompatibility
root_dirs_new = "|".join(six.text_type(x) for x in root_dirs_new)
sickbeard.ROOT_DIRS = root_dirs_new
# what if the root dir was not found?
return _responds(RESULT_SUCCESS, _get_root_dirs(), msg="Root directory deleted")
# noinspection PyAbstractClass
class CMDSickBeardGetDefaults(ApiCall):
_help = {"desc": "Get SickChill's user default configuration value"}
def __init__(self, args, kwargs):
super(CMDSickBeardGetDefaults, self).__init__(args, kwargs)
def run(self):
""" Get SickChill's user default configuration value """
any_qualities, best_qualities = _map_quality(sickbeard.QUALITY_DEFAULT)
data = {
"status": statusStrings[sickbeard.STATUS_DEFAULT].lower(),
"flatten_folders": int(not sickbeard.SEASON_FOLDERS_DEFAULT),
"season_folders": int(sickbeard.SEASON_FOLDERS_DEFAULT),
"initial": any_qualities, "archive": best_qualities,
"future_show_paused": int(sickbeard.COMING_EPS_DISPLAY_PAUSED)
}
return _responds(RESULT_SUCCESS, data)
# noinspection PyAbstractClass
class CMDSickBeardGetMessages(ApiCall):
_help = {"desc": "Get all messages"}
def __init__(self, args, kwargs):
super(CMDSickBeardGetMessages, self).__init__(args, kwargs)
def run(self):
messages = []
for cur_notification in ui.notifications.get_notifications(self.rh.request.remote_ip):
messages.append({
"title": cur_notification.title,
"message": cur_notification.message,
"type": cur_notification.type
})
return _responds(RESULT_SUCCESS, messages)
# noinspection PyAbstractClass
class CMDSickBeardGetRootDirs(ApiCall):
_help = {"desc": "Get all root (parent) directories"}
def __init__(self, args, kwargs):
super(CMDSickBeardGetRootDirs, self).__init__(args, kwargs)
def run(self):
""" Get all root (parent) directories """
return _responds(RESULT_SUCCESS, _get_root_dirs())
# noinspection PyAbstractClass
class CMDSickBeardPauseBacklog(ApiCall):
_help = {
"desc": "Pause or un-pause the backlog search",
"optionalParameters": {
"pause": {"desc": "True to pause the backlog search, False to un-pause it"}
}
}
def __init__(self, args, kwargs):
super(CMDSickBeardPauseBacklog, self).__init__(args, kwargs)
self.pause, args = self.check_params(args, kwargs, "pause", False, False, "bool", [])
def run(self):
""" Pause or un-pause the backlog search """
if self.pause:
sickbeard.searchQueueScheduler.action.pause_backlog() # @UndefinedVariable
return _responds(RESULT_SUCCESS, msg="Backlog paused")
else:
sickbeard.searchQueueScheduler.action.unpause_backlog() # @UndefinedVariable
return _responds(RESULT_SUCCESS, msg="Backlog un-paused")
# noinspection PyAbstractClass
class CMDSickBeardPing(ApiCall):
_help = {"desc": "Ping SickChill to check if it is running"}
def __init__(self, args, kwargs):
super(CMDSickBeardPing, self).__init__(args, kwargs)
def run(self):
""" Ping SickChill to check if it is running """
if sickbeard.started:
return _responds(RESULT_SUCCESS, {"pid": sickbeard.PID}, "Pong")
else:
return _responds(RESULT_SUCCESS, msg="Pong")
# noinspection PyAbstractClass
class CMDSickBeardRestart(ApiCall):
_help = {"desc": "Restart SickChill"}
def __init__(self, args, kwargs):
super(CMDSickBeardRestart, self).__init__(args, kwargs)
def run(self):
""" Restart SickChill """
if not Restart.restart(sickbeard.PID):
return _responds(RESULT_FAILURE, msg='SickChill can not be restarted')
return _responds(RESULT_SUCCESS, msg="SickChill is restarting...")
# noinspection PyAbstractClass
class CMDSickBeardSearchIndexers(ApiCall):
_help = {
"desc": "Search for a show with a given name on all the indexers, in a specific language",
"optionalParameters": {
"name": {"desc": "The name of the show you want to search for"},
"indexerid": {"desc": "Unique ID of a show"},
"lang": {"desc": "The 2-letter language code of the desired show"},
"only_new": {"desc": "Discard shows that are already in your show list"},
}
}
def __init__(self, args, kwargs):
super(CMDSickBeardSearchIndexers, self).__init__(args, kwargs)
self.valid_languages = sickchill.indexer.lang_dict()
self.name, args = self.check_params(args, kwargs, "name", None, False, "string", [])
self.lang, args = self.check_params(args, kwargs, "lang", sickbeard.INDEXER_DEFAULT_LANGUAGE, False, "string",
self.valid_languages.keys())
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, False, "int", [])
self.only_new, args = self.check_params(args, kwargs, "only_new", True, False, "bool", [])
def run(self):
""" Search for a show with a given name on all the indexers, in a specific language """
results = []
lang_id = self.valid_languages[self.lang]
if self.name and not self.indexerid: # only name was given
search_results = sickchill.indexer.search_indexers_for_series_name(str(self.name).encode(), self.lang)
for indexer, indexer_results in six.iteritems(search_results):
for result in indexer_results:
# Skip it if it's in our show list already, and we only want new shows
# noinspection PyUnresolvedReferences
in_show_list = sickbeard.tv.Show.find(sickbeard.showList, int(result.id))
if in_show_list and self.only_new:
continue
results.append({
indexer_ids[indexer]: result['id'],
"name": result['seriesName'],
"first_aired": result['firstAired'],
"indexer": indexer,
"in_show_list": in_show_list
})
return _responds(RESULT_SUCCESS, {"results": results, "langid": lang_id})
elif self.indexerid:
indexer, result = sickchill.indexer.search_indexers_for_series_id(indexerid=self.indexerid, language=self.lang)
if not indexer:
logger.log("API :: Unable to find show with id " + str(self.indexerid), logger.WARNING)
return _responds(RESULT_SUCCESS, {"results": [], "langid": lang_id})
if not result.seriesName:
logger.log(
"API :: Found show with indexerid: " + str(self.indexerid) + ", however it contained no show name", logger.DEBUG)
return _responds(RESULT_FAILURE, msg="Show contains no name, invalid result")
results = [{
indexer_ids[indexer]: result.id,
"name": six.text_type(result.seriesName),
"first_aired": result.firstAired,
"indexer": indexer
}]
return _responds(RESULT_SUCCESS, {"results": results, "langid": lang_id})
else:
return _responds(RESULT_FAILURE, msg="Either a unique id or name is required!")
# noinspection PyAbstractClass
class CMDSickBeardSearchTVDB(CMDSickBeardSearchIndexers):
_help = {
"desc": "Search for a show with a given name on The TVDB, in a specific language",
"optionalParameters": {
"name": {"desc": "The name of the show you want to search for"},
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
"lang": {"desc": "The 2-letter language code of the desired show"},
}
}
def __init__(self, args, kwargs):
CMDSickBeardSearchIndexers.__init__(self, args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "tvdbid", None, False, "int", [])
# noinspection PyAbstractClass
class CMDSickBeardSearchTVRAGE(CMDSickBeardSearchIndexers):
"""
Deprecated, TVRage is no more.
"""
_help = {
"desc":
"Search for a show with a given name on TVRage, in a specific language. "
"This command should not longer be used, as TVRage was shut down.",
"optionalParameters": {
"name": {"desc": "The name of the show you want to search for"},
"lang": {"desc": "The 2-letter language code of the desired show"},
}
}
def __init__(self, args, kwargs):
super(CMDSickBeardSearchTVRAGE, self).__init__(args, kwargs)
def run(self):
return _responds(RESULT_FAILURE, msg="TVRage is no more, invalid result")
# noinspection PyAbstractClass
class CMDSickBeardSetDefaults(ApiCall):
_help = {
"desc": "Set SickChill's user default configuration value",
"optionalParameters": {
"initial": {"desc": "The initial quality of a show"},
"archive": {"desc": "The archive quality of a show"},
"future_show_paused": {"desc": "True to list paused shows in the coming episode, False otherwise"},
"season_folders": {"desc": "Group episodes in season folders within the show directory"},
"status": {"desc": "Status of missing episodes"},
}
}
def __init__(self, args, kwargs):
super(CMDSickBeardSetDefaults, self).__init__(args, kwargs)
self.initial, args = self.check_params(args, kwargs, "initial", [], False, "list", ALLOWED_QUALITY_LIST)
self.archive, args = self.check_params(args, kwargs, "archive", [], False, "list", PREFERRED_QUALITY_LIST)
self.future_show_paused, args = self.check_params(args, kwargs, "future_show_paused", None, False, "bool", [])
self.season_folders, args = self.check_params(args, kwargs, "flatten_folders", not bool(sickbeard.SEASON_FOLDERS_DEFAULT), False, "bool", [])
self.season_folders, args = self.check_params(args, kwargs, "season_folders", self.season_folders, False, "bool", [])
self.status, args = self.check_params(args, kwargs, "status", None, False, "string",
["wanted", "skipped", "ignored"])
def run(self):
""" Set SickChill's user default configuration value """
i_quality_id = []
a_quality_id = []
if self.initial:
# noinspection PyTypeChecker
for quality in self.initial:
i_quality_id.append(QUALITY_MAP[quality])
if self.archive:
# noinspection PyTypeChecker
for quality in self.archive:
a_quality_id.append(QUALITY_MAP[quality])
if i_quality_id or a_quality_id:
sickbeard.QUALITY_DEFAULT = Quality.combineQualities(i_quality_id, a_quality_id)
if self.status:
# convert the string status to a int
for status in statusStrings:
if statusStrings[status].lower() == str(self.status).lower():
self.status = status
break
# this should be obsolete because of the above
if self.status not in statusStrings:
raise ApiError("Invalid Status")
# only allow the status options we want
if int(self.status) not in (3, 5, 6, 7):
raise ApiError("Status Prohibited")
sickbeard.STATUS_DEFAULT = self.status
if self.season_folders is not None:
sickbeard.SEASON_FOLDERS_DEFAULT = int(self.season_folders)
if self.future_show_paused is not None:
sickbeard.COMING_EPS_DISPLAY_PAUSED = int(self.future_show_paused)
return _responds(RESULT_SUCCESS, msg="Saved defaults")
# noinspection PyAbstractClass
class CMDSickBeardShutdown(ApiCall):
_help = {"desc": "Shutdown SickChill"}
def __init__(self, args, kwargs):
super(CMDSickBeardShutdown, self).__init__(args, kwargs)
def run(self):
""" Shutdown SickChill """
if not Shutdown.stop(sickbeard.PID):
return _responds(RESULT_FAILURE, msg='SickChill can not be shut down')
return _responds(RESULT_SUCCESS, msg="SickChill is shutting down...")
# noinspection PyAbstractClass
class CMDSickBeardUpdate(ApiCall):
_help = {"desc": "Update SickChill to the latest version available"}
def __init__(self, args, kwargs):
super(CMDSickBeardUpdate, self).__init__(args, kwargs)
def run(self):
check_version = CheckVersion()
if check_version.check_for_new_version():
if check_version.run_backup_if_safe():
check_version.update()
return _responds(RESULT_SUCCESS, msg="SickChill is updating ...")
return _responds(RESULT_FAILURE, msg="SickChill could not backup config ...")
return _responds(RESULT_FAILURE, msg="SickChill is already up to date")
# noinspection PyAbstractClass
class CMDShow(ApiCall):
_help = {
"desc": "Get detailed information about a show",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
}
}
def __init__(self, args, kwargs):
super(CMDShow, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
def run(self):
""" Get detailed information about a show """
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if not show_obj:
return _responds(RESULT_FAILURE, msg="Show not found")
show_dict = {
"season_list": CMDShowSeasonList((), {"indexerid": self.indexerid}).run()["data"],
"cache": CMDShowCache((), {"indexerid": self.indexerid}).run()["data"],
"genre": show_obj.genre, "quality": get_quality_string(show_obj.quality)
}
any_qualities, best_qualities = _map_quality(show_obj.quality)
show_dict["quality_details"] = {"initial": any_qualities, "archive": best_qualities}
try:
show_dict["location"] = show_obj.location
except ShowDirectoryNotFoundException:
show_dict["location"] = ""
show_dict["language"] = show_obj.lang
show_dict["show_name"] = show_obj.name
show_dict["paused"] = (0, 1)[show_obj.paused]
show_dict["subtitles"] = (0, 1)[show_obj.subtitles]
show_dict["air_by_date"] = (0, 1)[show_obj.air_by_date]
show_dict["season_folders"] = (0, 1)[show_obj.season_folders]
show_dict["sports"] = (0, 1)[show_obj.sports]
show_dict["anime"] = (0, 1)[show_obj.anime]
show_dict["airs"] = str(show_obj.airs).replace('am', ' AM').replace('pm', ' PM').replace(' ', ' ')
show_dict["dvdorder"] = (0, 1)[show_obj.dvdorder]
if show_obj.rls_require_words:
show_dict["rls_require_words"] = show_obj.rls_require_words.split(", ")
else:
show_dict["rls_require_words"] = []
if show_obj.rls_prefer_words:
show_dict["rls_prefer_words"] = show_obj.rls_prefer_words.split(", ")
else:
show_dict["rls_prefer_words"] = []
if show_obj.rls_ignore_words:
show_dict["rls_ignore_words"] = show_obj.rls_ignore_words.split(", ")
else:
show_dict["rls_ignore_words"] = []
show_dict["scene"] = (0, 1)[show_obj.scene]
# show_dict["archive_firstmatch"] = (0, 1)[show_obj.archive_firstmatch]
# This might need to be here for 3rd part apps?
show_dict["archive_firstmatch"] = 1
show_dict["indexerid"] = show_obj.indexerid
show_dict["tvdbid"] = show_obj.indexerid
show_dict["imdbid"] = show_obj.imdbid
show_dict["network"] = show_obj.network
if not show_dict["network"]:
show_dict["network"] = ""
show_dict["status"] = show_obj.status
if try_int(show_obj.nextaired, 1) > 693595:
dt_episode_airs = sbdatetime.sbdatetime.convert_to_setting(
network_timezones.parse_date_time(show_obj.nextaired, show_dict['airs'], show_dict['network']))
show_dict['airs'] = sbdatetime.sbdatetime.sbftime(dt_episode_airs, t_preset=timeFormat).lstrip('0').replace(
' 0', ' ')
show_dict['next_ep_airdate'] = sbdatetime.sbdatetime.sbfdate(dt_episode_airs, d_preset=dateFormat)
else:
show_dict['next_ep_airdate'] = ''
return _responds(RESULT_SUCCESS, show_dict)
# noinspection PyAbstractClass
class CMDShowAddExisting(ApiCall):
_help = {
"desc": "Add an existing show in SickChill",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
"location": {"desc": "Full path to the existing shows's folder"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
"initial": {"desc": "The initial quality of the show"},
"archive": {"desc": "The archive quality of the show"},
"season_folders": {"desc": "True to group episodes in season folders, False otherwise"},
"subtitles": {"desc": "True to search for subtitles, False otherwise"},
}
}
def __init__(self, args, kwargs):
super(CMDShowAddExisting, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "", [])
self.location, args = self.check_params(args, kwargs, "location", None, True, "string", [])
self.initial, args = self.check_params(args, kwargs, "initial", [], False, "list", ALLOWED_QUALITY_LIST)
self.archive, args = self.check_params(args, kwargs, "archive", [], False, "list", PREFERRED_QUALITY_LIST)
self.season_folders, args = self.check_params(args, kwargs, "flatten_folders",
bool(sickbeard.SEASON_FOLDERS_DEFAULT), False, "bool", [])
self.season_folders, args = self.check_params(args, kwargs, "season_folders",
self.season_folders, False, "bool", [])
self.subtitles, args = self.check_params(args, kwargs, "subtitles", int(sickbeard.USE_SUBTITLES),
False, "int", [])
def run(self):
""" Add an existing show in SickChill """
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if show_obj:
return _responds(RESULT_FAILURE, msg="An existing indexerid already exists in the database")
if not ek(os.path.isdir, self.location):
return _responds(RESULT_FAILURE, msg='Not a valid location')
indexer_name = None
indexer_result = CMDSickBeardSearchIndexers([], {indexer_ids[self.indexer]: self.indexerid}).run()
if indexer_result[b'result'] == result_type_map[RESULT_SUCCESS]:
if not indexer_result[b'data']['results']:
return _responds(RESULT_FAILURE, msg="Empty results returned, check indexerid and try again")
if len(indexer_result[b'data']['results']) == 1 and 'name' in indexer_result[b'data']['results'][0]:
indexer_name = indexer_result[b'data']['results'][0]['name']
if not indexer_name:
return _responds(RESULT_FAILURE, msg="Unable to retrieve information from indexer")
# set indexer so we can pass it along when adding show to SR
indexer = indexer_result[b'data']['results'][0]['indexer']
# use default quality as a fail-safe
new_quality = int(sickbeard.QUALITY_DEFAULT)
i_quality_id = []
a_quality_id = []
if self.initial:
# noinspection PyTypeChecker
for quality in self.initial:
i_quality_id.append(QUALITY_MAP[quality])
if self.archive:
# noinspection PyTypeChecker
for quality in self.archive:
a_quality_id.append(QUALITY_MAP[quality])
if i_quality_id or a_quality_id:
new_quality = Quality.combineQualities(i_quality_id, a_quality_id)
sickbeard.showQueueScheduler.action.add_show(
int(indexer), int(self.indexerid), self.location,
default_status=sickbeard.STATUS_DEFAULT, quality=new_quality,
season_folders=int(self.season_folders), subtitles=self.subtitles,
default_status_after=sickbeard.STATUS_DEFAULT_AFTER
)
return _responds(RESULT_SUCCESS, {"name": indexer_name}, indexer_name + " has been queued to be added")
# noinspection PyAbstractClass
class CMDShowAddNew(ApiCall):
_help = {
"desc": "Add a new show to SickChill",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
"initial": {"desc": "The initial quality of the show"},
"location": {"desc": "The path to the folder where the show should be created"},
"archive": {"desc": "The archive quality of the show"},
"season_folders": {"desc": "True to group episodes in season folders, False otherwise"},
"status": {"desc": "The status of missing episodes"},
"lang": {"desc": "The 2-letter language code of the desired show"},
"subtitles": {"desc": "True to search for subtitles, False otherwise"},
"anime": {"desc": "True to mark the show as an anime, False otherwise"},
"scene": {"desc": "True if episodes search should be made by scene numbering, False otherwise"},
"future_status": {"desc": "The status of future episodes"},
}
}
def __init__(self, args, kwargs):
super(CMDShowAddNew, self).__init__(args, kwargs)
self.valid_languages = sickchill.indexer.lang_dict()
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
self.location, args = self.check_params(args, kwargs, "location", None, False, "string", [])
self.initial, args = self.check_params(
args, kwargs, "initial", None, False, "list", ALLOWED_QUALITY_LIST)
self.archive, args = self.check_params(
args, kwargs, "archive", None, False, "list", PREFERRED_QUALITY_LIST)
self.season_folders, args = self.check_params(args, kwargs, "flatten_folders",
bool(sickbeard.SEASON_FOLDERS_DEFAULT), False, "bool", [])
self.season_folders, args = self.check_params(args, kwargs, "season_folders",
self.season_folders, False, "bool", [])
self.status, args = self.check_params(args, kwargs, "status", None, False, "string",
["wanted", "skipped", "ignored"])
self.lang, args = self.check_params(args, kwargs, "lang", sickbeard.INDEXER_DEFAULT_LANGUAGE, False, "string",
self.valid_languages.keys())
self.subtitles, args = self.check_params(args, kwargs, "subtitles", bool(sickbeard.USE_SUBTITLES),
False, "bool", [])
self.anime, args = self.check_params(args, kwargs, "anime", bool(sickbeard.ANIME_DEFAULT), False,
"bool", [])
self.scene, args = self.check_params(args, kwargs, "scene", bool(sickbeard.SCENE_DEFAULT), False,
"bool", [])
self.future_status, args = self.check_params(args, kwargs, "future_status", None, False, "string",
["wanted", "skipped", "ignored"])
def run(self):
""" Add a new show to SickChill """
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if show_obj:
return _responds(RESULT_FAILURE, msg="An existing indexerid already exists in database")
if not self.location:
if sickbeard.ROOT_DIRS != "":
root_dirs = sickbeard.ROOT_DIRS.split('|')
root_dirs.pop(0)
default_index = int(sickbeard.ROOT_DIRS.split('|')[0])
self.location = root_dirs[default_index]
else:
return _responds(RESULT_FAILURE, msg="Root directory is not set, please provide a location")
if not ek(os.path.isdir, self.location):
return _responds(RESULT_FAILURE, msg="'" + self.location + "' is not a valid location")
# use default quality as a fail-safe
new_quality = int(sickbeard.QUALITY_DEFAULT)
i_quality_id = []
a_quality_id = []
if self.initial:
# noinspection PyTypeChecker
for quality in self.initial:
i_quality_id.append(QUALITY_MAP[quality])
if self.archive:
# noinspection PyTypeChecker
for quality in self.archive:
a_quality_id.append(QUALITY_MAP[quality])
if i_quality_id or a_quality_id:
new_quality = Quality.combineQualities(i_quality_id, a_quality_id)
# use default status as a fail-safe
new_status = sickbeard.STATUS_DEFAULT
if self.status:
# convert the string status to a int
for status in statusStrings:
if statusStrings[status].lower() == str(self.status).lower():
self.status = status
break
if self.status not in statusStrings:
raise ApiError("Invalid Status")
# only allow the status options we want
if int(self.status) not in (WANTED, SKIPPED, IGNORED):
return _responds(RESULT_FAILURE, msg="Status prohibited")
new_status = self.status
# use default status as a fail-safe
default_ep_status_after = sickbeard.STATUS_DEFAULT_AFTER
if self.future_status:
            # convert the string status to an int
for status in statusStrings:
if statusStrings[status].lower() == str(self.future_status).lower():
self.future_status = status
break
if self.future_status not in statusStrings:
raise ApiError("Invalid Status")
# only allow the status options we want
if int(self.future_status) not in (WANTED, SKIPPED, IGNORED):
return _responds(RESULT_FAILURE, msg="Status prohibited")
default_ep_status_after = self.future_status
indexer_name = None
indexer_result = CMDSickBeardSearchIndexers([], {indexer_ids[self.indexer]: self.indexerid, 'lang': self.lang}).run()
if indexer_result[b'result'] == result_type_map[RESULT_SUCCESS]:
if not indexer_result[b'data']['results']:
return _responds(RESULT_FAILURE, msg="Empty results returned, check indexerid and try again")
if len(indexer_result[b'data']['results']) == 1 and 'name' in indexer_result[b'data']['results'][0]:
indexer_name = indexer_result[b'data']['results'][0]['name']
if not indexer_name:
return _responds(RESULT_FAILURE, msg="Unable to retrieve information from indexer")
# set indexer for found show so we can pass it along
indexer = indexer_result[b'data']['results'][0]['indexer']
        # this check was moved to the end to avoid creating an empty show directory when an earlier step fails
show_path = ek(os.path.join, self.location, sanitize_filename(indexer_name))
# don't create show dir if config says not to
if sickbeard.ADD_SHOWS_WO_DIR:
logger.log("Skipping initial creation of " + show_path + " due to config.ini setting")
else:
dir_exists = helpers.makeDir(show_path)
if not dir_exists:
logger.log("API :: Unable to create the folder " + show_path + ", can't add the show", logger.ERROR)
return _responds(RESULT_FAILURE, {"path": show_path},
"Unable to create the folder " + show_path + ", can't add the show")
else:
helpers.chmodAsParent(show_path)
sickbeard.showQueueScheduler.action.add_show(
int(indexer), int(self.indexerid), show_path, default_status=new_status,
quality=new_quality, season_folders=int(self.season_folders),
lang=self.lang, subtitles=self.subtitles, anime=self.anime,
scene=self.scene, default_status_after=default_ep_status_after
)
return _responds(RESULT_SUCCESS, {"name": indexer_name}, indexer_name + " has been queued to be added")
# noinspection PyAbstractClass
class CMDShowCache(ApiCall):
_help = {
"desc": "Check SickChill's cache to see if the images (poster, banner, fanart) for a show are valid",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
}
}
def __init__(self, args, kwargs):
super(CMDShowCache, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
def run(self):
""" Check SickChill's cache to see if the images (poster, banner, fanart) for a show are valid """
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if not show_obj:
return _responds(RESULT_FAILURE, msg="Show not found")
# TODO: catch if cache dir is missing/invalid.. so it doesn't break show/show.cache
# return {"poster": 0, "banner": 0}
cache_obj = image_cache.ImageCache()
has_poster = 0
has_banner = 0
if ek(os.path.isfile, cache_obj.poster_path(show_obj.indexerid)):
has_poster = 1
if ek(os.path.isfile, cache_obj.banner_path(show_obj.indexerid)):
has_banner = 1
return _responds(RESULT_SUCCESS, {"poster": has_poster, "banner": has_banner})
# noinspection PyAbstractClass
class CMDShowDelete(ApiCall):
_help = {
"desc": "Delete a show in SickChill",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
"removefiles": {
"desc": "True to delete the files associated with the show, False otherwise. This can not be undone!"
},
}
}
def __init__(self, args, kwargs):
super(CMDShowDelete, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
self.removefiles, args = self.check_params(args, kwargs, "removefiles", False, False, "bool", [])
def run(self):
""" Delete a show in SickChill """
error, show = Show.delete(self.indexerid, self.removefiles)
if error:
return _responds(RESULT_FAILURE, msg=error)
return _responds(RESULT_SUCCESS, msg='{0} has been queued to be deleted'.format(show.name))
# noinspection PyAbstractClass
class CMDShowGetQuality(ApiCall):
_help = {
"desc": "Get the quality setting of a show",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
}
}
def __init__(self, args, kwargs):
super(CMDShowGetQuality, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
def run(self):
""" Get the quality setting of a show """
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if not show_obj:
return _responds(RESULT_FAILURE, msg="Show not found")
any_qualities, best_qualities = _map_quality(show_obj.quality)
return _responds(RESULT_SUCCESS, {"initial": any_qualities, "archive": best_qualities})
# noinspection PyAbstractClass
class CMDShowGetPoster(ApiCall):
_help = {
"desc": "Get the poster of a show",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
"media_format": {"desc": '"normal" for normal size poster (default), "thumb" for small size poster'},
}
}
def __init__(self, args, kwargs):
super(CMDShowGetPoster, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
self.media_format, args = self.check_params(args, kwargs, "media_format", "normal", False, "string", ["normal", "thumb"])
def run(self):
""" Get the poster a show """
return {
'outputType': 'image',
'image': ShowPoster(self.indexerid, self.media_format),
}
# noinspection PyAbstractClass
class CMDShowGetBanner(ApiCall):
_help = {
"desc": "Get the banner of a show",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
"media_format": {"desc": '"normal" for normal size banner (default), "thumb" for small size banner'},
}
}
def __init__(self, args, kwargs):
super(CMDShowGetBanner, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
self.media_format, args = self.check_params(args, kwargs, "media_format", "normal", False, "string", ["normal", "thumb"])
def run(self):
""" Get the banner of a show """
return {
'outputType': 'image',
'image': ShowBanner(self.indexerid, self.media_format),
}
# noinspection PyAbstractClass
class CMDShowGetNetworkLogo(ApiCall):
_help = {
"desc": "Get the network logo of a show",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
}
}
def __init__(self, args, kwargs):
super(CMDShowGetNetworkLogo, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
def run(self):
"""
:return: Get the network logo of a show
"""
return {
'outputType': 'image',
'image': ShowNetworkLogo(self.indexerid),
}
# noinspection PyAbstractClass
class CMDShowGetFanArt(ApiCall):
_help = {
"desc": "Get the fan art of a show",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
}
}
def __init__(self, args, kwargs):
super(CMDShowGetFanArt, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
def run(self):
""" Get the fan art of a show """
return {
'outputType': 'image',
'image': ShowFanArt(self.indexerid),
}
# noinspection PyAbstractClass
class CMDShowPause(ApiCall):
_help = {
"desc": "Pause or un-pause a show",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
"pause": {"desc": "True to pause the show, False otherwise"},
}
}
def __init__(self, args, kwargs):
super(CMDShowPause, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
self.pause, args = self.check_params(args, kwargs, "pause", False, False, "bool", [])
def run(self):
""" Pause or un-pause a show """
error, show = Show.pause(self.indexerid, self.pause)
if error:
return _responds(RESULT_FAILURE, msg=error)
return _responds(RESULT_SUCCESS, msg='{0} has been {1}'.format(show.name, ('resumed', 'paused')[show.paused]))
# noinspection PyAbstractClass
class CMDShowRefresh(ApiCall):
_help = {
"desc": "Refresh a show in SickChill",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
}
}
def __init__(self, args, kwargs):
super(CMDShowRefresh, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
def run(self):
""" Refresh a show in SickChill """
error, show = Show.refresh(self.indexerid)
if error:
return _responds(RESULT_FAILURE, msg=error)
        return _responds(RESULT_SUCCESS, msg='{0} has been queued to be refreshed'.format(show.name))
# noinspection PyAbstractClass
class CMDShowSeasonList(ApiCall):
_help = {
"desc": "Get the list of seasons of a show",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
"sort": {"desc": "Return the seasons in ascending or descending order"}
}
}
def __init__(self, args, kwargs):
super(CMDShowSeasonList, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
self.sort, args = self.check_params(args, kwargs, "sort", "desc", False, "string", ["asc", "desc"])
def run(self):
""" Get the list of seasons of a show """
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if not show_obj:
return _responds(RESULT_FAILURE, msg="Show not found")
main_db_con = db.DBConnection(row_type="dict")
if self.sort == "asc":
sql_results = main_db_con.select("SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season ASC",
[self.indexerid])
else:
sql_results = main_db_con.select("SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season DESC",
[self.indexerid])
season_list = [] # a list with all season numbers
for row in sql_results:
season_list.append(int(row[b"season"]))
return _responds(RESULT_SUCCESS, season_list)
# noinspection PyAbstractClass
class CMDShowSeasons(ApiCall):
_help = {
"desc": "Get the list of episodes for one or all seasons of a show",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
"season": {"desc": "The season number"},
}
}
def __init__(self, args, kwargs):
super(CMDShowSeasons, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
self.season, args = self.check_params(args, kwargs, "season", None, False, "int", [])
def run(self):
""" Get the list of episodes for one or all seasons of a show """
        show_obj = Show.find(sickbeard.showList, int(self.indexerid))
        if not show_obj:
return _responds(RESULT_FAILURE, msg="Show not found")
main_db_con = db.DBConnection(row_type="dict")
if self.season is None:
sql_results = main_db_con.select(
"SELECT name, episode, airdate, status, release_name, season, location, file_size, subtitles FROM tv_episodes WHERE showid = ?",
[self.indexerid])
seasons = {}
for row in sql_results:
status, quality = Quality.splitCompositeStatus(int(row[b"status"]))
row[b"status"] = _get_status_strings(status)
row[b"quality"] = get_quality_string(quality)
                if try_int(row[b'airdate'], 1) > 693595:  # i.e. on/after 1900-01-01 (ordinal 693596)
dt_episode_airs = sbdatetime.sbdatetime.convert_to_setting(
                        network_timezones.parse_date_time(row[b'airdate'], show_obj.airs, show_obj.network))
row[b'airdate'] = sbdatetime.sbdatetime.sbfdate(dt_episode_airs, d_preset=dateFormat)
else:
row[b'airdate'] = 'Never'
cur_season = int(row[b"season"])
cur_episode = int(row[b"episode"])
del row[b"season"]
del row[b"episode"]
if cur_season not in seasons:
seasons[cur_season] = {}
seasons[cur_season][cur_episode] = row
else:
sql_results = main_db_con.select(
"SELECT name, episode, airdate, status, location, file_size, release_name, subtitles FROM tv_episodes WHERE showid = ? AND season = ?",
[self.indexerid, self.season])
if not sql_results:
return _responds(RESULT_FAILURE, msg="Season not found")
seasons = {}
for row in sql_results:
cur_episode = int(row[b"episode"])
del row[b"episode"]
status, quality = Quality.splitCompositeStatus(int(row[b"status"]))
row[b"status"] = _get_status_strings(status)
row[b"quality"] = get_quality_string(quality)
                if try_int(row[b'airdate'], 1) > 693595:  # i.e. on/after 1900-01-01 (ordinal 693596)
dt_episode_airs = sbdatetime.sbdatetime.convert_to_setting(
                        network_timezones.parse_date_time(row[b'airdate'], show_obj.airs, show_obj.network))
row[b'airdate'] = sbdatetime.sbdatetime.sbfdate(dt_episode_airs, d_preset=dateFormat)
else:
row[b'airdate'] = 'Never'
if cur_episode not in seasons:
seasons[cur_episode] = {}
seasons[cur_episode] = row
return _responds(RESULT_SUCCESS, seasons)
# noinspection PyAbstractClass
class CMDShowSetQuality(ApiCall):
_help = {
"desc": "Set the quality setting of a show. If no quality is provided, the default user setting is used.",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
"initial": {"desc": "The initial quality of the show"},
"archive": {"desc": "The archive quality of the show"},
}
}
def __init__(self, args, kwargs):
super(CMDShowSetQuality, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
self.initial, args = self.check_params(args, kwargs, "initial", [], False, "list", ALLOWED_QUALITY_LIST)
self.archive, args = self.check_params(args, kwargs, "archive", [], False, "list", PREFERRED_QUALITY_LIST)
def run(self):
""" Set the quality setting of a show. If no quality is provided, the default user setting is used. """
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if not show_obj:
return _responds(RESULT_FAILURE, msg="Show not found")
# use default quality as a fail-safe
new_quality = int(sickbeard.QUALITY_DEFAULT)
i_quality_id = []
a_quality_id = []
if self.initial:
# noinspection PyTypeChecker
for quality in self.initial:
i_quality_id.append(QUALITY_MAP[quality])
if self.archive:
# noinspection PyTypeChecker
for quality in self.archive:
a_quality_id.append(QUALITY_MAP[quality])
if i_quality_id or a_quality_id:
new_quality = Quality.combineQualities(i_quality_id, a_quality_id)
show_obj.quality = new_quality
return _responds(RESULT_SUCCESS,
msg=show_obj.name + " quality has been changed to " + get_quality_string(show_obj.quality))
# noinspection PyAbstractClass
class CMDShowStats(ApiCall):
_help = {
"desc": "Get episode statistics for a given show",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
}
}
def __init__(self, args, kwargs):
super(CMDShowStats, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
def run(self):
""" Get episode statistics for a given show """
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if not show_obj:
return _responds(RESULT_FAILURE, msg="Show not found")
# show stats
episode_status_counts_total = {"total": 0}
for status in statusStrings:
if status in [UNKNOWN, DOWNLOADED, SNATCHED, SNATCHED_PROPER, ARCHIVED]:
continue
episode_status_counts_total[status] = 0
# add all the downloaded qualities
episode_qualities_counts_download = {"total": 0}
for statusCode in Quality.DOWNLOADED + Quality.ARCHIVED:
status, quality = Quality.splitCompositeStatus(statusCode)
if quality in [Quality.NONE]:
continue
episode_qualities_counts_download[statusCode] = 0
# add all snatched qualities
episode_qualities_counts_snatch = {"total": 0}
for statusCode in Quality.SNATCHED + Quality.SNATCHED_PROPER:
status, quality = Quality.splitCompositeStatus(statusCode)
if quality in [Quality.NONE]:
continue
episode_qualities_counts_snatch[statusCode] = 0
main_db_con = db.DBConnection(row_type="dict")
sql_results = main_db_con.select("SELECT status, season FROM tv_episodes WHERE season != 0 AND showid = ?",
[self.indexerid])
# the main loop that goes through all episodes
for row in sql_results:
status, quality = Quality.splitCompositeStatus(int(row[b"status"]))
episode_status_counts_total["total"] += 1
if status in Quality.DOWNLOADED + Quality.ARCHIVED:
episode_qualities_counts_download["total"] += 1
# noinspection PyTypeChecker
episode_qualities_counts_download[int(row[b"status"])] += 1
elif status in Quality.SNATCHED + Quality.SNATCHED_PROPER:
episode_qualities_counts_snatch["total"] += 1
# noinspection PyTypeChecker
episode_qualities_counts_snatch[int(row[b"status"])] += 1
elif status > 0: # we don't count NONE = 0 = N/A
episode_status_counts_total[status] += 1
# the outgoing container
episodes_stats = {"downloaded": {}}
# turning codes into strings
for statusCode in episode_qualities_counts_download:
if statusCode == "total":
episodes_stats["downloaded"]["total"] = episode_qualities_counts_download[statusCode]
continue
status, quality = Quality.splitCompositeStatus(int(statusCode))
status_string = Quality.qualityStrings[quality].lower().replace(" ", "_").replace("(", "").replace(")", "")
episodes_stats["downloaded"][status_string] = episode_qualities_counts_download[statusCode]
episodes_stats["snatched"] = {}
# turning codes into strings
# and combining proper and normal
for statusCode in episode_qualities_counts_snatch:
if statusCode == "total":
episodes_stats["snatched"]["total"] = episode_qualities_counts_snatch[statusCode]
continue
status, quality = Quality.splitCompositeStatus(int(statusCode))
status_string = Quality.qualityStrings[quality].lower().replace(" ", "_").replace("(", "").replace(")", "")
if Quality.qualityStrings[quality] in episodes_stats["snatched"]:
episodes_stats["snatched"][status_string] += episode_qualities_counts_snatch[statusCode]
else:
episodes_stats["snatched"][status_string] = episode_qualities_counts_snatch[statusCode]
for statusCode in episode_status_counts_total:
if statusCode == "total":
episodes_stats["total"] = episode_status_counts_total[statusCode]
continue
# status, quality = Quality.splitCompositeStatus(int(statusCode))
status_string = statusStrings[statusCode].lower().replace(" ", "_").replace("(", "").replace(
")", "")
episodes_stats[status_string] = episode_status_counts_total[statusCode]
return _responds(RESULT_SUCCESS, episodes_stats)
# noinspection PyAbstractClass
class CMDShowUpdate(ApiCall):
_help = {
"desc": "Update a show in SickChill",
"requiredParameters": {
"indexerid": {"desc": "Unique ID of a show"},
},
"optionalParameters": {
"tvdbid": {"desc": "thetvdb.com unique ID of a show"},
}
}
def __init__(self, args, kwargs):
super(CMDShowUpdate, self).__init__(args, kwargs)
self.indexerid, args = self.check_params(args, kwargs, "indexerid", None, True, "int", [])
def run(self):
""" Update a show in SickChill """
show_obj = Show.find(sickbeard.showList, int(self.indexerid))
if not show_obj:
return _responds(RESULT_FAILURE, msg="Show not found")
try:
sickbeard.showQueueScheduler.action.update_show(show_obj, True) # @UndefinedVariable
            return _responds(RESULT_SUCCESS, msg=str(show_obj.name) + " has been queued to be updated")
except CantUpdateShowException as e:
logger.log("API::Unable to update show: {0}".format(e), logger.DEBUG)
return _responds(RESULT_FAILURE, msg="Unable to update " + str(show_obj.name))
# noinspection PyAbstractClass
class CMDShows(ApiCall):
_help = {
"desc": "Get all shows in SickChill",
"optionalParameters": {
"sort": {"desc": "The sorting strategy to apply to the list of shows"},
"paused": {"desc": "True: show paused, False: show un-paused, otherwise show all"},
},
}
def __init__(self, args, kwargs):
super(CMDShows, self).__init__(args, kwargs)
self.sort, args = self.check_params(args, kwargs, "sort", "id", False, "string", ["id", "name"])
self.paused, args = self.check_params(args, kwargs, "paused", None, False, "bool", [])
def run(self):
""" Get all shows in SickChill """
shows = {}
for curShow in sickbeard.showList:
# If self.paused is None: show all, 0: show un-paused, 1: show paused
if self.paused is not None and self.paused != curShow.paused:
continue
show_dict = {
"paused": (0, 1)[curShow.paused],
"quality": get_quality_string(curShow.quality),
"language": curShow.lang,
"air_by_date": (0, 1)[curShow.air_by_date],
"sports": (0, 1)[curShow.sports],
"anime": (0, 1)[curShow.anime],
"indexerid": curShow.indexerid,
"tvdbid": curShow.indexerid,
"network": curShow.network,
"show_name": curShow.name,
"status": curShow.status,
"subtitles": (0, 1)[curShow.subtitles],
}
            if try_int(curShow.nextaired, 1) > 693595:  # i.e. on/after 1900-01-01 (ordinal 693596)
dt_episode_airs = sbdatetime.sbdatetime.convert_to_setting(
network_timezones.parse_date_time(curShow.nextaired, curShow.airs, show_dict['network']))
show_dict['next_ep_airdate'] = sbdatetime.sbdatetime.sbfdate(dt_episode_airs, d_preset=dateFormat)
else:
show_dict['next_ep_airdate'] = ''
show_dict["cache"] = CMDShowCache((), {"indexerid": curShow.indexerid}).run()["data"]
if not show_dict["network"]:
show_dict["network"] = ""
if self.sort == "name":
shows[curShow.name] = show_dict
else:
shows[curShow.indexerid] = show_dict
return _responds(RESULT_SUCCESS, shows)
# noinspection PyAbstractClass
class CMDShowsStats(ApiCall):
_help = {"desc": "Get the global shows and episodes statistics"}
def __init__(self, args, kwargs):
super(CMDShowsStats, self).__init__(args, kwargs)
def run(self):
""" Get the global shows and episodes statistics """
stats = Show.overall_stats()
return _responds(RESULT_SUCCESS, {
'ep_downloaded': stats['episodes']['downloaded'],
'ep_snatched': stats['episodes']['snatched'],
'ep_total': stats['episodes']['total'],
'shows_active': stats['shows']['active'],
'shows_total': stats['shows']['total'],
})
# WARNING: never define a cmd call string that contains a "_" (underscore)
# this is reserved for cmd indexes used while cmd chaining
# WARNING: never define a param name that contains a "." (dot)
# this is reserved for cmd namespaces used while cmd chaining
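# Illustrative chained request (hypothetical syntax pieced together from the
# two warnings above, not a verbatim transcript): commands are separated by
# "|", duplicates are disambiguated with a "_<index>" suffix, and per-command
# parameters are namespaced with "<cmd>.":
#   ?cmd=show_1|show_2&show_1.indexerid=101&show_2.indexerid=202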
function_mapper = {
"help": CMDHelp,
"future": CMDComingEpisodes,
"episode": CMDEpisode,
"episode.search": CMDEpisodeSearch,
"episode.setstatus": CMDEpisodeSetStatus,
"episode.subtitlesearch": CMDSubtitleSearch,
"exceptions": CMDExceptions,
"history": CMDHistory,
"history.clear": CMDHistoryClear,
"history.trim": CMDHistoryTrim,
"failed": CMDFailed,
"backlog": CMDBacklog,
"logs": CMDLogs,
"logs.clear": CMDLogsClear,
"sb": CMDSickBeard,
"postprocess": CMDPostProcess,
"sb.addrootdir": CMDSickBeardAddRootDir,
"sb.checkversion": CMDSickBeardCheckVersion,
"sb.checkscheduler": CMDSickBeardCheckScheduler,
"sb.deleterootdir": CMDSickBeardDeleteRootDir,
"sb.getdefaults": CMDSickBeardGetDefaults,
"sb.getmessages": CMDSickBeardGetMessages,
"sb.getrootdirs": CMDSickBeardGetRootDirs,
"sb.pausebacklog": CMDSickBeardPauseBacklog,
"sb.ping": CMDSickBeardPing,
"sb.restart": CMDSickBeardRestart,
"sb.dailysearch": CMDDailySearch,
"sb.propersearch": CMDProperSearch,
"sb.subtitlesearch": CMDFullSubtitleSearch,
"sb.searchindexers": CMDSickBeardSearchIndexers,
"sb.searchtvdb": CMDSickBeardSearchTVDB,
"sb.searchtvrage": CMDSickBeardSearchTVRAGE,
"sb.setdefaults": CMDSickBeardSetDefaults,
"sb.update": CMDSickBeardUpdate,
"sb.shutdown": CMDSickBeardShutdown,
"show": CMDShow,
"show.addexisting": CMDShowAddExisting,
"show.addnew": CMDShowAddNew,
"show.cache": CMDShowCache,
"show.delete": CMDShowDelete,
"show.getquality": CMDShowGetQuality,
"show.getposter": CMDShowGetPoster,
"show.getbanner": CMDShowGetBanner,
"show.getnetworklogo": CMDShowGetNetworkLogo,
"show.getfanart": CMDShowGetFanArt,
"show.pause": CMDShowPause,
"show.refresh": CMDShowRefresh,
"show.seasonlist": CMDShowSeasonList,
"show.seasons": CMDShowSeasons,
"show.setquality": CMDShowSetQuality,
"show.stats": CMDShowStats,
"show.update": CMDShowUpdate,
"shows": CMDShows,
"shows.stats": CMDShowsStats
}
| coderbone/SickRage-alt | sickchill/views/api/webapi.py | Python | gpl-3.0 | 118,111 |
from makerUtilities import writeFile
from makerUtilities import readFile
import json
import os
def scaffold(systemDir, defaultTheme):
return (
"""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<script src='file://"""
+ os.path.join(systemDir, "jquery.min.js")
+ """'></script>
<style type="text/css">
html {
background: -webkit-gradient(linear, left top, left bottom, from(#000), to(rgb(93,94,120)));
background-attachment:fixed;
}
body {
font-family: "Helvetica Neue";
font-size: 14px;
width:auto;
/* max-width:694px; */
color:#fff;
padding:20px 20px;
-webkit-transform: perspective( 600px );
}
a {
color: #ddd;
}
.thumbnail a {
text-decoration:none;
color:#000;
cursor:default;
}
p {
font-weight:lighter;
color:#fff;
letter-spacing:0.09em;
float:left;
font-size:0.9em;
line-height:1.45em;
text-align:left;
margin:-6px 0px 24px 10px;
}
h5 {
font-weight:lighter;
letter-spacing:0.050em;
margin:-28px 0px 0px 8px;
line-height:3em;
font-size:22px;
cursor:default;
}
img {
border:1px solid #333;
width:100%;
height:100%;
-webkit-box-reflect: below 0px -webkit-gradient(linear, left top, left bottom, from(transparent), color-stop(50%, transparent), to(rgba(0,0,0,0.2)));
-webkit-transform: perspective( 600px ) rotateY( 0deg);
margin-bottom:40px;
}
.row {
width:100%;
margin:0px 0px 40px 10px;
float:left;
clear:both;
}
.thumbnail {
width:17%;
padding:20px 20px 10px 20px;
margin:0px 20px 0px 0px;
float:left;
clear:right;
background:none;
}
.thumbnail img {
height:100px;
}
.thumbnail p {
text-align:center;
margin:-24px 0px 0px 0px;
width:100%;
font-size:14px;
cursor:default;
}
.thumbnail.selected {
border:1px solid #777;
padding:20px 20px 10px 20px;
-webkit-border-radius:10px;
background: -webkit-gradient(linear, left top, left bottom, from(rgba(140,140,140,0.1)), to(rgba(170,170,170,0.2)));
}
.info {
width:92%;
float:left;
clear:both;
display:none;
margin:40px 10px 0px 10px;
}
.info p {
float:left;
clear:right;
cursor:default;
}
.info img {
width:280px;
height:auto;
float:left;
clear:right;
margin:0px 48px 0px 8px;
-webkit-transform: perspective( 600px ) rotateY( 10deg );
/*
-webkit-transition: width, 0.5s;
*/
}
/*
.info img:hover {
width:320px;
-webkit-transform: perspective( 600px ) rotateY( 0deg );
}
*/
.info h5 {
margin-top:0px;
}
.info h5, p {
width:380px;
float:left;
}
a.button {
cursor:default;
color:#000;
}
a.button:active {
color:#000;
background: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#bbb));
}
</style>
<script type="text/javascript">
$(document).ready(function(){
$('#"""
+ defaultTheme
+ """').addClass('selected');
$('#info-"""
+ defaultTheme
+ """').show();
$('.thumbnail').click(function(){
$('.info').hide();
$('.thumbnail').removeClass('selected')
$(this).addClass('selected');
$($(this).data('info')).show();
});
});
</script>
</head>
<body>
"""
+ createThumbnails(systemDir)
+ createInfo(systemDir)
+ """
</body>
</html>
"""
)
def buildView(systemDir, viewPath):
writeFile(
os.path.join(viewPath, "yourTemplates.html"),
scaffold(systemDir, defaultTemplate()),
)
return os.path.join(viewPath, "yourTemplates.html")
def defaultTemplate():
# ===========================================================================
# This is used to set the default template for the application
# ===========================================================================
return "Simple-Markdown"
def createThumbnails(systemDir):
thumbnails = "<div class='row'>\n"
for template in os.listdir(os.path.join(systemDir, "templates")):
if not template.startswith("."):
thumbnails += makeThumbnail(systemDir, template)
thumbnails += "</div>"
return thumbnails
def createInfo(systemDir):
info = "<div class='row'>\n"
for template in os.listdir(os.path.join(systemDir, "templates")):
if not template.startswith("."):
s = readFile(
os.path.join(systemDir, "templates", template, "parts", "info.json")
)
            data = json.loads(s)  # info.json holds JSON; eval() would fail on true/false/null
info += makeInfo(systemDir, template, data)
info += "</div>"
return info
def makeInfo(systemDir, templateName, data):
previewImage = os.path.join(
systemDir, "templates", templateName, "parts/preview.jpg"
)
info = (
"""
<div class="info" id="info-"""
+ data["Title"]
+ """">
<img src='"""
+ previewImage
+ """' />
<h5>"""
+ data["Title"]
+ """</h5>
<p>"""
+ data["Description"]
+ """<br /><br />
Credit: """
+ data["Credit"]
+ """<br />
Support: <a href='"""
+ data["Support"]
+ """'>www.makercms.org</a><br />
</p>
</div>
"""
)
return info
def makeThumbnail(systemDir, templateName):
previewImage = os.path.join(
systemDir, "templates", templateName, "parts/preview.jpg"
)
thumbnail = (
"""
<div class='thumbnail' id='"""
+ templateName
+ """' data-info='#info-"""
+ templateName
+ """'>
<a href='--"""
+ templateName
+ """--'>
<img src='"""
+ previewImage
+ """' />
<p>"""
+ templateName
+ """</p></a>
</div>
"""
)
return thumbnail
| geraldspreer/the-maker | makerTemplateViewBuilder.py | Python | gpl-3.0 | 7,622 |
'''
Created on 11 May, 2016
@author: linyufeng
'''
from utils.TimeZoneConverter import TimeZoneConverter
class Asset(object):
'''
    Container for the values that will be inserted into the Asset table.
'''
    convert = TimeZoneConverter()
def __init__(self, startTime, endTime, directory, fileName, fileType, duration, sequence):
self.startTime = self.convert.victoriaToUCT(startTime)
self.endTime = self.convert.victoriaToUCT(endTime)
self.directory = directory
self.fileName = fileName
self.fileType = fileType
self.duration = int(duration)
self.sequence = int(sequence)
def getStartTime(self):
return self.startTime
def getEndTime(self):
return self.endTime
def getDirectory(self):
return self.directory
def getFileName(self):
return self.fileName
def getFileType(self):
return self.fileType
def getDuration(self):
return self.duration
def getSequence(self):
return self.sequence
    def __eq__(self, other):
        # `sequence` is not part of the comparison, matching the original
        # nested-if implementation
        if isinstance(other, self.__class__):
            return (self.startTime == other.startTime
                    and self.endTime == other.endTime
                    and self.directory == other.directory
                    and self.duration == other.duration
                    and self.fileName == other.fileName
                    and self.fileType == other.fileType)
        return False
| ericlyf/screenly-tools-schedulespreadsheet | src/model/Asset.py | Python | gpl-3.0 | 1,601 |
import os
import sys
config = {
#########################################################################
######## MACOSX GENERIC CONFIG KEYS/VAlUES
'default_actions': [
'clobber',
'clone-tools',
'checkout-sources',
'build',
'upload-files',
'sendchange',
'check-test',
],
"buildbot_json_path": "buildprops.json",
'exes': {
'python2.7': sys.executable,
"buildbot": "/tools/buildbot/bin/buildbot",
},
'app_ini_path': '%(obj_dir)s/dist/bin/application.ini',
'enable_ccache': True,
'vcs_share_base': '/builds/hg-shared',
'objdir': 'obj-firefox',
'tooltool_script': ["/builds/tooltool.py"],
'tooltool_bootstrap': "setup.sh",
'enable_talos_sendchange': False,
'enable_unittest_sendchange': False,
#########################################################################
#########################################################################
###### 64 bit specific ######
'base_name': 'B2G_%(branch)s_macosx64_gecko',
'platform': 'macosx64_gecko',
'stage_platform': 'macosx64_gecko',
'stage_product': 'b2g',
'env': {
'MOZBUILD_STATE_PATH': os.path.join(os.getcwd(), '.mozbuild'),
'MOZ_AUTOMATION': '1',
'HG_SHARE_BASE_DIR': '/builds/hg-shared',
'MOZ_OBJDIR': 'obj-firefox',
'CHOWN_ROOT': '~/bin/chown_root',
'CHOWN_REVERT': '~/bin/chown_revert',
'TINDERBOX_OUTPUT': '1',
'TOOLTOOL_CACHE': '/builds/tooltool_cache',
'TOOLTOOL_HOME': '/builds',
'MOZ_CRASHREPORTER_NO_REPORT': '1',
'CCACHE_DIR': '/builds/ccache',
'CCACHE_COMPRESS': '1',
'CCACHE_UMASK': '002',
'LC_ALL': 'C',
## 64 bit specific
'PATH': '/tools/python/bin:/tools/buildbot/bin:/opt/local/bin:/usr/bin:'
'/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin',
##
},
'upload_env': {
# stage_server is dictated from build_pool_specifics.py
'UPLOAD_HOST': '%(stage_server)s',
'UPLOAD_USER': '%(stage_username)s',
'UPLOAD_SSH_KEY': '/Users/cltbld/.ssh/%(stage_ssh_key)s',
'UPLOAD_TO_TEMP': '1',
},
"check_test_env": {
'MINIDUMP_STACKWALK': '%(abs_tools_dir)s/breakpad/osx64/minidump_stackwalk',
'MINIDUMP_SAVE_PATH': '%(base_work_dir)s/minidumps',
},
'src_mozconfig': 'b2g/config/mozconfigs/macosx64_gecko/nightly',
'tooltool_manifest_src': 'b2g/config/tooltool-manifests/macosx64/releng.manifest',
#########################################################################
}
| cstipkovic/spidermonkey-research | testing/mozharness/configs/b2g/desktop_macosx64.py | Python | mpl-2.0 | 2,633 |
from tests.support.asserts import assert_error, assert_dialog_handled
from tests.support.fixtures import create_dialog
from tests.support.inline import inline
alert_doc = inline("<script>window.alert()</script>")
def get_window_rect(session):
return session.transport.send(
"GET", "session/{session_id}/window/rect".format(**vars(session)))
def test_handle_prompt_dismiss_and_notify():
"""TODO"""
def test_handle_prompt_accept_and_notify():
"""TODO"""
def test_handle_prompt_ignore():
"""TODO"""
def test_handle_prompt_accept(new_session, add_browser_capabilites):
_, session = new_session({"capabilities": {"alwaysMatch": add_browser_capabilites({"unhandledPromptBehavior": "accept"})}})
session.url = inline("<title>WD doc title</title>")
create_dialog(session)("alert", text="dismiss #1", result_var="dismiss1")
response = get_window_rect(session)
assert response.status == 200
assert_dialog_handled(session, "dismiss #1")
create_dialog(session)("confirm", text="dismiss #2", result_var="dismiss2")
response = get_window_rect(session)
assert response.status == 200
assert_dialog_handled(session, "dismiss #2")
create_dialog(session)("prompt", text="dismiss #3", result_var="dismiss3")
response = get_window_rect(session)
assert response.status == 200
assert_dialog_handled(session, "dismiss #3")
def test_handle_prompt_missing_value(session, create_dialog):
session.url = inline("<title>WD doc title</title>")
create_dialog("alert", text="dismiss #1", result_var="dismiss1")
response = get_window_rect(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, "dismiss #1")
create_dialog("confirm", text="dismiss #2", result_var="dismiss2")
response = get_window_rect(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, "dismiss #2")
create_dialog("prompt", text="dismiss #3", result_var="dismiss3")
response = get_window_rect(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, "dismiss #3")
| mbrubeck/servo | tests/wpt/web-platform-tests/webdriver/tests/get_window_rect/user_prompts.py | Python | mpl-2.0 | 2,153 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
from string import Template
from uuid import uuid4
import pytest
from moztelemetry.store import InMemoryStore
from moztelemetry.dataset import Dataset
from moztelemetry.spark import get_pings
@pytest.fixture()
def test_store(monkeypatch):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
with open(os.path.join(data_dir, 'schema.json')) as s:
schema = json.loads(s.read())
dimensions = [f['field_name'] for f in schema['dimensions']]
dataset = Dataset('test-bucket', dimensions, InMemoryStore('test-bucket'))
@staticmethod
def from_source(source_name):
return dataset
monkeypatch.setattr(Dataset, 'from_source', from_source)
return dataset.store
def upload_ping(store, value, **kwargs):
"""Upload value to a given store"""
ping_key_template = Template('$submission_date/$source_name/'
'$source_version/$doc_type/$app/$channel/'
'$version/$build_id/$filename')
dimensions = {
'submission_date': '20160805',
'source_name': 'telemetry',
'source_version': '4',
'doc_type': 'saved_session',
'app': 'Firefox',
'channel': 'nightly',
'version': '51.0a1',
'build_id': '20160801074053',
'filename': uuid4()
}
dimensions.update(kwargs)
key = ping_key_template.substitute(**dimensions)
store.store[key] = value
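# With the default dimensions above, a stored key looks like:
#   20160805/telemetry/4/saved_session/Firefox/nightly/51.0a1/20160801074053/<uuid4>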
@pytest.fixture
def mock_message_parser(monkeypatch):
# monkeypatch the default `decoder` argument of `records`
monkeypatch.setattr('moztelemetry.heka_message_parser.parse_heka_message',
lambda message: (message.getvalue(),))
test_data_for_exact_match = [
('doc_type', 'saved_session', 'main'),
('app', 'Firefox', 'Thunderbird'),
('version', '48.0', '46.0'),
('source_name', 'telemetry', 'other source'),
('source_version', '4', '2'),
]
@pytest.mark.slow
@pytest.mark.parametrize('filter_name,exact,wrong', test_data_for_exact_match)
def test_get_pings_by_exact_match(test_store, mock_message_parser, spark_context,
filter_name, exact, wrong):
upload_ping(test_store, 'value1', **{filter_name: exact})
upload_ping(test_store, 'value2', **{filter_name: wrong})
pings = get_pings(spark_context, **{filter_name: exact})
assert pings.collect() == ['value1']
test_data_for_range_match = [
('submission_date', '20160110', '20150101', '20160101', '20160120'),
('build_id', '20160801074050', '20160801074055', '20160801074049', '20160801074052'),
]
@pytest.mark.slow
@pytest.mark.parametrize('filter_name,exact,wrong,start,end', test_data_for_range_match)
def test_get_pings_by_range(test_store, mock_message_parser, spark_context,
filter_name, exact, wrong, start, end):
upload_ping(test_store, 'value1', **{filter_name: exact})
upload_ping(test_store, 'value2', **{filter_name: wrong})
pings = get_pings(spark_context, **{filter_name: exact})
assert pings.collect() == ['value1']
pings = get_pings(spark_context, **{filter_name: (start, end)})
assert pings.collect() == ['value1']
@pytest.mark.slow
def test_get_pings_multiple_by_range(test_store, mock_message_parser, spark_context):
upload_ping(test_store, 'value1', **{f[0]: f[1] for f in test_data_for_range_match})
upload_ping(test_store, 'value2', **{f[0]: f[2] for f in test_data_for_range_match})
pings = get_pings(spark_context, **{f[0]: f[1] for f in test_data_for_range_match})
assert pings.collect() == ['value1']
pings = get_pings(spark_context, **{f[0]: (f[3], f[4]) for f in test_data_for_range_match})
assert pings.collect() == ['value1']
def test_get_pings_fraction(test_store, mock_message_parser, spark_context):
for i in range(1, 10+1):
upload_ping(test_store, 'value', build_id=str(i))
pings = get_pings(spark_context)
assert pings.count() == 10
pings = get_pings(spark_context, fraction=0.1)
assert pings.count() == 1
def test_get_pings_wrong_schema(test_store, mock_message_parser, spark_context):
with pytest.raises(ValueError):
        get_pings(spark_context, schema=1)
def test_get_pings_multiple_filters(test_store, mock_message_parser, spark_context):
filters = dict(submission_date='20160101', channel='beta')
upload_ping(test_store, 'value1', **filters)
filters['app'] = 'Thunderbird'
upload_ping(test_store, 'value2', **filters)
pings = get_pings(spark_context, **filters)
assert pings.collect() == ['value2']
def test_get_pings_none_filter(test_store, mock_message_parser, spark_context):
upload_ping(test_store, 'value1', app='Firefox')
    upload_ping(test_store, 'value2', app='Thunderbird')
pings = get_pings(spark_context, app=None)
assert sorted(pings.collect()) == ['value1', 'value2']
pings = get_pings(spark_context, app='*')
assert sorted(pings.collect()) == ['value1', 'value2']
| whd/python_moztelemetry | tests/test_spark.py | Python | mpl-2.0 | 5,208 |
import pytest
import tempfile
import os
import ConfigParser
def getConfig(optionname,thedefault,section,configfile):
"""read an option from a config file or set a default
send 'thedefault' as the data class you want to get a string back
i.e. 'True' will return a string
True will return a bool
1 will return an int
"""
#getConfig('something','adefaultvalue')
retvalue=thedefault
opttype=type(thedefault)
if os.path.isfile(configfile):
config = ConfigParser.ConfigParser()
config.readfp(open(configfile))
if config.has_option(section,optionname):
if opttype==bool:
retvalue=config.getboolean(section,optionname)
elif opttype==int:
retvalue=config.getint(section,optionname)
elif opttype==float:
retvalue=config.getfloat(section,optionname)
else:
retvalue=config.get(section,optionname)
return retvalue
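# Illustrative call (hypothetical option name): an int default makes getConfig
# parse the stored value with getint(), a bool default with getboolean(), etc.
#   port = getConfig('loginput_port', 8080, 'mozdef', 'setup.cfg')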
@pytest.fixture
def options():
options=dict()
configFile='setup.cfg'
if pytest.config.inifile:
configFile=str(pytest.config.inifile)
options["esserver"]=getConfig('esserver','localhost:9200','mozdef',configFile)
options["loginput"]=getConfig('loginput','localhost:8080','mozdef',configFile)
options["webuiurl"]=getConfig('webuiurl','http://localhost/','mozdef',configFile)
options["kibanaurl"]=getConfig('kibanaurl','http://localhost:9090/','mozdef',configFile)
if pytest.config.option.verbose > 0:
options["verbose"]=True
print('Using options: \n\t%r' % options)
else:
options["verbose"]=False
return options
@pytest.fixture()
def cleandir():
newpath = tempfile.mkdtemp()
os.chdir(newpath)
def pytest_report_header(config):
if config.option.verbose > 0:
return ["reporting verbose test output"]
#def pytest_addoption(parser):
#parser.addoption("--esserver",
#action="store",
#default="localhost:9200",
#help="elastic search servers to use for testing")
#parser.addoption("--mozdefserver",
#action="store",
#default="localhost:8080",
#help="mozdef server to use for testing") | netantho/MozDef | tests/conftest.py | Python | mpl-2.0 | 2,263 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Test cases for the TAAR Hybrid recommender
"""
from taar.recommenders.hybrid_recommender import CuratedRecommender
from taar.recommenders.hybrid_recommender import HybridRecommender
from taar.recommenders.ensemble_recommender import EnsembleRecommender
from taar.recommenders.s3config import TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY
# from taar.recommenders.hybrid_recommender import ENSEMBLE_WEIGHTS
from .test_ensemblerecommender import install_mock_ensemble_data
from .mocks import MockRecommenderFactory
import json
from moto import mock_s3
import boto3
def install_no_curated_data(ctx):
ctx = ctx.child()
conn = boto3.resource("s3", region_name="us-west-2")
conn.create_bucket(Bucket=TAAR_WHITELIST_BUCKET)
conn.Object(TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY).put(Body="")
return ctx
def install_mock_curated_data(ctx):
mock_data = []
for i in range(20):
mock_data.append(str(i) * 16)
ctx = ctx.child()
conn = boto3.resource("s3", region_name="us-west-2")
conn.create_bucket(Bucket=TAAR_WHITELIST_BUCKET)
conn.Object(TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY).put(
Body=json.dumps(mock_data)
)
return ctx
def install_ensemble_fixtures(ctx):
ctx = install_mock_ensemble_data(ctx)
factory = MockRecommenderFactory()
ctx["recommender_factory"] = factory
ctx["recommender_map"] = {
"collaborative": factory.create("collaborative"),
"similarity": factory.create("similarity"),
"locale": factory.create("locale"),
}
ctx["ensemble_recommender"] = EnsembleRecommender(ctx.child())
return ctx
@mock_s3
def test_curated_can_recommend(test_ctx):
ctx = install_no_curated_data(test_ctx)
r = CuratedRecommender(ctx)
# CuratedRecommender will always recommend something no matter
# what
assert r.can_recommend({})
assert r.can_recommend({"installed_addons": []})
@mock_s3
def test_curated_recommendations(test_ctx):
ctx = install_mock_curated_data(test_ctx)
r = CuratedRecommender(ctx)
# CuratedRecommender will always recommend something no matter
# what
for LIMIT in range(1, 5):
guid_list = r.recommend({"client_id": "000000"}, limit=LIMIT)
# The curated recommendations should always return with some kind
# of recommendations
assert len(guid_list) == LIMIT
@mock_s3
def test_hybrid_recommendations(test_ctx):
# verify that the recommendations mix the curated and
# ensemble results
ctx = install_mock_curated_data(test_ctx)
ctx = install_ensemble_fixtures(ctx)
r = HybridRecommender(ctx)
# Test that we can generate lists of results
for LIMIT in range(4, 8):
guid_list = r.recommend({"client_id": "000000"}, limit=LIMIT)
# The curated recommendations should always return with some kind
# of recommendations
assert len(guid_list) == LIMIT
# Test that the results are actually mixed
guid_list = r.recommend({"client_id": "000000"}, limit=4)
# A mixed list will have two recommendations with weight > 1.0
# (ensemble) and 2 with exactly weight 1.0 from the curated list
assert guid_list[0][1] > 1.0
assert guid_list[1][1] > 1.0
assert guid_list[2][1] == 1.0
assert guid_list[3][1] == 1.0
| maurodoglio/taar | tests/test_hybrid_recommender.py | Python | mpl-2.0 | 3,508 |
import webbrowser
def show_trailer(title, youtube_url):
    # Open browser and play trailer
    webbrowser.open(youtube_url)
def show_info(title, storyline):
    # Print movie information (parameters taken from the original
    # "title"/"storyline" stub notes; the stub had no function bodies)
    print(title)
    print(storyline)
| teichopsia-/take_brake | movies.py | Python | mpl-2.0 | 147 |
from marshmallow import Schema, fields, post_dump
class BasePaging(Schema):
has_next = fields.Boolean()
has_prev = fields.Boolean()
next_num = fields.Integer()
prev_num = fields.Integer()
page = fields.Integer()
pages = fields.Integer()
per_page = fields.Integer()
total = fields.Integer()
@post_dump(pass_many=False)
def move_to_meta(self, data):
items = data.pop("items")
# FIXME: add support for paging links
return {
"meta": data,
"data": items.get("data", []),
"links": items.get("links", [])
}
def makePaginationSchema(itemsCls, field_cls=fields.Nested):
return type("{}Paging".format(itemsCls.__class__.__name__),
(BasePaging, ), dict(items=field_cls(itemsCls, many=True)))
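# Illustrative usage (hypothetical ItemSchema): move_to_meta expects the nested
# schema to emit a JSON-API style envelope ({"data": [...], "links": ...}), and
# the dump target to be a flask-sqlalchemy style Pagination object:
#   ItemPaging = makePaginationSchema(ItemSchema)
#   payload = ItemPaging().dump(Item.query.paginate(page=1, per_page=20))
#   # -> {"meta": {"page": 1, "pages": ..., "total": ...}, "data": [...], "links": [...]}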
| beavyHQ/beavy | beavy/common/paging_schema.py | Python | mpl-2.0 | 818 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from mo_dots import wrap
from mo_logs import strings
from pyLibrary.aws import s3
def _key2etl(key):
"""
CONVERT S3 KEY TO ETL HEADER
S3 NAMING CONVENTION: a.b.c WHERE EACH IS A STEP IN THE ETL PROCESS
HOW TO DEAL WITH a->b AS AGGREGATION? b:a.c? b->c is agg: a.c:b
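    EXAMPLE (illustrative, assuming strip_extension leaves "a.b.c" unchanged):
        _key2etl("a.b.c") -> {"id": "c", "type": "join",
                              "source": {"id": "b", "type": "join",
                                         "source": {"id": "a"}}}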
"""
key = s3.strip_extension(key)
tokens = []
s = 0
i = strings.find(key, [":", "."])
while i < len(key):
tokens.append(key[s:i])
tokens.append(key[i])
s = i + 1
i = strings.find(key, [":", "."], s)
tokens.append(key[s:i])
_reverse_aggs(tokens)
# tokens.reverse()
source = {
"id": format_id(tokens[0])
}
for i in range(2, len(tokens), 2):
source = {
"id": format_id(tokens[i]),
"source": source,
"type": "join" if tokens[i - 1] == "." else "agg"
}
return wrap(source)
def _reverse_aggs(seq):
# SHOW AGGREGATION IN REVERSE ORDER (ASSUME ONLY ONE)
for i in range(1, len(seq), 2):
if seq[i] == ":":
seq[i - 1], seq[i + 1] = seq[i + 1], seq[i - 1]
def format_id(value):
"""
:param value:
:return: int() IF POSSIBLE
"""
try:
return int(value)
except Exception:
return unicode(value)
def lt(l, r):
"""
:param l: left key
:param r: right key
:return: True if l<r
"""
if r is None or l is None:
return True
for ll, rr in zip(l, r):
if ll < rr:
return True
elif ll > rr:
return False
return False
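# Illustrative behaviour (derived from the code above): keys compare
# lexicographically, element by element; equal keys are not "less than",
# and a missing key on either side compares as True.
#   lt((1, 2), (1, 3))  # -> True
#   lt((1, 2), (1, 2))  # -> False
#   lt(None, (1, 2))    # -> True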
| klahnakoski/MySQL-to-S3 | mysql_to_s3/__init__.py | Python | mpl-2.0 | 1,965 |
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import apps
from .choices import NotifyLevel
from taiga.base.utils.text import strip_lines
def attach_watchers_to_queryset(queryset, as_field="watchers"):
"""Attach watching user ids to each object of the queryset.
:param queryset: A Django queryset object.
:param as_field: Attach the watchers as an attribute with this name.
:return: Queryset object with the additional `as_field` field.
"""
model = queryset.model
type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(model)
sql = ("""SELECT array(SELECT user_id
FROM notifications_watched
WHERE notifications_watched.content_type_id = {type_id}
AND notifications_watched.object_id = {tbl}.id)""")
sql = sql.format(type_id=type.id, tbl=model._meta.db_table)
qs = queryset.extra(select={as_field: sql})
return qs
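# Illustrative usage (UserStory stands in for any watched model): every object
# in the returned queryset gains a "watchers" attribute holding the watching
# user ids, e.g.:
#   stories = attach_watchers_to_queryset(UserStory.objects.all())
#   stories[0].watchers  # -> [1, 5, 42]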
def attach_is_watcher_to_queryset(queryset, user, as_field="is_watcher"):
"""Attach is_watcher boolean to each object of the queryset.
:param user: A users.User object model
:param queryset: A Django queryset object.
:param as_field: Attach the boolean as an attribute with this name.
:return: Queryset object with the additional `as_field` field.
"""
model = queryset.model
type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(model)
sql = ("""SELECT CASE WHEN (SELECT count(*)
FROM notifications_watched
WHERE notifications_watched.content_type_id = {type_id}
AND notifications_watched.object_id = {tbl}.id
AND notifications_watched.user_id = {user_id}) > 0
THEN TRUE
ELSE FALSE
END""")
sql = sql.format(type_id=type.id, tbl=model._meta.db_table, user_id=user.id)
qs = queryset.extra(select={as_field: sql})
return qs
def attach_total_watchers_to_queryset(queryset, as_field="total_watchers"):
"""Attach total_watchers boolean to each object of the queryset.
:param user: A users.User object model
:param queryset: A Django queryset object.
:param as_field: Attach the boolean as an attribute with this name.
:return: Queryset object with the additional `as_field` field.
"""
model = queryset.model
type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(model)
sql = ("""SELECT count(*)
FROM notifications_watched
WHERE notifications_watched.content_type_id = {type_id}
AND notifications_watched.object_id = {tbl}.id""")
sql = sql.format(type_id=type.id, tbl=model._meta.db_table)
qs = queryset.extra(select={as_field: sql})
return qs
def attach_project_is_watcher_to_queryset(queryset, user, as_field="is_watcher"):
"""Attach is_watcher boolean to each object of the projects queryset.
:param user: A users.User object model
:param queryset: A Django projects queryset object.
:param as_field: Attach the boolean as an attribute with this name.
:return: Queryset object with the additional `as_field` field.
"""
model = queryset.model
type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(model)
sql = ("""SELECT CASE WHEN (SELECT count(*)
FROM notifications_notifypolicy
WHERE notifications_notifypolicy.project_id = {tbl}.id
AND notifications_notifypolicy.user_id = {user_id}
AND notifications_notifypolicy.notify_level != {ignore_notify_level}) > 0
THEN TRUE
ELSE FALSE
END""")
sql = sql.format(tbl=model._meta.db_table, user_id=user.id, ignore_notify_level=NotifyLevel.ignore)
qs = queryset.extra(select={as_field: sql})
return qs
def attach_project_total_watchers_attrs_to_queryset(queryset, as_field="total_watchers"):
"""Attach watching user ids to each project of the queryset.
:param queryset: A Django projects queryset object.
:param as_field: Attach the watchers as an attribute with this name.
:return: Queryset object with the additional `as_field` field.
"""
model = queryset.model
type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(model)
sql = ("""SELECT count(user_id)
FROM notifications_notifypolicy
WHERE notifications_notifypolicy.project_id = {tbl}.id
AND notifications_notifypolicy.notify_level != {ignore_notify_level}""")
sql = sql.format(tbl=model._meta.db_table, ignore_notify_level=NotifyLevel.ignore)
qs = queryset.extra(select={as_field: sql})
return qs
def attach_notify_level_to_project_queryset(queryset, user):
"""
Function that attach "notify_level" attribute on each queryset
result for query notification level of current user for each
project in the most efficient way.
:param queryset: A Django queryset object.
:param user: A User model object.
:return: Queryset object with the additional `as_field` field.
"""
user_id = getattr(user, "id", None) or "NULL"
default_level = NotifyLevel.notwatch
sql = strip_lines("""
COALESCE((SELECT notifications_notifypolicy.notify_level
FROM notifications_notifypolicy
WHERE notifications_notifypolicy.project_id = projects_project.id
AND notifications_notifypolicy.user_id = {user_id}),
{default_level})
""")
sql = sql.format(user_id=user_id, default_level=default_level)
return queryset.extra(select={"notify_level": sql})
| dycodedev/taiga-back | taiga/projects/notifications/utils.py | Python | agpl-3.0 | 6,784 |
from django import forms
from games.models import DoorPrizeWinner
class DoorPrizeWinnerForm(forms.ModelForm):
class Meta:
model = DoorPrizeWinner
fields = ['fan', 'event']
| brickfiestastem/brickfiesta | games/forms.py | Python | agpl-3.0 | 195 |
# -*- coding: utf-8 -*-
#
# http://www.privacyidea.org
# (c) cornelius kölbel, privacyidea.org
#
# 2020-01-30 Jean-Pierre Höhmann <jean-pierre.hohemann@netknights.it>
# Add WebAuthn token
# 2018-01-22 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add offline refill
# 2016-12-20 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add triggerchallenge endpoint
# 2016-10-23 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add subscription decorator
# 2016-09-05 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# SAML attributes on fail
# 2016-08-30 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# save client application type to database
# 2016-08-09 Cornelius Kölbel <cornelius@privacyidea.org>
# Add possibility to check OTP only
# 2015-11-19 Cornelius Kölbel <cornelius@privacyidea.org>
# Add support for transaction_id to saml_check
# 2015-06-17 Cornelius Kölbel <cornelius@privacyidea.org>
# Add policy decorator for API key requirement
# 2014-12-08 Cornelius Kölbel, <cornelius@privacyidea.org>
# Complete rewrite during flask migration
# Try to provide REST API
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """This module contains the REST API for doing authentication.
The methods are tested in the file tests/test_api_validate.py
Authentication is either done by providing a username and a password or a
serial number and a password.
**Authentication workflow**
Authentication workflow is like this:
In case of authenticating a user:
* :func:`privacyidea.lib.token.check_user_pass`
* :func:`privacyidea.lib.token.check_token_list`
* :func:`privacyidea.lib.tokenclass.TokenClass.authenticate`
* :func:`privacyidea.lib.tokenclass.TokenClass.check_pin`
* :func:`privacyidea.lib.tokenclass.TokenClass.check_otp`
In case if authenticating a serial number:
* :func:`privacyidea.lib.token.check_serial_pass`
* :func:`privacyidea.lib.token.check_token_list`
* :func:`privacyidea.lib.tokenclass.TokenClass.authenticate`
* :func:`privacyidea.lib.tokenclass.TokenClass.check_pin`
* :func:`privacyidea.lib.tokenclass.TokenClass.check_otp`
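**Example**
An illustrative sketch of the user-authentication entry point (names and
values here are assumptions, not a verbatim transcript)::
    from privacyidea.lib.user import User
    from privacyidea.lib.token import check_user_pass
    res, reply = check_user_pass(User("alice", realm="realm1"), "pin123456")
    # res is True on success; reply carries details such as the matching serial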
"""
from flask import (Blueprint, request, g, current_app)
from privacyidea.lib.user import get_user_from_param, log_used_user
from .lib.utils import send_result, getParam
from ..lib.decorators import (check_user_or_serial_in_request)
from .lib.utils import required
from privacyidea.lib.error import ParameterError
from privacyidea.lib.token import (check_user_pass, check_serial_pass,
check_otp, create_challenges_from_tokens, get_one_token)
from privacyidea.api.lib.utils import get_all_params
from privacyidea.lib.config import (return_saml_attributes, get_from_config,
return_saml_attributes_on_fail,
SYSCONF, ensure_no_config_object)
from privacyidea.lib.audit import getAudit
from privacyidea.api.lib.decorators import add_serial_from_response_to_g
from privacyidea.api.lib.prepolicy import (prepolicy, set_realm,
api_key_required, mangle,
save_client_application_type,
check_base_action, pushtoken_wait, webauthntoken_auth, webauthntoken_authz,
webauthntoken_request, check_application_tokentype)
from privacyidea.api.lib.postpolicy import (postpolicy,
check_tokentype, check_serial,
check_tokeninfo,
no_detail_on_fail,
no_detail_on_success, autoassign,
offline_info,
add_user_detail_to_response, construct_radius_response,
mangle_challenge_response, is_authorized)
from privacyidea.lib.policy import PolicyClass
from privacyidea.lib.event import EventConfiguration
import logging
from privacyidea.api.register import register_blueprint
from privacyidea.api.recover import recover_blueprint
from privacyidea.lib.utils import get_client_ip
from privacyidea.lib.event import event
from privacyidea.lib.challenge import get_challenges, extract_answered_challenges
from privacyidea.lib.subscriptions import CheckSubscription
from privacyidea.api.auth import admin_required
from privacyidea.lib.policy import ACTION
from privacyidea.lib.token import get_tokens
from privacyidea.lib.machine import list_machine_tokens
from privacyidea.lib.applications.offline import MachineApplication
import json
log = logging.getLogger(__name__)
validate_blueprint = Blueprint('validate_blueprint', __name__)
@validate_blueprint.before_request
@register_blueprint.before_request
@recover_blueprint.before_request
def before_request():
"""
This is executed before the request
"""
ensure_no_config_object()
request.all_data = get_all_params(request)
request.User = get_user_from_param(request.all_data)
privacyidea_server = current_app.config.get("PI_AUDIT_SERVERNAME") or \
request.host
# Create a policy_object, that reads the database audit settings
# and contains the complete policy definition during the request.
# This audit_object can be used in the postpolicy and prepolicy and it
# can be passed to the innerpolicies.
g.policy_object = PolicyClass()
g.audit_object = getAudit(current_app.config, g.startdate)
g.event_config = EventConfiguration()
# access_route contains the ip addresses of all clients, hops and proxies.
g.client_ip = get_client_ip(request, get_from_config(SYSCONF.OVERRIDECLIENT))
# Save the HTTP header in the localproxy object
g.request_headers = request.headers
g.serial = getParam(request.all_data, "serial", default=None)
g.audit_object.log({"success": False,
"action_detail": "",
"client": g.client_ip,
"client_user_agent": request.user_agent.browser,
"privacyidea_server": privacyidea_server,
"action": "{0!s} {1!s}".format(request.method, request.url_rule),
"info": ""})
@validate_blueprint.route('/offlinerefill', methods=['POST'])
@check_user_or_serial_in_request(request)
@event("validate_offlinerefill", request, g)
def offlinerefill():
"""
This endpoint allows fetching new offline OTP values for a token
that is already offline.
According to the definition it will send the missing OTP values, so that
the client will have as many OTP values as defined.
:param serial: The serial number of the token that should be refilled.
:param refilltoken: The authorization token that allows refilling.
:param pass: The last password (maybe password+OTP) entered by the user.
:return:
"""
serial = getParam(request.all_data, "serial", required)
refilltoken = getParam(request.all_data, "refilltoken", required)
password = getParam(request.all_data, "pass", required)
tokenobj_list = get_tokens(serial=serial)
if len(tokenobj_list) != 1:
raise ParameterError("The token does not exist")
else:
tokenobj = tokenobj_list[0]
tokenattachments = list_machine_tokens(serial=serial, application="offline")
if tokenattachments:
# TODO: Currently we do not distinguish whether a token has more than one offline attachment
# We need the options to pass the count and the rounds for the next offline OTP values,
# which could have changed in the meantime.
options = tokenattachments[0].get("options")
# check refill token:
if tokenobj.get_tokeninfo("refilltoken") == refilltoken:
# refill
otps = MachineApplication.get_refill(tokenobj, password, options)
refilltoken = MachineApplication.generate_new_refilltoken(tokenobj)
response = send_result(True)
content = response.json
content["auth_items"] = {"offline": [{"refilltoken": refilltoken,
"response": otps}]}
response.set_data(json.dumps(content))
return response
raise ParameterError("Token is not an offline token or refill token is incorrect")
@validate_blueprint.route('/check', methods=['POST', 'GET'])
@validate_blueprint.route('/radiuscheck', methods=['POST', 'GET'])
@validate_blueprint.route('/samlcheck', methods=['POST', 'GET'])
@postpolicy(is_authorized, request=request)
@postpolicy(mangle_challenge_response, request=request)
@postpolicy(construct_radius_response, request=request)
@postpolicy(no_detail_on_fail, request=request)
@postpolicy(no_detail_on_success, request=request)
@postpolicy(add_user_detail_to_response, request=request)
@postpolicy(offline_info, request=request)
@postpolicy(check_tokeninfo, request=request)
@postpolicy(check_tokentype, request=request)
@postpolicy(check_serial, request=request)
@postpolicy(autoassign, request=request)
@add_serial_from_response_to_g
@prepolicy(check_application_tokentype, request=request)
@prepolicy(pushtoken_wait, request=request)
@prepolicy(set_realm, request=request)
@prepolicy(mangle, request=request)
@prepolicy(save_client_application_type, request=request)
@prepolicy(webauthntoken_request, request=request)
@prepolicy(webauthntoken_authz, request=request)
@prepolicy(webauthntoken_auth, request=request)
@check_user_or_serial_in_request(request)
@CheckSubscription(request)
@prepolicy(api_key_required, request=request)
@event("validate_check", request, g)
def check():
"""
Check the authentication for a user or a serial number.
Either a ``serial`` or a ``user`` is required to authenticate.
The PIN and OTP value is sent in the parameter ``pass``.
In case of successful authentication it returns ``result->value: true``.
In case of a challenge response authentication a parameter ``exception=1``
can be passed. This would result in an HTTP 500 Server Error response if
an error occurred during sending of SMS or Email.
In case ``/validate/radiuscheck`` is requested, the responses are
modified as follows: A successful authentication returns an empty ``HTTP
204`` response. An unsuccessful authentication returns an empty ``HTTP
400`` response. Error responses are the same responses as for the
``/validate/check`` endpoint.
:param serial: The serial number of the token that tries to authenticate.
:param user: The loginname/username of the user who tries to authenticate.
:param realm: The realm of the user who tries to authenticate. If the
realm is omitted, the user is looked up in the default realm.
:param type: The tokentype of the tokens that are taken into account during
authentication. Requires the *authz* policy :ref:`application_tokentype_policy`.
It is ignored when a distinct serial is given.
:param pass: The password, that consists of the OTP PIN and the OTP value.
:param otponly: If set to 1, only the OTP value is verified. This is used
in the management UI. Only used with the parameter serial.
:param transaction_id: The transaction ID for a response to a challenge
request
:param state: The state ID for a response to a challenge request
:return: a json result with a boolean "result": true
**Example Validation Request**:
.. sourcecode:: http
POST /validate/check HTTP/1.1
Host: example.com
Accept: application/json
user=user
realm=realm1
pass=s3cret123456
**Example response** for a successful authentication:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"detail": {
"message": "matching 1 tokens",
"serial": "PISP0000AB00",
"type": "spass"
},
"id": 1,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": true
},
"version": "privacyIDEA unknown"
}
**Example response** for this first part of a challenge response authentication:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"detail": {
"serial": "PIEM0000AB00",
"type": "email",
"transaction_id": "12345678901234567890",
"multi_challenge: [ {"serial": "PIEM0000AB00",
"transaction_id": "12345678901234567890",
"message": "Please enter otp from your email",
"client_mode": "interactive"},
{"serial": "PISM12345678",
"transaction_id": "12345678901234567890",
"message": "Please enter otp from your SMS",
"client_mode": "interactive"}
]
},
"id": 2,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": false
},
"version": "privacyIDEA unknown"
}
In this example two challenges are triggered, one with an email and one
with an SMS. The application, and thus the user, has to decide which one
to use. They can use either.
The challenges also contain the information of the "client_mode". This
tells the plugin, whether it should display an input field to ask for the
OTP value or e.g. to poll for an answered authentication.
Read more at :ref:`client_modes`.
.. note:: All challenge response tokens have the same ``transaction_id`` in
this case.
**Example response** for a successful authentication with ``/samlcheck``:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"detail": {
"message": "matching 1 tokens",
"serial": "PISP0000AB00",
"type": "spass"
},
"id": 1,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": {"attributes": {
"username": "koelbel",
"realm": "themis",
"mobile": null,
"phone": null,
"myOwn": "/data/file/home/koelbel",
"resolver": "themis",
"surname": "Kölbel",
"givenname": "Cornelius",
"email": null},
"auth": true}
},
"version": "privacyIDEA unknown"
}
The response in ``value->attributes`` can contain additional attributes
(like "myOwn") which you can define in the LDAP resolver in the attribute
mapping.
"""
user = request.User
serial = getParam(request.all_data, "serial")
password = getParam(request.all_data, "pass", required)
otp_only = getParam(request.all_data, "otponly")
token_type = getParam(request.all_data, "type")
options = {"g": g,
"clientip": g.client_ip,
"user": user}
# Add all params to the options
for key, value in request.all_data.items():
if value and key not in ["g", "clientip", "user"]:
options[key] = value
g.audit_object.log({"user": user.login,
"resolver": user.resolver,
"realm": user.realm})
if serial:
if user:
# check if the given token belongs to the user
if not get_tokens(user=user, serial=serial, count=True):
raise ParameterError('Given serial does not belong to given user!')
if not otp_only:
success, details = check_serial_pass(serial, password, options=options)
else:
success, details = check_otp(serial, password)
result = success
else:
options["token_type"] = token_type
success, details = check_user_pass(user, password, options=options)
result = success
if request.path.endswith("samlcheck"):
ui = user.info
result = {"auth": success,
"attributes": {}}
if return_saml_attributes():
if success or return_saml_attributes_on_fail():
# privacyIDEA's own attribute map
result["attributes"] = {"username": ui.get("username"),
"realm": user.realm,
"resolver": user.resolver,
"email": ui.get("email"),
"surname": ui.get("surname"),
"givenname": ui.get("givenname"),
"mobile": ui.get("mobile"),
"phone": ui.get("phone")}
# additional attributes
for k, v in ui.items():
result["attributes"][k] = v
g.audit_object.log({"info": log_used_user(user, details.get("message")),
"success": success,
"serial": serial or details.get("serial"),
"token_type": details.get("type")})
return send_result(result, rid=2, details=details)
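# A minimal client-side sketch matching the docstring example above; the
# host and credentials are hypothetical, and ``requests`` is assumed.
def _example_validate_check_client():  # illustrative only, never called
    import requests
    body = requests.post("https://pi.example.com/validate/check",
                         data={"user": "user",
                               "realm": "realm1",
                               "pass": "s3cret123456"}).json()
    # result->value is true on success; on a triggered challenge it is
    # false and detail->transaction_id is used for the second step.
    return body["result"]["status"] and body["result"]["value"] is True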
@validate_blueprint.route('/triggerchallenge', methods=['POST', 'GET'])
@admin_required
@postpolicy(is_authorized, request=request)
@postpolicy(mangle_challenge_response, request=request)
@add_serial_from_response_to_g
@check_user_or_serial_in_request(request)
@prepolicy(check_application_tokentype, request=request)
@prepolicy(check_base_action, request, action=ACTION.TRIGGERCHALLENGE)
@prepolicy(webauthntoken_request, request=request)
@prepolicy(webauthntoken_auth, request=request)
@event("validate_triggerchallenge", request, g)
def trigger_challenge():
"""
An administrator can call this endpoint if he has the right of
``triggerchallenge`` (scope: admin).
He can pass a ``user`` name and/or a ``serial`` number.
privacyIDEA will trigger challenges for all native challenge response
tokens possessed by this user, or only for the given serial number.
The request needs to contain a valid PI-Authorization header.
:param user: The loginname/username of the user who tries to authenticate.
:param realm: The realm of the user who tries to authenticate. If the
realm is omitted, the user is looked up in the default realm.
:param serial: The serial number of the token.
:param type: The tokentype of the tokens that are taken into account during
authentication. Requires the authz policy application_tokentype.
It is ignored when a distinct serial is given.
:return: a json result with a "result" of the number of matching
challenge response tokens
**Example response** for a successful triggering of challenge:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"detail": {
"client_mode": "interactive",
"message": "please enter otp: , please enter otp: ",
"messages": [
"please enter otp: ",
"please enter otp: "
],
"multi_challenge": [
{
"client_mode": "interactive",
"message": "please enter otp: ",
"serial": "TOTP000026CB",
"transaction_id": "11451135673179897001",
"type": "totp"
},
{
"client_mode": "interactive",
"message": "please enter otp: ",
"serial": "OATH0062752C",
"transaction_id": "11451135673179897001",
"type": "hotp"
}
],
"serial": "OATH0062752C",
"threadid": 140329819764480,
"transaction_id": "11451135673179897001",
"transaction_ids": [
"11451135673179897001",
"11451135673179897001"
],
"type": "hotp"
},
"id": 2,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": 2
}
**Example response** if the user has no challenge response token:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"detail": {"messages": [],
"threadid": 140031212377856,
"transaction_ids": []},
"id": 1,
"jsonrpc": "2.0",
"result": {"status": true,
"value": 0},
"signature": "205530282...54508",
"time": 1484303812.346576,
"version": "privacyIDEA 2.17",
"versionnumber": "2.17"
}
**Example response** for a failed triggering of a challenge. In this case
the ``status`` will be ``false``.
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"detail": null,
"id": 1,
"jsonrpc": "2.0",
"result": {"error": {"code": 905,
"message": "ERR905: The user can not be
found in any resolver in this realm!"},
"status": false},
"signature": "14468...081555",
"time": 1484303933.72481,
"version": "privacyIDEA 2.17"
}
"""
user = request.User
serial = getParam(request.all_data, "serial")
token_type = getParam(request.all_data, "type")
details = {"messages": [],
"transaction_ids": []}
options = {"g": g,
"clientip": g.client_ip,
"user": user}
# Add all params to the options
for key, value in request.all_data.items():
if value and key not in ["g", "clientip", "user"]:
options[key] = value
token_objs = get_tokens(serial=serial, user=user, active=True, revoked=False, locked=False, tokentype=token_type)
# Only use the tokens, that are allowed to do challenge response
chal_resp_tokens = [token_obj for token_obj in token_objs if "challenge" in token_obj.mode]
create_challenges_from_tokens(chal_resp_tokens, details, options)
result_obj = len(details.get("multi_challenge"))
challenge_serials = [challenge_info["serial"] for challenge_info in details["multi_challenge"]]
g.audit_object.log({
"user": user.login,
"resolver": user.resolver,
"realm": user.realm,
"success": result_obj > 0,
"info": log_used_user(user, "triggered {0!s} challenges".format(result_obj)),
"serial": ",".join(challenge_serials),
})
return send_result(result_obj, rid=2, details=details)
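# A hedged sketch of the two-step challenge flow: an admin triggers the
# challenges, then the user answers one of them via /validate/check. The
# host, user and token values are hypothetical.
def _example_trigger_challenge_flow():  # illustrative only, never called
    import requests
    headers = {"PI-Authorization": "<admin auth token>"}
    trig = requests.post("https://pi.example.com/validate/triggerchallenge",
                         data={"user": "cornelius"},
                         headers=headers).json()
    transaction_id = trig["detail"]["transaction_id"]
    # Second step: the OTP is sent together with the transaction_id
    # instead of the static PIN+OTP password.
    answer = requests.post("https://pi.example.com/validate/check",
                           data={"user": "cornelius",
                                 "transaction_id": transaction_id,
                                 "pass": "123456"}).json()
    return answer["result"]["value"]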
@validate_blueprint.route('/polltransaction', methods=['GET'])
@validate_blueprint.route('/polltransaction/<transaction_id>', methods=['GET'])
@prepolicy(mangle, request=request)
@CheckSubscription(request)
@prepolicy(api_key_required, request=request)
def poll_transaction(transaction_id=None):
"""
Given a mandatory transaction ID, check if any non-expired challenge for this transaction ID
has been answered. In this case, return true. If this is not the case, return false.
This endpoint also returns false if no challenge with the given transaction ID exists.
This is mostly useful for out-of-band tokens that should poll this endpoint
to determine when to send an authentication request to ``/validate/check``.
:jsonparam transaction_id: a transaction ID
"""
if transaction_id is None:
transaction_id = getParam(request.all_data, "transaction_id", required)
# Fetch a list of non-expired challenges with the given transaction ID
# and determine whether it contains at least one non-expired answered challenge.
matching_challenges = [challenge for challenge in get_challenges(transaction_id=transaction_id)
if challenge.is_valid()]
answered_challenges = extract_answered_challenges(matching_challenges)
if answered_challenges:
result = True
log_challenges = answered_challenges
else:
result = False
log_challenges = matching_challenges
# We now determine the information that should be written to the audit log:
# * If there are no answered valid challenges, we log all token serials of challenges matching
# the transaction ID and the corresponding token owner
# * If there are any answered valid challenges, we log their token serials and the corresponding user
if log_challenges:
g.audit_object.log({
"serial": ",".join(challenge.serial for challenge in log_challenges),
})
# The token owner should be the same for all matching transactions
user = get_one_token(serial=log_challenges[0].serial).user
if user:
g.audit_object.log({
"user": user.login,
"resolver": user.resolver,
"realm": user.realm,
})
# In any case, we log the transaction ID
g.audit_object.log({
"info": u"transaction_id: {}".format(transaction_id),
"success": result
})
return send_result(result)
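# A minimal polling sketch for out-of-band tokens as described above:
# poll until the challenge is answered, then finalize via /validate/check.
# The host and timing values are arbitrary.
def _example_poll_transaction_client(transaction_id):  # illustrative only
    import time
    import requests
    url = "https://pi.example.com/validate/polltransaction/" + transaction_id
    for _ in range(30):  # give up after roughly 30 seconds
        if requests.get(url).json()["result"]["value"]:
            return True  # the challenge was answered
        time.sleep(1)
    return False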
| privacyidea/privacyidea | privacyidea/api/validate.py | Python | agpl-3.0 | 27,219 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Actor'
db.create_table('actors_actor', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('registered_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('last_activity', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('owned', self.gf('django.db.models.fields.BooleanField')(default=False)),
('calendar', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['agenda.Calendar'], unique=True, null=True, blank=True)),
))
db.send_create_signal('actors', ['Actor'])
def backwards(self, orm):
# Deleting model 'Actor'
db.delete_table('actors_actor')
models = {
'actors.actor': {
'Meta': {'object_name': 'Actor'},
'calendar': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['agenda.Calendar']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'owned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'registered_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'agenda.calendar': {
'Meta': {'object_name': 'Calendar'},
'events': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'calendars'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['agenda.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'agenda.event': {
'Meta': {'ordering': "['-event_date', '-start_time', '-title']", 'unique_together': "(('event_date', 'slug'),)", 'object_name': 'Event'},
'add_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'end_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'event_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mod_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 4, 5, 12, 34, 7, 487258)'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'start_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'V'", 'max_length': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['actors']
| SpreadBand/SpreadBand | apps/actors/migrations/0001_initial.py | Python | agpl-3.0 | 7,406 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-18 07:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exams', '0058_fill_explanation_is_contribution'),
]
operations = [
migrations.AddField(
model_name='session',
name='has_finished',
field=models.NullBooleanField(default=None),
),
]
| Zahajamaan/Fudulbank | exams/migrations/0059_session_has_finished.py | Python | agpl-3.0 | 474 |
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
# Module pins and descriptions
M1A = session.query(Module).filter(Module.name == 'M1A').first()
M1B = session.query(Module).filter(Module.name == 'M1B').first()
M1C = session.query(Module).filter(Module.name == 'M1C').first()
M2A = session.query(Module).filter(Module.name == 'M2A').first()
M2B = session.query(Module).filter(Module.name == 'M2B').first()
M2C = session.query(Module).filter(Module.name == 'M2C').first()
M3A = session.query(Module).filter(Module.name == 'M3A').first()
M3B = session.query(Module).filter(Module.name == 'M3B').first()
M3C = session.query(Module).filter(Module.name == 'M3C').first()
M4A = session.query(Module).filter(Module.name == 'M4A').first()
M4B = session.query(Module).filter(Module.name == 'M4B').first()
M4C = session.query(Module).filter(Module.name == 'M4C').first()
M5A = session.query(Module).filter(Module.name == 'M5A').first()
M5B = session.query(Module).filter(Module.name == 'M5B').first()
M5C = session.query(Module).filter(Module.name == 'M5C').first()
M6A = session.query(Module).filter(Module.name == 'M6A').first()
M6B = session.query(Module).filter(Module.name == 'M6B').first()
M6C = session.query(Module).filter(Module.name == 'M6C').first()
M7A = session.query(Module).filter(Module.name == 'M7A').first()
M7B = session.query(Module).filter(Module.name == 'M7B').first()
M7C = session.query(Module).filter(Module.name == 'M7C').first()
# Startup input BCM pins
input_pins = [0, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25]
# Startup output BCM pins
output_pins = [26, 27]
def main():
# Set up GPIO using BCM numbering
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(26, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(27, GPIO.OUT, initial=GPIO.LOW)
def modo0():
for pin in input_pins:
try:
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
except:
print u"Erro de ativação do Pino BCM %s", pin
stdout.flush()
for pin in output_pins:
try:
GPIO.setup(pin, GPIO.OUT, initial=GPIO.LOW)
except:
print u"Erro na ativação do Pino BCM %s", pin
stdout.flush()
return(True)
def modo1():
global M1A, M1B, M1C
global M2A, M2B, M2C
global M3A, M3B, M3C
global M4A, M4B, M4C
global M5A, M5B, M5C
global M6A, M6B, M6C
global M7A, M7B, M7C
try:
GPIO.output(26, GPIO.HIGH)
except:
print u'Error setting the level of BCM pin 26'
try:
GPIO.output(27, GPIO.LOW)
except:
print u'Error setting the level of BCM pin 27'
sleep(5)
discovery_mods(M1A, M1B, M1C)
discovery_mods(M2A, M2B, M2C)
discovery_mods(M3A, M3B, M3C)
discovery_mods(M4A, M4B, M4C)
discovery_mods(M5A, M5B, M5C)
discovery_mods(M6A, M6B, M6C)
discovery_mods(M7A, M7B, M7C)
def modo3():
try:
GPIO.output(26, GPIO.HIGH)
except:
print u'Error setting the level of BCM pin 26'
try:
GPIO.output(27, GPIO.HIGH)
except:
print u'Error setting the level of BCM pin 27'
return True
def switch_on(_M):
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if GPIO.gpio_function(_M.gpio) == 0:
GPIO.setup(_M.gpio, GPIO.OUT, initial=GPIO.LOW)
GPIO.output(_M.gpio, GPIO.HIGH)
_M.status = True
session.commit()
else:
print 'ERROR! This pin is set as an input'
def switch_off(_M):
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if GPIO.gpio_function(_M.gpio) == 0:
GPIO.setup(_M.gpio, GPIO.OUT, initial=GPIO.HIGH)
GPIO.output(_M.gpio, GPIO.LOW)
_M.status = False
session.commit()
else:
print 'ERROR! This pin is set as an input'
def reset_pin(_M, _time):
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if GPIO.gpio_function(_M.gpio) == 0:
switch_on(_M)
sleep(_time)
switch_off(_M)
else:
print 'ERROR! This pin is set as an input'
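# A minimal usage sketch assuming the module objects queried at the top of
# this file: pulse an output module's pin for two seconds.
def _example_pulse_output():  # illustrative only, never called
    if M1A is not None and M1A.io_type == 'output':
        reset_pin(M1A, 2)  # high for two seconds, then back to low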
def softreset(_host):
from subprocess import call
# NOTE: the _host argument is currently unused; the target IP and
# credentials are hardcoded in the call below.
call(["net", "rpc", "shutdown", "-r", "-I", "192.168.1.21", "-U", "Administrador%SemParar"])
def discovery_mods(_MA, _MB, _MC):
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if GPIO.input(_MA.gpio) == 0 and GPIO.input(_MB.gpio) == 1:
GPIO.setup(_MA.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(_MB.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(_MC.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
_MA.io_type = 'input'
_MA.rpull = False
_MB.io_type = 'input'
_MB.rpull = False
_MC.io_type = 'input'
_MC.rpull = False
session.commit()
elif GPIO.input(_MA.gpio) == 1 and GPIO.input(_MB.gpio) == 0:
GPIO.setup(_MA.gpio, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(_MB.gpio, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(_MC.gpio, GPIO.OUT, initial=GPIO.LOW)
_MA.io_type = 'output'
_MA.status = False
_MB.io_type = 'output'
_MB.status = False
_MC.io_type = 'output'
_MC.status = False
session.commit()
else:
GPIO.setup(_MA.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(_MB.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(_MC.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
_MA.io_type = 'empty'
_MA.rpull = False
_MB.io_type = 'empty'
_MB.rpull = False
_MC.io_type = 'empty'
_MC.rpull = False
session.commit()
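# Discovery truth table, summarizing the branches above (sketch):
#
#   MA reads 0, MB reads 1 -> input module attached (pins become inputs)
#   MA reads 1, MB reads 0 -> output module attached (pins become outputs)
#   any other combination  -> slot treated as empty (pins parked as
#                             pulled-down inputs)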
def cleanup_pins():
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.cleanup()
if __name__ == "__main__":
main()
| TemosEngenharia/RPI-IO | RPi_IO/rpi_io.py | Python | agpl-3.0 | 6,968 |
# Copyright 2019 Mentxu Isuskitza - AvanzOSC
# Copyright 2019 Oihana Larrañaga - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import api, fields, models
from odoo.models import expression
from odoo.tools.safe_eval import safe_eval
class FleetRouteStop(models.Model):
_name = 'fleet.route.stop'
_description = 'Route Stop'
_order = 'route_id, sequence, estimated_time'
name = fields.Char(string='Description', required=True)
location_id = fields.Many2one(
string='Location', comodel_name='res.partner',
domain=lambda self: [
('category_id', 'in',
[self.env.ref('fleet_route.stop_location_partner_cat').id])])
street = fields.Char(
string='Street', related='location_id.street')
city = fields.Char(
string='City', related='location_id.city')
state_id = fields.Many2one(
string='State', comodel_name='res.country.state',
related='location_id.state_id')
country_id = fields.Many2one(
string='Country', comodel_name='res.country',
related='location_id.country_id')
comment = fields.Text(
string='Internal notes', related='location_id.comment')
estimated_time = fields.Float(string='Estimated time')
sequence = fields.Integer(string="Sequence", default=1)
route_id = fields.Many2one(
string='Route', comodel_name='fleet.route', required=True,
ondelete='cascade')
manager_id = fields.Many2one(
string="Manager", comodel_name="hr.employee",
related="route_id.manager_id", store=True)
manager_phone_mobile = fields.Char(
string="Phone/mobile", related="route_id.manager_phone_mobile",
store=True)
@api.onchange("location_id")
def _onchange_location_id(self):
self.ensure_one()
if not self.name:
self.name = self.location_id.display_name
@api.multi
def open_map(self):
self.ensure_one()
return self.location_id.open_map()
@api.multi
def button_open_form(self):
self.ensure_one()
action = self.env.ref("fleet_route.action_fleet_route_stop")
form_view = self.env.ref("fleet_route.fleet_route_stop_view_form")
action_dict = action.read()[0] if action else {}
domain = expression.AND([
[("id", "=", self.id)],
safe_eval(action.domain or "[]")])
action_dict.update({
"domain": domain,
"view_id": form_view.id,
"view_mode": "form",
"res_id": self.id,
"views": [],
})
return action_dict
@api.multi
def name_get(self):
""" name_get() -> [(id, name), ...]
Returns a textual representation for the records in ``self``.
By default this is the value of the ``display_name`` field.
:return: list of pairs ``(id, text_repr)`` for each records
:rtype: list(tuple)
"""
result = []
if self.env.context.get("hide_route"):
return super(FleetRouteStop, self).name_get()
for record in self:
field = record.route_id._fields["direction"]
direction = field.convert_to_export(
record.route_id["direction"], record.route_id)
result.append((record.id, "{} [{} ({})]".format(
record.name, record.route_id.name_id.name, direction)))
return result
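# For illustration, with hide_route absent from the context, a stop named
# "Main gate" on a route named "School run" with direction "Outward"
# (all hypothetical values) renders as:
#
#   Main gate [School run (Outward)]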
| oihane/odoo-addons | fleet_route/models/fleet_route_stop.py | Python | agpl-3.0 | 3,444 |
# -*- coding: utf-8 -*-
import datetime
from django.db import models
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ComponentError'
db.create_table(u'maasserver_componenterror', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('component', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)),
('error', self.gf('django.db.models.fields.CharField')(max_length=1000)),
))
db.send_create_signal(u'maasserver', ['ComponentError'])
def backwards(self, orm):
# Deleting model 'ComponentError'
db.delete_table(u'maasserver_componenterror')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maasserver.bootimage': {
'Meta': {'unique_together': "((u'architecture', u'subarchitecture', u'release', u'purpose'),)", 'object_name': 'BootImage'},
'architecture': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'purpose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'release': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'subarchitecture': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'maasserver.componenterror': {
'Meta': {'object_name': 'ComponentError'},
'component': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'maasserver.config': {
'Meta': {'object_name': 'Config'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('maasserver.fields.JSONObjectField', [], {'null': 'True'})
},
u'maasserver.dhcplease': {
'Meta': {'object_name': 'DHCPLease'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
'mac': ('maasserver.fields.MACAddressField', [], {}),
'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']"})
},
u'maasserver.filestorage': {
'Meta': {'object_name': 'FileStorage'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'filename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'maasserver.macaddress': {
'Meta': {'object_name': 'MACAddress'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_address': ('maasserver.fields.MACAddressField', [], {'unique': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.node': {
'Meta': {'object_name': 'Node'},
'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}),
'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'distro_series': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}),
'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}),
'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-a2984710-098f-11e2-869a-002608dc6120'", 'unique': 'True', 'max_length': '41'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}),
'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.nodegroup': {
'Meta': {'object_name': 'NodeGroup'},
'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}),
'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
u'maasserver.nodegroupinterface': {
'Meta': {'unique_together': "((u'nodegroup', u'interface'),)", 'object_name': 'NodeGroupInterface'},
'broadcast_ip': ('django.db.models.fields.GenericIPAddressField', [], {'default': 'None', 'max_length': '39', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interface': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'ip_range_high': ('django.db.models.fields.GenericIPAddressField', [], {'default': 'None', 'max_length': '39', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'ip_range_low': ('django.db.models.fields.GenericIPAddressField', [], {'default': 'None', 'max_length': '39', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'management': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']"}),
'router_ip': ('django.db.models.fields.GenericIPAddressField', [], {'default': 'None', 'max_length': '39', 'null': 'True', 'blank': 'True'}),
'subnet_mask': ('django.db.models.fields.GenericIPAddressField', [], {'default': 'None', 'max_length': '39', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.sshkey': {
'Meta': {'unique_together': "((u'user', u'key'),)", 'object_name': 'SSHKey'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'maasserver.tag': {
'Meta': {'object_name': 'Tag'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'definition': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'piston.consumer': {
'Meta': {'object_name': 'Consumer'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"})
},
'piston.token': {
'Meta': {'object_name': 'Token'},
'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1348852328L'}),
'token_type': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}),
'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'})
}
}
complete_apps = ['maasserver'] | cloudbase/maas | src/maasserver/migrations/0033_component_error.py | Python | agpl-3.0 | 15,078 |
# -*- coding: utf-8 -*-
import django.db.models.deletion
from django.db import models, migrations
import akvo.rsr.fields
class Migration(migrations.Migration):
dependencies = [
('rsr', '0067_auto_20160412_1858'),
]
operations = [
migrations.CreateModel(
name='IatiCheck',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.PositiveSmallIntegerField(verbose_name='status')),
('description', akvo.rsr.fields.ValidXMLTextField(verbose_name='description')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='iati_checks', verbose_name='project', to='rsr.Project')),
],
options={
'verbose_name': 'IATI check',
'verbose_name_plural': 'IATI checks',
},
bases=(models.Model,),
),
]
| akvo/akvo-rsr | akvo/rsr/migrations/0068_iaticheck.py | Python | agpl-3.0 | 1,001 |
# -*- coding: utf-8 -*-
from flask import Blueprint, current_app, request, render_template, abort, url_for, redirect
from pymongo import DESCENDING
import arrow
from widukind_web import constants
from widukind_web.extensions import auth
from widukind_web.extensions import cache
from widukind_web import queries
from widukind_common.flask_utils import json_tools
bp = Blueprint('admin', __name__)
#TODO: redis cache view
@bp.route('/providers', endpoint="providers")
@auth.required
def all_providers():
cursor = queries.col_providers().find({})
providers = [doc for doc in cursor]
datasets_counters = queries.datasets_counter_status()
return render_template("admin/providers.html", providers=providers,
datasets_counters=datasets_counters)
@bp.route('/datasets/<slug>', endpoint="datasets")
@auth.required
def all_datasets_for_provider_slug(slug):
provider = queries.col_providers().find_one({"slug": slug})
if not provider:
abort(404)
projection = {"dimension_list": False, "attribute_list": False,
"concepts": False, "codelists": False}
datasets = queries.col_datasets().find({"provider_name": provider["name"]},
projection)
series_counters = queries.series_counter(match={"provider_name": provider["name"]})
return render_template("admin/datasets.html",
provider=provider,
series_counters=series_counters,
datasets=datasets)
@bp.route('/datasets/disable', endpoint="datasets-disable")
@auth.required
def all_disable_datasets():
projection = {"dimension_list": False, "attribute_list": False,
"concepts": False, "codelists": False}
cursor = queries.col_datasets().find({"enable": False},
projection)
datasets = cursor.sort("provider_name", 1)
return render_template("admin/disable_datasets.html",
datasets=datasets)
@bp.route('/enable/provider/<slug>', endpoint="provider_enable")
@auth.required
def change_status_provider(slug):
query = {"slug": slug}
provider = queries.col_providers().find_one(query)
if not provider:
abort(404)
query_update = {}
if provider["enable"]:
query_update["enable"] = False
else:
query_update["enable"] = True
queries.col_providers().find_one_and_update(query, {"$set": query_update})
datasets_query = {"provider_name": provider["name"]}
queries.col_datasets().update_many(datasets_query, {"$set": query_update})
return redirect(url_for(".providers"))
@bp.route('/enable/dataset/<slug>', endpoint="dataset_enable")
@auth.required
def change_status_dataset(slug):
query = {"slug": slug}
dataset = queries.col_datasets().find_one(query)
if not dataset:
abort(404)
query_update = {}
if dataset["enable"]:
query_update["enable"] = False
else:
query_update["enable"] = True
queries.col_datasets().find_one_and_update(query, {"$set": query_update})
query = {"name": dataset["provider_name"]}
provider = queries.col_providers().find_one(query)
return redirect(url_for(".datasets", slug=provider["slug"]))
@bp.route('/db/profiling/<int:status>', endpoint="db-profiling")
@auth.required
def change_db_profiling(status=0):
db = current_app.widukind_db
db.set_profiling_level(status)
return redirect(url_for("home"))
@bp.route('/cache/clear', endpoint="cache_clear")
@auth.required
def cache_clear():
cache.clear()
return redirect(url_for("home"))
@bp.route('/doc/<col>/<objectid:objectid>', endpoint="doc")
@auth.required
def doc_view(col, objectid):
doc = current_app.widukind_db[col].find_one({"_id": objectid})
return render_template("admin/doc.html", doc=doc)
@bp.route('/db/profile/<int:position>', endpoint="profile-unit")
@bp.route('/db/profile', endpoint="profile")
@auth.required
def profile_view(position=-1):
exclude_ns = []
exclude_ns.append("%s.%s" % (current_app.widukind_db.name, constants.COL_SESSION))
exclude_ns.append("%s.system.profile" % current_app.widukind_db.name)
query = {"ns": {"$nin": exclude_ns}}
docs = current_app.widukind_db["system.profile"].find(query).sort([("ts", -1)]).limit(20)
if position >= 0:
doc = docs[position]
return render_template("admin/doc.html", doc=doc)
return render_template("admin/profile.html", docs=docs)
@bp.route('/db/stats/<col>', endpoint="col-stats")
@auth.required
def collection_stats_view(col):
doc = current_app.widukind_db.command("collstats", col)
return render_template("admin/doc.html", doc=doc)
@bp.route('/queries', endpoint="queries")
@auth.required
def queries_view():
is_ajax = request.args.get('json') or request.is_xhr
if not is_ajax:
return render_template("admin/queries.html")
col = current_app.widukind_db[constants.COL_QUERIES]
tags = request.args.get('tags')
q = {}
if tags:
tags = tags.split(",")
q['tags'] = {"$in": tags}
object_list = col.find(q).sort("created", DESCENDING)
result = []
for obj in object_list:
obj["view"] = url_for(".doc", col=constants.COL_QUERIES, objectid=obj["_id"])
result.append(obj)
return current_app.jsonify(result)
@bp.route('/logs', endpoint="logs")
@auth.required
def view_logs():
"""
{
"_id" : ObjectId("5665a7182d4b25012092ac71"),
"message" : "change count for BEA.datasets. old[0] - new[0]",
"level" : "INFO",
"timestamp" : Timestamp(1449502488, 860),
"loggerName" : "widukind_web",
"thread" : 74212736,
"threadName" : "DummyThread-1",
"method" : "upsert",
"lineNumber" : 271,
"module" : "wsgi",
"fileName" : "V:\\git\\cepremap\\src\\widukind-web\\widukind_web\\wsgi.py"
}
"""
is_ajax = request.args.get('json') or request.is_xhr
if not is_ajax:
return render_template("admin/logs.html")
col = current_app.widukind_db[constants.COL_LOGS]
object_list = col.find({})
return current_app.jsonify(list(object_list))
@bp.route('/stats/series', endpoint="stats-series")
@auth.required
def stats_series():
cursor = queries.col_providers().find({}, {"name": True})
provider_names = [doc["name"] for doc in cursor]
result = []
total = 0
for provider in provider_names:
r = {"_id": provider, "count": queries.col_series().count({"provider_name": provider})}
result.append(r)
total += r["count"]
#result = list(queries.col_series().aggregate([{"$group": {"_id": "$provider_name", "count": {"$sum": 1}}}, {"$sort": {"count": -1} }], allowDiskUse=True))
return render_template("admin/stats-series.html",
result=result, total=total)
@bp.route('/stats/datasets', endpoint="stats-datasets")
@auth.required
def stats_datasets():
result = list(queries.col_datasets().aggregate([{"$group": {"_id": "$provider_name", "count": {"$sum": 1}}}, {"$sort": {"count": -1} }], allowDiskUse=True))
total = 0
for r in result:
total += r["count"]
return render_template("admin/stats-datasets.html",
result=result, total=total)
@bp.route('/stats/run', endpoint="stats-run")
@auth.required
def stats_run_html():
return render_template("admin/stats-run.html")
@bp.route('/stats/run/json', endpoint="stats-run-json")
@auth.required
def stats_run_json():
query = {}
provider_slug = request.args.get('provider')
dataset_slug = request.args.get('dataset')
if dataset_slug:
dataset = queries.get_dataset(dataset_slug)
query["provider_name"] = dataset["provider_name"]
query["dataset_code"] = dataset["dataset_code"]
elif provider_slug:
provider = queries.get_provider(provider_slug)
query["provider_name"] = provider["name"]
startDate = arrow.get(request.args.get('startDate')).floor('day').datetime
endDate = arrow.get(request.args.get('endDate')).ceil('day').datetime
query["created"] = {"$gte": startDate, "$lte": endDate}
limit = request.args.get('limit', default=100, type=int)
cursor = queries.col_stats_run().find(query)
if limit:
cursor = cursor.limit(limit)
count = cursor.count()
cursor = cursor.sort("created", -1)
rows = [doc for doc in cursor]
"""
for row in rows:
row["view"] = url_for("views.dataset-by-code",
provider_name=row["provider_name"],
dataset_code=row["dataset_code"])
"""
return json_tools.json_response(rows, {"total": count})
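# A hedged sketch of a client query against this endpoint; the host and
# slug are hypothetical, and the dates follow the arrow parsing above.
def _example_stats_run_query():  # illustrative only, never called
    import requests
    params = {"provider": "insee",        # provider slug (optional)
              "startDate": "2016-01-01",  # floored to the start of the day
              "endDate": "2016-01-31",    # ceiled to the end of the day
              "limit": 50}
    return requests.get("https://example.org/stats/run/json",
                        params=params).json()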
@bp.route('/contacts', endpoint="contacts")
@auth.required
def contacts_view():
is_ajax = request.args.get('json') or request.is_xhr
if not is_ajax:
return render_template("admin/contacts.html")
col = queries.col_contact()
object_list = col.find({}).sort("created", DESCENDING)
result = []
for obj in object_list:
obj["view"] = url_for(".doc", col=constants.COL_CONTACT, objectid=obj["_id"])
result.append(obj)
return current_app.jsonify(result)
| Widukind/widukind-web | widukind_web/admin.py | Python | agpl-3.0 | 9,502 |
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from django.core import mail
from ovp.apps.users.tests.fixture import UserFactory
from ovp.apps.organizations.tests.fixture import OrganizationFactory
from ovp.apps.projects.models import Project, Apply, Job, Work
from ovp.apps.core.helpers import get_email_subject
from server.celery_tasks import app
@override_settings(DEFAULT_SEND_EMAIL='sync',
CELERY_TASK_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_TASK_ALWAYS_EAGER=True)
class TestEmailTriggers(TestCase):
def setUp(self):
self.user = UserFactory.create(
email='testmail-projects@test.com',
password='test_returned',
object_channel='default'
)
self.organization = OrganizationFactory.create(
name='test org', owner=self.user,
type=0, object_channel='default'
)
self.project = Project.objects.create(
name='test project', slug='test-slug',
details='abc', description='abc',
owner=self.user, organization=self.organization,
published=False, object_channel='default'
)
self.project.published = True
self.project.save()
app.control.purge()
def test_applying_schedules_interaction_confirmation_email(self):
"""
        Assert a Celery task asking about the interaction
        confirmation is created when a user applies to a project
"""
mail.outbox = []
Apply.objects.create(user=self.user, project=self.project, object_channel='default')
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, get_email_subject(
'default', 'atados-askProjectInteractionConfirmation-toVolunteer', 'Ask project confirmation'
))
self.assertIn('vaga test project', mail.outbox[0].body)
def test_applying_schedules_reminder_email(self):
"""
        Assert a Celery task reminding the volunteer
        is created when a user applies to a project
"""
mail.outbox = []
Job.objects.create(
start_date=timezone.now(), end_date=timezone.now(),
project=self.project, object_channel='default'
)
Apply.objects.create(user=self.user, project=self.project, object_channel='default')
self.assertEqual(len(mail.outbox), 4)
self.assertEqual(mail.outbox[1].subject, 'Uma ação está chegando... estamos ansiosos para te ver.')
self.assertIn('test project', mail.outbox[1].body)
def test_applying_schedules_ask_about_project_experience_to_volunteer(self):
"""
        Assert a Celery task asking the volunteer about the project
        experience is created when a user applies to a project
"""
mail.outbox = []
work = Work.objects.create(project=self.project, object_channel='default')
Apply.objects.create(user=self.user, project=self.project, object_channel='default')
self.assertEqual(len(mail.outbox), 3)
self.assertEqual(mail.outbox[1].subject, 'Conta pra gente como foi sua experiência?')
self.assertIn('>test project<', mail.outbox[1].alternatives[0][0])
mail.outbox = []
work.delete()
Job.objects.create(
start_date=timezone.now(), end_date=timezone.now(),
project=self.project, object_channel='default'
)
Apply.objects.create(user=self.user, project=self.project, object_channel='default')
self.assertEqual(mail.outbox[2].subject, 'Conta pra gente como foi sua experiência?')
self.assertIn('>test project<', mail.outbox[2].alternatives[0][0])
def test_publishing_project_schedules_ask_about_experience_to_organization(self):
"""
        Assert a Celery task asking the organization about the project
        experience is created when the user's project is published
"""
mail.outbox = []
project = Project.objects.create(
name='test project', slug='test-slug', details='abc',
description='abc', owner=self.user, published=False,
organization=self.organization, object_channel='default'
)
Work.objects.create(project=project, object_channel='default')
project.published = True
project.save()
self.assertEqual(len(mail.outbox), 3)
self.assertEqual(mail.outbox[2].subject, 'Tá na hora de contar pra gente como foi')
self.assertIn('>test project<', mail.outbox[2].alternatives[0][0])
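# The override_settings block at the top of this class is what makes these
# assertions possible: with CELERY_TASK_ALWAYS_EAGER the tasks triggered by
# Apply/Project signals run synchronously in-process, so mail.outbox is
# already populated when each test reaches its assertions. A minimal sketch
# of the same idea, with a hypothetical task name:
#
#     @app.task
#     def ping():
#         return "pong"
#
#     ping.delay().get()  # runs eagerly in the test process, no worker needed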
| atados/atados-ovp | api/channels/default/tests/test_tasks.py | Python | agpl-3.0 | 4,585 |
if __name__=="__main__":
import sys
sys.path.extend([".", "lib", "app"])
import webnotes, os
import utilities.demo.make_demo
def make_demo_app():
webnotes.mute_emails = 1
webnotes.connect()
utilities.demo.make_demo.make(reset=True, simulate=False)
	# set up the demo user etc. so that the site is up faster, while the data loads
make_demo_user()
make_demo_login_page()
make_demo_on_login_script()
utilities.demo.make_demo.make(reset=False, simulate=True)
def make_demo_user():
roles = ["Accounts Manager", "Analytics", "Expense Approver", "Accounts User",
"Leave Approver", "Blogger", "Customer", "Sales Manager", "Employee", "Support Manager",
"HR Manager", "HR User", "Maintenance Manager", "Maintenance User", "Material Manager",
"Material Master Manager", "Material User", "Manufacturing Manager",
"Manufacturing User", "Projects User", "Purchase Manager", "Purchase Master Manager",
"Purchase User", "Quality Manager", "Report Manager", "Sales Master Manager",
"Sales User", "Supplier", "Support Team"]
def add_roles(bean):
for role in roles:
			bean.doclist.append({
"doctype": "UserRole",
"parentfield": "user_roles",
"role": role
})
# make demo user
if webnotes.conn.exists("Profile", "demo@owrang.yellowen.com"):
webnotes.delete_doc("Profile", "demo@owrang.yellowen.com")
p = webnotes.new_bean("Profile")
p.doc.email = "demo@owrang.yellowen.com"
p.doc.first_name = "Demo"
p.doc.last_name = "User"
p.doc.enabled = 1
p.doc.user_type = "Owrang Demo"
p.doc.send_invite_email = 0
p.doc.new_password = "demo"
p.insert()
add_roles(p)
p.save()
# make system manager user
if webnotes.conn.exists("Profile", "admin@owrang.yellowen.com"):
webnotes.delete_doc("Profile", "admin@owrang.yellowen.com")
p = webnotes.new_bean("Profile")
p.doc.email = "admin@owrang.yellowen.com"
p.doc.first_name = "Admin"
p.doc.last_name = "User"
p.doc.enabled = 1
p.doc.user_type = "System User"
p.doc.send_invite_email = 0
p.doc.new_password = "admin010123"
p.insert()
roles.append("System Manager")
add_roles(p)
p.save()
# only read for newsletter
webnotes.conn.sql("""update `tabDocPerm` set `write`=0, `create`=0, `cancel`=0
where parent='Newsletter'""")
webnotes.conn.sql("""update `tabDocPerm` set `write`=0, `create`=0, `cancel`=0
where parent='Profile' and role='All'""")
webnotes.conn.commit()
def make_demo_login_page():
webnotes.conn.set_value("Website Settings", None, "home_page", "")
webnotes.conn.sql("""delete from `tabWeb Page` where name='demo-login'""")
p = webnotes.new_bean("Web Page")
p.doc.title = "Demo Login"
p.doc.published = 1
p.doc.description = "Owrang Demo Login"
with open(os.path.join(os.path.dirname(__file__), "demo-login.html"), "r") as dfile:
p.doc.main_section = dfile.read()
p.doc.insert_code = 1
with open(os.path.join(os.path.dirname(__file__), "demo-login.js"), "r") as dfile:
p.doc.javascript = dfile.read()
p.doc.insert_style = 1
with open(os.path.join(os.path.dirname(__file__), "demo-login.css"), "r") as dfile:
p.doc.css = dfile.read()
p.insert()
website_settings = webnotes.bean("Website Settings", "Website Settings")
website_settings.doc.home_page = "demo-login"
website_settings.doc.disable_signup = 1
website_settings.save()
webnotes.conn.commit()
def make_demo_on_login_script():
webnotes.conn.sql("""delete from `tabCustom Script` where dt='Control Panel'""")
s = webnotes.new_bean("Custom Script")
s.doc.dt = "Control Panel"
s.doc.script_type = "Server"
with open(os.path.join(os.path.dirname(__file__), "demo_control_panel.py"), "r") as dfile:
s.doc.script = dfile.read()
s.insert()
cp = webnotes.bean("Control Panel")
cp.doc.custom_startup_code = """wn.ui.toolbar.show_banner('You are using Owrang Demo. To start your own Owrang Trial, <a href="https://owrang.yellowen.com/pricing-and-signup" target="_blank">click here</a>')"""
cp.save()
webnotes.conn.commit()
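# A minimal sketch (illustrative only) of the webnotes "bean" pattern used
# throughout this file: new_bean() wraps an in-memory document, fields are
# set on .doc, and insert()/save() persist it.
#
#     note = webnotes.new_bean("Web Page")   # hypothetical document
#     note.doc.title = "Example"
#     note.insert()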
if __name__=="__main__":
make_demo_app() | Yellowen/Owrang | utilities/demo/make_erpnext_demo.py | Python | agpl-3.0 | 3,982 |
# Copyright 2022 Camptocamp SA (https://www.camptocamp.com).
# @author Iván Todorovich <ivan.todorovich@camptocamp.com>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from lxml import etree
from odoo.tests import HttpCase, tagged
from odoo.addons.mail.tests.common import MockEmail
@tagged("-at_install", "post_install")
class WebsiteSaleHttpCase(HttpCase, MockEmail):
def setUp(self):
super().setUp()
self.env = self.env(context=dict(self.env.context, tracking_disable=True))
self.mailing_list = self.env.ref("mass_mailing.mailing_list_data")
self.mailing_contact = self.env["mailing.contact"].create(
{
"name": "John Doe",
"email": "john.doe@example.com",
}
)
def test_subscription_email_unsubscribe_from_list(self):
# Create subscription
with self.mock_mail_gateway():
subs = self.env["mailing.contact.subscription"].create(
{
"contact_id": self.mailing_contact.id,
"list_id": self.mailing_list.id,
}
)
body = self._new_mails._send_prepare_values()["body"]
root = etree.fromstring(body, etree.HTMLParser())
anchor = root.xpath("//a[@href]")[0]
unsubscribe_url = anchor.attrib["href"]
web_base_url = self.env["ir.config_parameter"].sudo().get_param("web.base.url")
self.url_open(unsubscribe_url.replace(web_base_url, ""))
subs.invalidate_cache()
self.assertEqual(subs.opt_out, True)
| OCA/social | mass_mailing_subscription_email/tests/test_unsubscribe_from_list.py | Python | agpl-3.0 | 1,590 |
from openerp.osv import osv, fields
class IrActionsActWindowMenu(osv.Model):
_name = 'ir.actions.act_window.menu'
_description = 'Menu on the actions'
_columns = {
'name': fields.char('Label', size=64, required=True, translate=True),
'active': fields.boolean(
'Active', help='if check, this object is always available'),
}
_defaults = {
'active': True,
}
class IrActionsActWindowButton(osv.Model):
_name = 'ir.actions.act_window.button'
_description = 'Button to display'
_order = 'name'
_columns = {
'action_from_id': fields.many2one('ir.actions.act_window', 'from Action',
required=True),
'action_to_open_id': fields.many2one('ir.actions.actions', 'to Action',
required=True),
'name': fields.char('Label', size=64, required=True, translate=True),
'menu_id': fields.many2one('ir.actions.act_window.menu', 'Menu'),
'active': fields.boolean(
'Active', help='if check, this object is always available'),
        'visibility_model_name': fields.char(u"Model",
            help=u"Model where visible_button_method_name is "
                 u"defined to manage the button visibility."),
        'visible_button_method_name': fields.char('Visibility method name',
            help=u"Method that tells whether the button should be "
                 u"visible or not: it returns True if the button must "
                 u"be visible, False otherwise. Expected signature: "
                 u"def method_name(cr, uid, context=None)"),
}
_defaults = {
'active': True,
}
def format_buttons(self, cr, uid, ids, context=None):
res = {}
action = self.pool.get('ir.actions.actions')
def get_action(action_id):
model = self.pool.get(action.read(cr, uid, action_id, ['type'],
context=context)['type'])
return model.read(cr, uid, action_id, [], load="_classic_write",
context=context)
for this in self.browse(cr, uid, ids, context=context):
if not this.active:
continue
if this.menu_id:
if not this.menu_id.active:
continue
if this.visibility_model_name and this.visible_button_method_name:
model = self.pool.get(this.visibility_model_name)
if not getattr(model, this.visible_button_method_name)(
cr, uid, context=context):
continue
menu = this.menu_id.name if this.menu_id else False
            if menu not in res:
res[menu] = []
val = get_action(this.action_to_open_id.id)
val.update({'name': this.name})
res[menu].append(val)
return res
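    # Illustrative (hypothetical) return value of format_buttons() for two
    # buttons, one grouped under a "Reports" menu and one ungrouped; each
    # value is the target action's read() dict plus the button label:
    #
    #     {
    #         'Reports': [{'id': 42, 'name': 'Monthly Report', ...}],
    #         False: [{'id': 43, 'name': 'Open Partner', ...}],
    #     }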
class IrActionsActWindow(osv.Model):
_inherit = 'ir.actions.act_window'
_columns = {
'buttons_ids': fields.one2many('ir.actions.act_window.button',
'action_from_id', 'Buttons'),
}
def get_menus_and_buttons(self, cr, uid, ids, context=None):
res = {}
button = self.pool.get('ir.actions.act_window.button')
for this in self.browse(cr, uid, ids, context=context):
res[this.id] = button.format_buttons(
cr, uid, [x.id for x in this.buttons_ids], context=context)
return res
| vnsofthe/odoo-dev | addons/web_action_add_button/base.py | Python | agpl-3.0 | 3,729 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
| sysadminmatmoz/odoo-clearcorp | purchase_prediction/__init__.py | Python | agpl-3.0 | 1,003 |
'''
Created on Sep 14, 2013
@author: paepcke
Modifications:
- Jan 1, 2013: added remove() method to OutputFile
'''
import StringIO
from collections import OrderedDict
import csv
import re
import sys
import os
import tempfile
from col_data_type import ColDataType
class OutputDisposition(object):
'''
Specifications for where completed relation rows
should be deposited, and in which format. Current
output options are to files, and to stdout.
This class is abstract, but make sure the subclasses
invoke this super's __init__() when they are initialized.
    Also defined here are the available output formats, of
    which there are three: CSV only, SQL INSERT statements
    only, and SQL INSERT statements plus CSV.
NOTE: currently the CSV-only format option is broken. Not
enough time to maintain it.
SQL insert statements that are directed to files will also
generate equivalent .csv files. The insert statement files
will look like the result of a mysqldump, and inserts into
different tables are mixed. The corresponding (values-only)
csv files are split: one file for each table.
'''
def __init__(self, outputFormat, outputDestObj=None):
'''
:param outputDestObj: instance of one of the subclasses
:type outputDestObj: Subclass(OutputDisposition)
'''
self.outputFormat = outputFormat
if outputDestObj is None:
self.outputDest = self
else:
self.outputDest = outputDestObj
self.csvTableFiles = {}
self.schemas = TableSchemas()
def __enter__(self):
return self.outputDest
def __exit__(self,excType, excValue, excTraceback):
try:
self.outputDest.close()
except:
# If the conversion itself went fine, then
# raise this exception from the closing attempt.
# But if the conversion failed, then have the
# system re-raise that earlier exception:
if excValue is None:
raise IOError("Could not close the output of the conversion: %s" % sys.exc_info()[0])
# Return False to indicate that if the conversion
# threw an error, the exception should now be re-raised.
# If the conversion worked fine, then this return value
# is ignored.
return False
def flush(self):
self.outputDest.flush()
def getOutputFormat(self):
return self.outputFormat
def addSchemaHints(self, tableName, schemaHints):
'''
Provide a schema hint dict for the table of the given name.
:param tableName: name of table to which schema applies. The name may be None, in which case it refers to the main (default) table.
:type tableName: String
:param schemaHints: dict mapping column names to SQL types via ColumnSpec instances
:type schemaHints: [ordered]Dict<String,ColumnSpec>
'''
self.schemas.addColSpecs(tableName, schemaHints)
def getSchemaHint(self, colName, tableName):
'''
Given a column name, and a table name, return the ColumnSpec object
that describes that column. If tableName is None, the main (default)
table's schema will be searched for a colName entry
:param colName: name of column whose schema info is sought
:type colName: String
:param tableName: name of table in which the given column resides
:type tableName: String
:return: list of ColumnSpec instances
:rtype: (ColumnSpec)
@raise KeyError: if table or column are not found
'''
return self.schemas[tableName][colName]
def getSchemaHintByPos(self, pos, tableName):
try:
return self.schemas[tableName].values()[pos]
except ValueError:
return None
except IndexError:
raise ValueError("Attempt to access pos %s in schema for table %s, which is shorter than %s: %s") %\
(str(pos), tableName, str(pos), self.schemas[tableName].values())
def getSchema(self, tableName):
try:
return self.schemas[tableName].values()
except ValueError:
return None
def copySchemas(self, destDisposition):
'''
Given another instance of OutputDisposition,
copy this instance's schemas to the destination.
:param destDisposition: another instance of OutputDisposition
:type destDisposition: OutputDisposition
'''
destDisposition.schemas = self.schemas
def ensureColExistence(self, colName, colDataType, jsonToRelationConverter, tableName=None):
'''
Given a column name and MySQL datatype name, check whether this
column has previously been encountered. If not, a column information
object is created, which will eventually be used to create the column
header, or SQL alter statements.
:param colName: name of the column to consider
:type colName: String
:param colDataType: datatype of the column.
:type colDataType: ColDataType
:param tableName: name of table to which the column is to belong; None if for main table
:type tableName: {String | None}
'''
schemaDict = self.schemas[tableName]
if schemaDict is None or len(schemaDict) == 0:
# schema for this table definitely does not have the column:
colSpecObj = ColumnSpec( colName, colDataType, jsonToRelationConverter)
self.schemas[tableName] = OrderedDict({colName : colSpecObj})
return
# Have schema (dict) for this table. Does that dict contain
# an entry for the col name?
try:
schemaDict[colName]
# all set:
return
except KeyError:
colSpecObj = ColumnSpec( colName, colDataType, jsonToRelationConverter)
schemaDict[colName] = colSpecObj
def createTmpTableFile(self, tableName, fileSuffix):
'''
Used for cases in which parsers must create more than one
table. Those tables need to be written to disk, even when
output of the main table is piped.
:param tableName: name by which the table file obj can be retrieved
:type tableName: String
:param fileSuffix: suffix for temp file name. Ex. 'csv' for CSV outputs, or 'sql' for SQL dumps
:type fileSuffix: String
:return: file object open for writing
:rtype: File
'''
self.csvTableFiles[tableName] = tempfile.NamedTemporaryFile(prefix='tmpTable',
suffix=fileSuffix)
return self.csvTableFiles[tableName]
#--------------------- Available Output Formats
class OutputFormat():
CSV = 0
SQL_INSERT_STATEMENTS = 1
SQL_INSERTS_AND_CSV = 2
#--------------------- Available Output Destination Options:
class OutputPipe(OutputDisposition):
def __init__(self, outputFormat):
super(OutputPipe, self).__init__(outputFormat)
self.fileHandle = sys.stdout
# Make file name accessible as property just like
# Python file objects do:
self.name = "<stdout>" # @UnusedVariable
self.csvWriter = csv.writer(sys.stdout, dialect='excel', delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
self.tableCSVWriters = {}
def close(self):
pass # don't close stdout
def flush(self):
sys.stdout.flush()
def __str__(self):
return "<OutputPipe:<stdout>"
def writerow(self, colElementArray, tableName=None):
# For CSV: make sure everything is a string:
if self.outputFormat == OutputDisposition.OutputFormat.CSV:
row = map(str,colElementArray)
if tableName is None:
self.csvWriter.writerow(row)
else:
self.tableCSVWriters[tableName].writerow(row)
else:
print(colElementArray)
def startNewTable(self, tableName, schemaHintsNewTable):
'''
Called when parser needs to create a table beyond
the main table.
        :param tableName: name of the new table
        :type tableName: String
        :param schemaHintsNewTable: map of column name to column SQL type
        :type schemaHintsNewTable: {String,ColDataType}
'''
self.addSchemaHints(tableName, schemaHintsNewTable)
tmpTableFile = self.createTmpTableFile(tableName, 'csv')
self.tableCSVWriters[tableName] = csv.writer(tmpTableFile,
dialect='excel',
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
def write(self, whatToWrite):
'''
Write given string straight to the output. No assumption made about the format
        :param whatToWrite: string to write verbatim to the output
        :type whatToWrite: String
'''
sys.stdout.write(whatToWrite)
sys.stdout.flush()
def getCSVTableOutFileName(self, tableName):
return self.name
class OutputFile(OutputDisposition):
# When looking at INSERT INTO tableName (...,
# grab 'tableName':
TABLE_NAME_PATTERN = re.compile(r'[^\s]*\s[^\s]*\s([^\s]*)\s')
# When looking at:" ('7a286e24_b578_4741_b6e0_c0e8596bd456','Mozil...);\n"
# grab everything inside the parens, including the trailing ');\n', which
# we'll cut out in the code:
VALUES_PATTERN = re.compile(r'^[\s]{4}\(([^\n]*)\n{0,1}')
def __init__(self, fileName, outputFormat, options='ab'):
'''
Create instance of an output file destination for converted log files.
Such an instance is created both for OutputFormat.SQL_INSERT_STATEMENTS and
for OutputFormat.CSV. In the Insert statements case the fileName is the file
where all INSERT statements are placed; i.e. the entire dump. If the output format
is CSV, then the fileName is a prefix for the file names of each generated CSV file
(one file for each table).
:param fileName: fully qualified name of output file for CSV (in case of CSV-only),
or MySQL INSERT statement dump
:type fileName: String
:param outputFormat: whether to output CSV or MySQL INSERT statements
:type outputFormat: OutputDisposition.OutputFormat
:param options: output file options as per Python built-in 'open()'. Defaults to append/binary. The
latter for compatibility with Windows
:type options: String
'''
super(OutputFile, self).__init__(outputFormat)
# Make file name accessible as property just like
# Python file objects do:
self.name = fileName # @UnusedVariable
self.outputFormat = outputFormat
# Open the output file as 'append' and 'binary'
# The latter is needed for Windows.
self.fileHandle = open(fileName, options)
self.csvWriter = csv.writer(sys.stdout, dialect='excel', delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        # Prepare the per-table CSV writer registry. It is initialized for
        # every output format (not just the CSV-producing ones) so that
        # flush() and close() can iterate over it safely:
        self.tableCSVWriters = {}
def close(self):
self.fileHandle.close()
# Also close any CSV out files that might exist:
try:
for csvFD in self.csvTableFiles.values():
csvFD.close()
except:
pass
def flush(self):
self.fileHandle.flush()
for csvFD in self.tableCSVWriters.values():
try:
csvFD.flush()
except:
pass
def remove(self):
try:
os.remove(self.fileHandle.name)
except:
pass
def __str__(self):
return "<OutputFile:%s>" % self.getFileName()
def getFileName(self, tableName=None):
'''
Get file name of a MySQL INSERT statement outfile,
or, given a table name, the name of the outfile
for CSV destined to the given table.
        :param tableName: name of the table whose CSV out-file name is sought; None for the main output file
        :type tableName: {String | None}
'''
if tableName is None:
return self.name
else:
fd = self.csvTableFiles.get(tableName, None)
if fd is None:
return None
return fd.name
def writerow(self, colElementArray, tableName=None):
'''
How I wish Python had parameter type based polymorphism. Life
would be so much cleaner.
ColElementArray is either an array of values (coming from
a CSV-only parser), or a string that contains a complete
MySQL INSERT statement (from MySQL dump-creating parsers).
In the first case, we ensure all elements in the array are
strings, and write to output. In the latter case we write
the INSERT statements to their output file. Then, if output
format is SQL_INSERTS_AND_CSV, we also extract the MySQL
values and write them to the proper CSV file.
:param colElementArray: either a MySQL INSERT statement, or an array of values
:type colElementArray: {String | [string]}
:param tableName: name of table to which output is destined. Only needed for
value arrays from CSV-only parsers. Their value arrays don't contain
info on the destination table. INSERT statements do contain the destination table
name.
:type tableName: String
'''
if isinstance(colElementArray, list):
# Simple CSV array of values;
# make sure every array element is a string:
row = map(str,colElementArray)
if tableName is None:
# The main (and maybe only) table:
self.csvWriter.writerow(row)
else:
# One of the other tables for which files
# were opened during calls to startNewTable():
self.tableCSVWriters[tableName].writerow(row)
else:
# We are either outputting INSERT statements, or
# both those and CSV, or just CSV derived from a
# full MySQL INSERT parser, like edxTrackLogJSONParser.
# Start with the INSERTS:
if self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS or\
self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
self.fileHandle.write(colElementArray + '\n')
# If we are outputting either CSV or INSERTs and CSV, do the CSV
# part now:
if self.outputFormat != OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS:
# Strip the CSV parts out from the INSERT statement, which may
# contain multiple VALUE statements:
self.writeCSVRowsFromInsertStatement(colElementArray)
def write(self, whatToWrite):
'''
Write given string straight to the output. No assumption made about the format
        :param whatToWrite: string to write verbatim to the output
        :type whatToWrite: String
'''
self.fileHandle.write(whatToWrite)
self.fileHandle.flush()
def startNewTable(self, tableName, schemaHintsNewTable):
'''
Called when parser needs to create a table beyond
the main table (in case of CSV-Only), or any table
in case of SQLInsert+CSV.
:param tableName: name of new table
:type tableName: string
:param schemaHintsNewTable: map column name to column SQL type
:type schemaHintsNewTable: {String,ColDataType}
'''
self.addSchemaHints(tableName, schemaHintsNewTable)
if self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS:
return
# We are producing CSV (possibly in addition to Inserts):
try:
# Already have a table writer for this table?
self.tableCSVWriters[tableName]
return # yep
except KeyError:
# OK, really is a new table caller is starting:
pass
# Ensure that we have an open FD to write to for this table:
if self.outputFormat == OutputDisposition.OutputFormat.CSV or\
self.outputFormat == OutputDisposition.OutputFormat.SQL_INSERTS_AND_CSV:
self.ensureOpenCSVOutFileFromTableName(tableName)
def ensureOpenCSVOutFileFromTableName(self, tableName):
'''
Checks whether an open File object exists for the given
table. If not, creates one. Returns the FD. The output
        file is created in the same directory as the main output file.
:param tableName: name of table whose CSV output file we are to check for, or create
:type tableName: String
:return: a File object open for writing/appending
:rtype: File
'''
try:
# If we already have an FD for this table, return:
return self.tableCSVWriters[tableName]
except KeyError:
# Else create one below:
pass
outFileName = self.getFileName()
if outFileName == '/dev/null':
outFile = open('/dev/null', 'ab')
self.csvTableFiles[tableName] = outFile
return outFile
csvOutFileName = self.getCSVTableOutFileName(tableName)
outFile = open(csvOutFileName, 'w')
self.csvTableFiles[tableName] = outFile
self.tableCSVWriters[tableName] = csv.writer(outFile,
dialect='excel',
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
return self.tableCSVWriters[tableName]
def getCSVTableOutFileName(self, tableName):
# The 'None' below ensures that we get the
# main file's name back:
return "%s_%sTable.csv" % (self.getFileName(None), tableName)
def writeCSVRowsFromInsertStatement(self, insertStatement):
'''
Takes one SQL INSERT INTO Statement, possibly including multiple VALUES
lines. Extracts the destination table and the values list(s), and writes
them to disk via the appropriate CSVWriter. The INSERT statements are
expected to be very regular, generated by json_to_relation. Don't use
this method for arbitrary INSERT statements, b/c it relies on regular
expressions that expect the specific format. Prerequisite: self.tableCSVWriters
is a dictionary that maps table names into File objects that are open
for writing.
:param insertStatement: Well-formed MySQL INSERT statement
:type insertStatement: String
@raise ValueError: if table name could not be extracted from the
INSERT statement, or if the insertStatement contains no VALUES
clause.
'''
inFD = StringIO.StringIO(insertStatement)
try:
firstLine = inFD.readline()
# Pick out the name of the table to which CSV is to be added:
tblNameMatch = OutputFile.TABLE_NAME_PATTERN.search(firstLine)
if tblNameMatch is None:
raise ValueError('No match when trying to extract table name from "%s"' % insertStatement)
tblName = tblNameMatch.group(1)
except IndexError:
raise ValueError('Could not extract table name from "%s"' % insertStatement)
readAllValueTuples = False
while not readAllValueTuples:
# Get values list that belongs to this insert statement:
valuesLine = inFD.readline()
if not valuesLine.startswith(' ('):
readAllValueTuples = True
continue
# Extract the comma-separated values list out from the parens;
# first get "'fasdrew_fdsaf...',...);\n":
oneValuesLineMatch = OutputFile.VALUES_PATTERN.search(valuesLine)
if oneValuesLineMatch is None:
# Hopefully never happens:
raise ValueError('No match for values line "%s"' % insertStatement)
# Get just the comma-separated values list from
# 'abfd_sfd,...);\n
valuesList = oneValuesLineMatch.group(1)[:-2] + '\n'
# Make sure we've seen additions to this table before or,
# if not, have a CSV writer and a file created to receive
# the CSV lines:
self.ensureOpenCSVOutFileFromTableName(tblName)
theOutFd = self.csvTableFiles[tblName]
theOutFd.write(valuesList)
class ColumnSpec(object):
'''
Housekeeping class. Each instance represents the name,
position, and datatype of one column. These instances are
used to generate column name headers, and
SQL insert statements.
'''
def __init__(self, colName, colDataType, jsonToRelationProcessor):
'''
Create a ColumnSpec instance.
:param colName: name of column
:type colName: String
:param colDataType: data type of column (an enum)
:type colDataType: ColumnSpec
:param jsonToRelationProcessor: associated JSON to relation JSONToRelation instance
:type jsonToRelationProcessor: JSONToRelation
'''
self.colName = colName
self.colDataType = colDataType
self.colPos = jsonToRelationProcessor.getNextNewColPos()
jsonToRelationProcessor.bumpNextNewColPos()
def getDefaultValue(self):
return ColDataType().defaultValues[self.colDataType]
def getName(self):
'''
Return column name
:return: name of column
:rtype: String
'''
return self.colName
def getType(self):
'''
Return SQL type
:return: SQL type of colum in upper case
:rtype: String
'''
return ColDataType().toString(self.colDataType).upper()
def getSQLDefSnippet(self):
'''
Return string snippet to use in SQL CREATE TABLE or ALTER TABLE
statement
'''
return " %s %s" % (self.getName(), self.getType())
def __str__(self):
return "<Col %s: %s (position %s)>" % (self.colName,
self.getType(),
self.colPos)
def __repr__(self):
return self.__str__()
class TableSchemas(object):
'''
Repository for the schemas of all tables. A schema is an
    array of ColumnSpec instances. Each such list is associated with
    one relational table. A dict holds the schemas for
all tables.
'''
def __init__(self):
self.allSchemas = OrderedDict()
# Add empty schema for main (default) table:
self.allSchemas[None] = OrderedDict()
def __getitem__(self, tableName):
return self.allSchemas[tableName]
def __setitem__(self, tableName, colSpecsDict):
self.allSchemas[tableName] = colSpecsDict
def keys(self):
return self.allSchemas.keys()
def addColSpec(self, tableName, colSpec):
try:
schema = self.allSchemas[tableName]
except KeyError:
self.allSchemas[tableName] = {colSpec.getName() : colSpec}
schema = self.allSchemas[tableName]
schema[colSpec.getName()] = colSpec
def addColSpecs(self, tableName, colSpecsDict):
if not isinstance(colSpecsDict, OrderedDict):
raise ValueError("ColumSpec parameter must be a dictionary<ColName,ColumnSpec>")
try:
schema = self.allSchemas[tableName]
except KeyError:
self.allSchemas[tableName] = colSpecsDict
schema = self.allSchemas[tableName]
# Change schema to include the new dict:
schema.update(colSpecsDict)
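# ----------------------------------------------------------------------
# A minimal usage sketch (not part of the original module; Python 2, like
# the rest of this file). It writes one INSERT statement to a .sql dump
# via OutputFile; the table and column names are made up for illustration:
if __name__ == '__main__':
    demo = OutputFile('/tmp/output_disposition_demo.sql',
                      OutputDisposition.OutputFormat.SQL_INSERT_STATEMENTS,
                      options='wb')
    demo.writerow("INSERT INTO DemoTable (demo_col) VALUES\n    ('abc');")
    demo.close()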
| EDUlib/eTracesX | Translation_software/edx_to_MOOCdb_piping/import.openedx.apipe/json_to_relation/output_disposition.py | Python | agpl-3.0 | 24,333 |
# Make sure you name your file after the class it contains: className.py
from hint.hint_class_helpers.find_matches import find_matches
class Prob2_Part1:
"""
Author: Shen Ting Ang
Date: 10/11/2016
"""
def check_attempt(self, params):
self.attempt = params['attempt'] #student's attempt
self.answer = params['answer'] #solution
self.att_tree = params['att_tree'] #attempt tree
self.ans_tree = params['ans_tree'] #solution tree
matches = find_matches(params)
matching_node = [m[0] for m in matches]
try:
if '^' not in self.attempt:
hint='Missing ^ in the answer. '
return hint + 'What is the probability of a specific combination of 3 coin flips? ', '1/2^3'
#check if the form of the parse tree has the right
            #shape: an operator and two leaves that correspond to
#the operands
elif 'C(' not in self.attempt and '!' not in self.attempt:
hint='Missing choose function in the answer. '
return hint + 'How many possible ways are there to get 2 questions correct out of 5 questions? C(5,_)', '2'
else:
return "",""
except Exception:
return '',''
def get_problems(self):
self.problem_list = ["Combinatorics/GrinsteadSnell3.2.18/part1"]
return self.problem_list
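    # A minimal usage sketch (hypothetical values; the real 'params' dict,
    # including the parse trees, is supplied by the hint framework):
    #
    #     checker = Prob2_Part1()
    #     hint, blank = checker.check_attempt({
    #         'attempt': '10/32', 'answer': 'C(3,2)*(1/2)^3',
    #         'att_tree': None, 'ans_tree': None,
    #     })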
| zhenzhai/edx-platform | common/lib/sandbox-packages/hint/hint_class/Week3/Prob2_Part1.py | Python | agpl-3.0 | 1,401 |
from collections import namedtuple
import configparser
from functools import lru_cache
import unittest
from unittest.mock import patch, mock_open
import transaction
import testing.postgresql
import webtest
import datetime
from pyramid.config import Configurator
from pyramid.paster import get_app
from sqlalchemy import create_engine
from sqlalchemy.exc import SAWarning
import test_project
import inspect
import os
import urllib
import warnings
import json
from parameterized import parameterized
import pyramid_jsonapi.metadata
from openapi_spec_validator import validate_spec
import pprint
import ltree
from pyramid_jsonapi.permissions import (
Permission,
Targets,
)
from test_project.models import (
DBSession,
Base,
Person,
Blog,
)
from test_project import test_data
cur_dir = os.path.dirname(
os.path.abspath(
inspect.getfile(inspect.currentframe())
)
)
parent_dir = os.path.dirname(cur_dir)
RelHalf = namedtuple('RelHalf', 'collection rel many filters')
FilterInfo = namedtuple('FilterInfo', 'att op value')
RelInfo = namedtuple('RelInfo', 'src tgt comment')
rel_infos = (
RelInfo(
RelHalf('people', 'blogs', False, []),
RelHalf(
'blogs', 'owner', True,
[
FilterInfo('title', 'eq', 'owned by 11'),
],
),
'One to many',
),
RelInfo(
RelHalf('blogs', 'owner', True, []),
RelHalf(
'people', 'blogs', False,
[
FilterInfo('name', 'eq', 'one thing'),
]
),
'Many to one'
),
RelInfo(
RelHalf('people', 'articles_by_assoc', True, []),
RelHalf(
'articles_by_assoc', 'authors', True,
[
FilterInfo('title', 'eq', 'Collaborative one.')
]
),
'Many to many by association table'
),
RelInfo(
RelHalf('people', 'articles_by_proxy', True, []),
RelHalf(
'articles_by_obj', None, True,
[
FilterInfo('title', 'eq', 'Collaborative by obj one.')
]
),
'Many to many by association proxy'
),
)
class MyTestApp(webtest.TestApp):
def _check_status(self, status, res):
try:
super()._check_status(status, res)
except webtest.AppError as e:
errors = res.json_body.get('errors', [{}])
raise webtest.AppError(
'%s\n%s',
errors, res.json_body.get('traceback')
)
def setUpModule():
'''Create a test DB and import data.'''
# Create a new database somewhere in /tmp
global postgresql
global engine
postgresql = testing.postgresql.Postgresql(port=7654)
engine = create_engine(postgresql.url())
ltree.add_ltree_extension(engine)
DBSession.configure(bind=engine)
def tearDownModule():
'''Throw away test DB.'''
global postgresql
DBSession.close()
postgresql.stop()
def rels_doc_func(func, i, param):
src, tgt, comment = param[0]
return '{}:{}/{} ({})'.format(func.__name__, src.collection, src.rel, comment)
def make_ri(_type, _id):
return { 'type': _type, 'id': _id }
class DBTestBase(unittest.TestCase):
_test_app = None
@classmethod
def setUpClass(cls):
cls._test_app = cls.new_test_app()
def setUp(self):
Base.metadata.create_all(engine)
# Add some basic test data.
test_data.add_to_db(engine)
transaction.begin()
def tearDown(self):
transaction.abort()
Base.metadata.drop_all(engine)
def test_app(self, options=None):
if (options is None) and self._test_app:
# If there are no options and we have a cached app, return it.
return self._test_app
return self.new_test_app(options)
@staticmethod
def new_test_app(options=None):
'''Create a test app.'''
config_path = '{}/testing.ini'.format(parent_dir)
if options:
tmp_cfg = configparser.ConfigParser()
tmp_cfg.read(config_path)
tmp_cfg['app:main'].update(options or {})
config_path = '{}/tmp_testing.ini'.format(parent_dir)
with open(config_path, 'w') as tmp_file:
tmp_cfg.write(tmp_file)
with warnings.catch_warnings():
# Suppress SAWarning: about Property _jsonapi_id being replaced by
# Propery _jsonapi_id every time a new app is instantiated.
warnings.simplefilter(
"ignore",
category=SAWarning
)
app = get_app(config_path)
test_app = MyTestApp(app)
test_app._pj_app = app
if options:
os.remove(config_path)
return test_app
def evaluate_filter(self, att_val, op, test_val):
if op == 'eq':
return att_val == test_val
else:
            raise Exception('Unknown filter op: {}'.format(op))
class TestTmp(DBTestBase):
'''To isolate tests so they can be run individually during development.'''
class TestPermissions(DBTestBase):
'''Test permission handling mechanisms.
'''
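    # The registration pattern exercised throughout this class, distilled
    # (a sketch only; the signature mirrors the calls in the tests below):
    #
    #     pj.view_classes[SomeModel].register_permission_filter(
    #         ['get'],           # permissions the filter governs
    #         ['alter_result'],  # processing stage it hooks into
    #         lambda obj, *args, **kwargs: <bool or permission object>,
    #     )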
def test_get_alter_result_item(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
# Not allowed to see alice (people/1)
pj.view_classes[test_project.models.Person].register_permission_filter(
['read'],
['alter_result'],
lambda obj, *args, **kwargs: obj.object.name != 'alice',
)
# Shouldn't be allowed to see people/1 (alice)
test_app.get('/people/1', status=403)
# Should be able to see people/2 (bob)
test_app.get('/people/2')
def test_get_alter_result_item_individual_attributes(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
def pfilter(obj, view, mask, *args, **kwargs):
if obj.object.name == 'alice':
return view.permission_object(subtract_attributes={'age',})
else:
return True
pj.view_classes[test_project.models.Person].register_permission_filter(
['get'],
['alter_result', ],
pfilter,
)
# Alice should have attribute 'name' but not 'age'.
alice = test_app.get('/people/1').json_body['data']
self.assertIn('name', alice['attributes'])
self.assertNotIn('age', alice['attributes'])
def test_get_alter_result_item_individual_rels(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
def pfilter(obj, view, target, **kwargs):
if obj.object.name == 'alice' and target.name == 'posts':
return False
else:
return True
pj.view_classes[test_project.models.Person].register_permission_filter(
['get'],
['alter_result', ],
pfilter,
target_types=(Targets.relationship,)
)
# Alice should have relationship 'blogs' but not 'posts'.
alice = test_app.get('/people/1').json_body['data']
self.assertIn('blogs', alice['relationships'])
self.assertNotIn('posts', alice['relationships'])
def test_get_alter_result_item_rel_ids(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
# Not allowed to see blogs/1 (one of alice's 2 blogs)
pj.view_classes[test_project.models.Blog].register_permission_filter(
['get'],
['alter_result', ],
lambda obj, *args, **kwargs: obj.object.id != 1,
)
alice = test_app.get('/people/1').json_body['data']
alice_blogs = alice['relationships']['blogs']['data']
self.assertIn({'type': 'blogs', 'id': '2'}, alice_blogs)
self.assertNotIn({'type': 'blogs', 'id': '1'}, alice_blogs)
def test_get_alter_result_item_included_items(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
# Not allowed to see blogs/1 (one of alice's 2 blogs)
pj.view_classes[test_project.models.Blog].register_permission_filter(
['get'],
['alter_result', ],
lambda obj, *args, **kwargs: obj.object.id != 1,
)
included = test_app.get('/people/1?include=blogs').json_body['included']
included_blogs = {
item['id'] for item in included if item['type'] == 'blogs'
}
self.assertNotIn('1', included_blogs)
self.assertIn('2', included_blogs)
def test_get_alter_result_collection(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
# Not allowed to see alice (people/1)
pj.view_classes[test_project.models.Person].register_permission_filter(
['get'],
['alter_result', ],
lambda obj, *args, **kwargs: obj.object.name != 'alice',
)
# Make sure we get the lowest ids with a filter.
ret = test_app.get('/people?filter[id:lt]=3').json_body
people = ret['data']
ppl_ids = { person['id'] for person in people }
self.assertNotIn('1', ppl_ids)
self.assertIn('2', ppl_ids)
def test_get_alter_result_collection_meta_info(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
# Not allowed to see alice (people/1)
pj.view_classes[test_project.models.Person].register_permission_filter(
['get'],
['alter_result', ],
lambda obj, *args, **kwargs: obj.object.name != 'alice',
)
# Make sure we get the lowest ids with a filter.
res = test_app.get('/people?filter[id:lt]=3').json_body
meta = res['meta']
self.assertIn('people::1', meta['rejected']['objects'])
def test_related_get_alter_result(self):
'''
'related' link should fetch only allowed related resource(s).
'''
test_app = self.test_app({})
pj = test_app._pj_app.pj
# Not allowed to see blog with title 'main: alice' (aka blogs/1)
pj.view_classes[test_project.models.Blog].register_permission_filter(
['get'],
['alter_result', ],
lambda obj, *args, **kwargs: obj.object.title != 'main: alice',
)
r = test_app.get('/people/1/blogs').json_body
data = r['data']
ids = {o['id'] for o in data}
self.assertIsInstance(data, list)
self.assertNotIn('1', ids)
# Not allowed to see alice (people/1)
pj.view_classes[test_project.models.Person].register_permission_filter(
['get'],
['alter_result', ],
lambda obj, *args, **kwargs: obj.object.name != 'alice',
)
r = test_app.get('/blogs/2/owner', status=403)
def test_post_alterreq_collection(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
# Not allowed to post the name "forbidden"
def pfilter(obj, view, **kwargs):
return obj['attributes'].get('name') != 'forbidden'
pj.view_classes[test_project.models.Person].register_permission_filter(
['post'],
['alter_request'],
pfilter,
)
# Make sure we can't post the forbidden name.
test_app.post_json(
'/people',
{
'data': {
'type': 'people',
'attributes': {
'name': 'forbidden'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=403
)
# Make sure we can post some other name.
test_app.post_json(
'/people',
{
'data': {
'type': 'people',
'attributes': {
'name': 'allowed'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
)
def test_post_alterreq_collection_with_rels(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
# def blogs_pfilter(obj, *args, **kwargs):
# return {'attributes': True, 'relationships': True}
# pj.view_classes[test_project.models.Blog].register_permission_filter(
# ['post'],
# ['alter_request'],
# blogs_pfilter,
# )
# /people: allow POST to all atts and to 3 relationships.
def people_pfilter(obj, view, target, **kwargs):
# if target.name == 'posts':
# print(obj['type'], obj['relationships']['posts'])
return view.permission_object(
True,
{'comments', 'articles_by_proxy', 'articles_by_assoc'}
)
pj.view_classes[test_project.models.Person].register_permission_filter(
['post'],
['alter_request'],
people_pfilter,
)
# /comments: allow PATCH (required to set 'comments.author') on all
# but comments/4.
pj.view_classes[test_project.models.Comment].register_permission_filter(
['patch'],
['alter_request'],
lambda obj, *args, **kwargs: obj['id'] not in {'4'}
)
# /articles_by_assoc: allow POST (required to add people/new to
# 'articles_by_assoc.authors') on all but articles_by_assoc/11.
pj.view_classes[test_project.models.ArticleByAssoc].register_permission_filter(
['post'],
['alter_request'],
lambda obj, *args, **kwargs: obj['id'] not in {'11'}
)
pj.view_classes[test_project.models.ArticleByObj].register_permission_filter(
['post'],
['alter_request'],
lambda obj, *args, **kwargs: obj['id'] not in {'10'}
)
person_in = {
'data': {
'type': 'people',
'attributes': {
'name': 'post perms test'
},
'relationships': {
'posts': {
'data': [
{'type': 'posts', 'id': '20'},
{'type': 'posts', 'id': '21'}
]
},
'comments': {
'data': [
{'type': 'comments', 'id': '4'},
{'type': 'comments', 'id': '5'},
]
},
'articles_by_assoc': {
'data': [
{'type': 'articles_by_assoc', 'id': '10'},
{'type': 'articles_by_assoc', 'id': '11'},
]
},
'articles_by_proxy': {
'data': [
{'type': 'articles_by_obj', 'id': '10'},
{'type': 'articles_by_obj', 'id': '11'},
]
}
}
}
}
person_out = test_app.post_json(
'/people',
person_in,
headers={'Content-Type': 'application/vnd.api+json'},
).json_body['data']
rels = person_out['relationships']
self.assertEqual(len(rels['posts']['data']),0)
self.assertIn({'type': 'comments', 'id': '5'}, rels['comments']['data'])
self.assertNotIn({'type': 'comments', 'id': '4'}, rels['comments']['data'])
self.assertIn({'type': 'articles_by_assoc', 'id': '10'}, rels['articles_by_assoc']['data'])
self.assertNotIn({'type': 'articles_by_assoc', 'id': '11'}, rels['articles_by_assoc']['data'])
self.assertIn({'type': 'articles_by_obj', 'id': '11'}, rels['articles_by_proxy']['data'])
self.assertNotIn({'type': 'articles_by_obj', 'id': '10'}, rels['articles_by_proxy']['data'])
# Still need to test a to_one relationship. Posts has one of those.
# Switching to " for quoting so that the following can be copy/pasted as
# JSON in manual tests.
post_json = {
"data": {
"type": "posts",
"attributes": {
"title": "test"
},
"relationships": {
"author": {
"data": {"type": "people", "id": "10"}
},
"blog": {
"data": {"type": "blogs", "id": "10"}
}
}
}
}
# The Person permission filter defined above shouldn't allow us to POST
# post_json because we don't have permission to POST to Person.posts.
test_app.post_json(
'/posts',
post_json,
headers={'Content-Type': 'application/vnd.api+json'},
status=409 # this should probably be a different status.
)
# Replace the permission filter for Person - we need to be able to
# alter the Person.posts relationship.
pj.view_classes[test_project.models.Person].register_permission_filter(
['post'],
['alter_request'],
lambda *a, **kw: True,
)
post_out = test_app.post_json(
'/posts',
post_json,
headers={'Content-Type': 'application/vnd.api+json'},
)
def test_post_alterreq_relationship(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
def blogs_pfilter(obj, *args, **kwargs):
if obj['id'] == '12':
return False
else:
return True
pj.view_classes[test_project.models.Blog].register_permission_filter(
['patch'],
['alter_request'],
blogs_pfilter,
)
# /people: allow POST to all atts and to 3 relationships.
def people_pfilter(obj, view, permission, **kwargs):
if permission == 'delete' and obj['id'] == '20':
return False
if permission == 'post' and obj['id'] == '12':
return False
return view.permission_object(
True,
{'blogs', 'articles_by_proxy', 'articles_by_assoc'}
)
pj.view_classes[test_project.models.Person].register_permission_filter(
['post', 'delete'],
['alter_request'],
people_pfilter,
)
# /articles_by_assoc: allow POST (required to add people/new to
# 'articles_by_assoc.authors') on all but articles_by_assoc/11.
pj.view_classes[test_project.models.ArticleByAssoc].register_permission_filter(
['post'],
['alter_request'],
lambda obj, *args, **kwargs: obj['id'] not in {'11'}
)
pj.view_classes[test_project.models.ArticleByObj].register_permission_filter(
['post'],
['alter_request'],
lambda obj, *args, **kwargs: obj['id'] not in {'10'}
)
# ONETOMANY relationship.
out = test_app.post_json(
'/people/1/relationships/blogs',
{
'data': [
{'type': 'blogs', 'id': '10'},
{'type': 'blogs', 'id': '11'},
{'type': 'blogs', 'id': '12'},
]
},
headers={'Content-Type': 'application/vnd.api+json'},
).json_body
# pprint.pprint(out)
# Now fetch people/1 and see if the new blogs are there.
p1 = test_app.get('/people/1').json_body['data']
blogs = p1['relationships']['blogs']['data']
# Should have left the original blogs in place.
self.assertIn({'type': 'blogs', 'id': '1'}, blogs)
# Should have added blogs/10 (previously no owner)
self.assertIn({'type': 'blogs', 'id': '10'}, blogs)
# Should have added blogs/11 (previously owned by 11)
self.assertIn({'type': 'blogs', 'id': '11'}, blogs)
# blogs/12 disallowed by blogs filter.
self.assertNotIn({'type': 'blogs', 'id': '12'}, blogs)
# MANYTOMANY relationship.
out = test_app.post_json(
'/people/1/relationships/articles_by_assoc',
{
'data': [
{'type': 'articles_by_assoc', 'id': '10'},
{'type': 'articles_by_assoc', 'id': '11'},
{'type': 'articles_by_assoc', 'id': '12'},
]
},
headers={'Content-Type': 'application/vnd.api+json'},
).json_body
p1 = test_app.get('/people/1').json_body['data']
articles = p1['relationships']['articles_by_assoc']['data']
# Should have added articles_by_assoc/10
self.assertIn({'type': 'articles_by_assoc', 'id': '10'}, articles)
# articles_by_assoc/11 disallowed by articles_by_assoc filter.
self.assertNotIn({'type': 'articles_by_assoc', 'id': '11'}, articles)
# articles_by_assoc/12 disallowed by people filter.
# self.assertNotIn({'type': 'articles_by_assoc', 'id': '12'}, articles)
def test_patch_alterreq_item_with_rels(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
# /people: allow PATCH to all atts and to 3 relationships.
def people_pfilter(obj, view, **kwargs):
return view.permission_object(
True,
{'comments', 'articles_by_proxy', 'articles_by_assoc'}
)
pj.view_classes[test_project.models.Person].register_permission_filter(
['patch'],
['alter_request'],
people_pfilter,
)
# /comments: allow PATCH (required to set 'comments.author') on all
# but comments/4.
def comments_pfilter(obj, **kwargs):
if obj['id'] == '4' and obj['relationships']['author']['data']['id'] == '1':
# We're not allowing people/1 to be the author of comments/4 for
# some reason.
return False
return True
pj.view_classes[test_project.models.Comment].register_permission_filter(
['patch'],
['alter_request'],
comments_pfilter
)
# /articles_by_assoc: allow POST (required to add people/new to
# 'articles_by_assoc.authors') on all but articles_by_assoc/11.
pj.view_classes[test_project.models.ArticleByAssoc].register_permission_filter(
['post'],
['alter_request'],
lambda obj, *args, **kwargs: obj['id'] not in {'11'}
)
pj.view_classes[test_project.models.ArticleByObj].register_permission_filter(
['post'],
['alter_request'],
lambda obj, *args, **kwargs: obj['id'] not in {'11'}
)
person_in = {
'data': {
'type': 'people',
'id': '1',
'attributes': {
'name': 'post perms test'
},
'relationships': {
'posts': {
'data': [
{'type': 'posts', 'id': '1'},
{'type': 'posts', 'id': '2'},
{'type': 'posts', 'id': '3'},
{'type': 'posts', 'id': '20'},
]
},
'comments': {
'data': [
{'type': 'comments', 'id': '1'},
{'type': 'comments', 'id': '4'},
{'type': 'comments', 'id': '5'},
]
},
'articles_by_assoc': {
'data': [
{'type': 'articles_by_assoc', 'id': '10'},
{'type': 'articles_by_assoc', 'id': '11'},
]
},
'articles_by_proxy': {
'data': [
{'type': 'articles_by_obj', 'id': '1'},
{'type': 'articles_by_obj', 'id': '10'},
{'type': 'articles_by_obj', 'id': '11'},
]
}
}
}
}
test_app.patch_json(
'/people/1',
person_in,
headers={'Content-Type': 'application/vnd.api+json'},
)
person_out = test_app.get('/people/1').json_body['data']
rels = person_out['relationships']
# pprint.pprint(rels['posts']['data'])
# pprint.pprint(rels['comments']['data'])
# pprint.pprint(rels['articles_by_assoc']['data'])
# pprint.pprint(rels['articles_by_proxy']['data'])
# Still need to test a to_one relationship. Blogs has one of those.
def blogs_pfilter(obj, view, **kwargs):
if obj['id'] == '13':
# Not allowed to change blogs/13 at all.
return False
if obj['id'] == '10':
# Not allowed to set owner of blogs/10 to people/13
if obj['relationships']['owner']['data'].get('id') == '13':
# print('people/13 not allowed as owner of 10')
return view.permission_object(True, {'posts',})
if obj['id'] == '11':
# Not allowed to set owner of blogs/11 to None.
if obj['relationships']['owner']['data'] is None:
return view.permission_object(True, {'posts',})
return True
pj.view_classes[test_project.models.Blog].register_permission_filter(
['patch'],
['alter_request'],
blogs_pfilter
)
blog = {
'data': {
'type': 'blogs', 'id': None,
'relationships': {
'owner': {
'data': None
}
}
}
}
blog_owner = blog['data']['relationships']['owner']
# /blogs/10 is owned by no-one. Change owner to people/11. Should
# Have permission for this one.
ppl11 = make_ri('people', '11')
blog['data']['id'] = '10'
blog_owner['data'] = ppl11
self.assertNotEqual(
test_app.get('/blogs/10').json_body['data']['relationships']['owner']['data'],
ppl11
)
test_app.patch_json(
'/blogs/10',
blog,
headers={'Content-Type': 'application/vnd.api+json'},
)
self.assertEqual(
test_app.get('/blogs/10').json_body['data']['relationships']['owner']['data'],
ppl11
)
# Not allowed to set blogs/10.owner to people/13 though.
ppl13 = make_ri('people', '13')
blog_owner['data'] = ppl13
test_app.patch_json(
'/blogs/10',
blog,
headers={'Content-Type': 'application/vnd.api+json'},
)
self.assertNotEqual(
test_app.get('/blogs/10').json_body['data']['relationships']['owner']['data'],
ppl13
)
# Should be able to switch ownership of blogs/11 to people/12
ppl12 = make_ri('people', '12')
blog['data']['id'] = '11'
blog_owner['data'] = ppl12
test_app.patch_json(
'/blogs/11',
blog,
headers={'Content-Type': 'application/vnd.api+json'},
)
self.assertEqual(
test_app.get('/blogs/11').json_body['data']['relationships']['owner']['data'],
ppl12
)
# but not to None
blog_owner['data'] = None
test_app.patch_json(
'/blogs/11',
blog,
headers={'Content-Type': 'application/vnd.api+json'},
)
self.assertNotEqual(
test_app.get('/blogs/11').json_body['data']['relationships']['owner']['data'],
None
)
# Shouldn't be allowed to patch blogs/13 at all.
blog['data']['id'] = '13'
test_app.patch_json(
'/blogs/13',
blog,
headers={'Content-Type': 'application/vnd.api+json'},
status=403
)
def test_patch_alterreq_relationships(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
def people_pfilter(obj, view, **kwargs):
if obj['id'] == '1':
return False
if obj['id'] == '2':
return view.permission_object(True, False)
return True
pj.view_classes[test_project.models.Person].register_permission_filter(
['write'],
['alter_request'],
people_pfilter
)
def blogs_pfilter(obj, view, **kwargs):
if obj['id'] == '10':
# Not allowed to change blogs/10 at all.
return False
if obj['id'] == '11':
# Not allowed to set owner of blogs/11 to None.
if obj['relationships']['owner']['data'] is None:
return view.permission_object(True, {'posts',})
if obj['id'] == '12':
# Not allowed to set owner of blogs/12 to people/11
if obj['relationships']['owner']['data'].get('id') == '11':
return view.permission_object(True, {'posts',})
return True
pj.view_classes[test_project.models.Blog].register_permission_filter(
['write'],
['alter_request'],
blogs_pfilter
)
# ONETOMANY tests
# No permission to patch people/1 at all.
test_app.patch_json(
'/people/1/relationships/blogs',
{
'data': [
{'type': 'blogs', 'id': '10'},
]
},
headers={'Content-Type': 'application/vnd.api+json'},
status=403
)
# No permission to patch relationship of people/2.
test_app.patch_json(
'/people/2/relationships/blogs',
{
'data': [
{'type': 'blogs', 'id': '10'},
]
},
headers={'Content-Type': 'application/vnd.api+json'},
status=403
)
test_app.patch_json(
'/people/11/relationships/blogs',
{
'data': [
{'type': 'blogs', 'id': '10'},
{'type': 'blogs', 'id': '12'},
{'type': 'blogs', 'id': '13'},
]
},
headers={'Content-Type': 'application/vnd.api+json'},
)
blog_ids = [
b['id'] for b in
test_app.get('/people/11').json_body['data']['relationships']['blogs']['data']
]
# No permission to blogs/10
self.assertNotIn('10', blog_ids)
# No permission to set blogs/11.owner = people/11
self.assertNotIn('12', blog_ids)
# No permission to set blogs/11.owner = None
self.assertIn('11', blog_ids)
# Allowed to add blogs/13 :)
self.assertIn('13', blog_ids)
# MANYTOMANY tests
def articles_by_assoc_pfilter(obj, view, **kwargs):
if obj['id'] == '10':
# Not allowed to change articles_by_assoc/10 at all.
return False
if obj['id'] == '12':
# Not allowed to alter author of articles_by_assoc/12
return view.permission_object(True, False)
return True
pj.view_classes[test_project.models.ArticleByAssoc].register_permission_filter(
['post', 'delete'],
['alter_request'],
articles_by_assoc_pfilter
)
test_app.patch_json(
'/people/12/relationships/articles_by_assoc',
{
'data': [
{'type': 'articles_by_assoc', 'id': '10'},
{'type': 'articles_by_assoc', 'id': '1'},
]
},
headers={'Content-Type': 'application/vnd.api+json'},
)
article_ids = [
b['id'] for b in
test_app.get('/people/12').json_body['data']['relationships']['articles_by_assoc']['data']
]
# No permission to add 10
self.assertNotIn('10', article_ids)
# Permission to remove 13
self.assertNotIn('13', article_ids)
# No permission to remove 12
self.assertIn('12', article_ids)
# Permission to add 1
self.assertIn('1', article_ids)
def test_delete_alterreq_item(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
def comments_pfilter(obj, view, **kwargs):
if obj['id'] == '1':
return True
else:
return False
pj.view_classes[test_project.models.Comment].register_permission_filter(
['delete'],
['alter_request'],
comments_pfilter,
)
test_app.delete('/comments/1')
test_app.delete('/comments/2', status=403)
def test_delete_alterreq_relationship(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
def blogs_pfilter(obj, *args, **kwargs):
if obj['id'] == '12':
return False
else:
return True
pj.view_classes[test_project.models.Blog].register_permission_filter(
['patch'],
['alter_request'],
blogs_pfilter,
)
# /people: deny changes that involve blogs/1 in the blogs relationship.
def people_pfilter(obj, view, permission, target, **kwargs):
rels = obj['relationships']
if target.name == 'blogs' and rels['blogs']['data'][0]['id'] == '1':
return False
else:
return True
pj.view_classes[test_project.models.Person].register_permission_filter(
['post', 'delete'],
['alter_request'],
people_pfilter,
)
# /articles_by_assoc: allow POST (required to add people/new to
# 'articles_by_assoc.authors') on all but articles_by_assoc/11.
pj.view_classes[test_project.models.ArticleByAssoc].register_permission_filter(
['post'],
['alter_request'],
lambda obj, *args, **kwargs: obj['id'] not in {'11'}
)
pj.view_classes[test_project.models.ArticleByObj].register_permission_filter(
['post'],
['alter_request'],
lambda obj, *args, **kwargs: obj['id'] not in {'10'}
)
# ONETOMANY relationship.
test_app.delete_json(
'/people/1/relationships/blogs',
{
'data': [
{'type': 'blogs', 'id': '1'},
{'type': 'blogs', 'id': '2'},
]
},
headers={'Content-Type': 'application/vnd.api+json'},
)
post_ids = [
b['id'] for b in
test_app.get('/people/1').json_body['data']['relationships']['blogs']['data']
]
self.assertIn('1', post_ids)
self.assertNotIn('2', post_ids)
class TestRelationships(DBTestBase):
'''Test functioning of relationships.
'''
# Test data convention:
#
# src:10 -> None or []
# src:11 -> tgt:11 or [tgt:11]
# src:12 -> [tgt:12, tgt:13]
###############################################
# Relationship GET tests.
###############################################
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_object(self, src, tgt, comment):
'''Relationships key should be object with a defined structure.
The value of the relationships key MUST be an object (a “relationships
object”). Members of the relationships object (“relationships”)
represent references from the resource object in which it’s defined to
other resource objects.
Relationships links object should have 'self' and 'related' links.
'''
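# For illustration only (the ids and URLs here are assumptions based on
# the test data convention, not asserted by this test), a relationship
# object for blogs/1's 'owner' might look like:
#
#   "owner": {
#       "links": {
#           "self": ".../blogs/1/relationships/owner",
#           "related": ".../blogs/1/owner"
#       },
#       "data": {"type": "people", "id": "1"}
#   }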
# Fetch item 1 from the collection
r = self.test_app().get('/{}/1'.format(src.collection))
item = r.json['data']
# Should have relationships key
self.assertIn('relationships', item)
rels = item['relationships']
# The named relationship should exist.
self.assertIn(src.rel, rels)
# Check the structure of the relationship object.
obj = rels[src.rel]
self.assertIn('links', obj)
self.assertIn('self', obj['links'])
self.assertTrue(obj['links']['self'].endswith(
'{}/1/relationships/{}'.format(src.collection, src.rel)
))
self.assertIn('related', obj['links'])
self.assertTrue(obj['links']['related'].endswith(
'{}/1/{}'.format(src.collection, src.rel)
))
self.assertIn('data', obj)
if tgt.many:
self.assertIsInstance(obj['data'], list)
self.assertIn('type', obj['data'][0])
self.assertIn('id', obj['data'][0])
else:
self.assertIsInstance(obj['data'], dict)
self.assertIn('type', obj['data'])
self.assertIn('id', obj['data'])
def test_rel_many_to_many_self(self):
"""
Should get items from a self-referential many-to-many relationship.
"""
test_app = self.test_app()
data = test_app.get("/jobs/1").json_body['data']
minions = {ri['id'] for ri in data['relationships']['minions']['data']}
bosses = {ri['id'] for ri in data['relationships']['bosses']['data']}
self.assertEqual(minions, {'2', '3'})
self.assertEqual(bosses, set())
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_related_get(self, src, tgt, comment):
''''related' link should fetch related resource(s).
If present, a related resource link MUST reference a valid URL, even if
the relationship isn’t currently associated with any target resources.
'''
# Fetch item 1 from the collection
r = self.test_app().get('/{}/1'.format(src.collection))
item = r.json['data']
# Fetch the related url.
url = item['relationships'][src.rel]['links']['related']
data = self.test_app().get(url).json['data']
# Check that the returned data is of the expected type.
if tgt.many:
self.assertIsInstance(data, list)
for related_item in data:
self.assertEqual(related_item['type'], tgt.collection)
else:
self.assertIsInstance(data, dict)
self.assertEqual(data['type'], tgt.collection)
def test_rels_related_get_no_relationship(self):
"""Should fail to get an invalid relationship."""
self.test_app().get('/blogs/1/no_such_relationship',
status=400,
)
def test_rels_related_get_no_object(self):
"""Should fail if 'parent' doesn't exist."""
self.test_app().get('/blogs/99999/owner',
status=400,
)
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_resource_linkage(self, src, tgt, comment):
'''Appropriate related resource identifiers in relationship.
Resource linkage in a compound document allows a client to link together
all of the included resource objects without having to GET any URLs via
links.
Resource linkage MUST be represented as one of the following:
* null for empty to-one relationships.
* an empty array ([]) for empty to-many relationships.
* a single resource identifier object for non-empty to-one
relationships.
* an array of resource identifier objects for non-empty to-many
relationships.
'''
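# The four linkage forms, with illustrative values (types and ids are
# assumptions based on the test data convention):
#
#   to-one, empty:      "data": null
#   to-one, non-empty:  "data": {"type": "people", "id": "11"}
#   to-many, empty:     "data": []
#   to-many, non-empty: "data": [{"type": "blogs", "id": "12"},
#                                {"type": "blogs", "id": "13"}]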
# Test data convention:
#
# src:10 -> None or []
# src:11 -> tgt:11 or [tgt:11]
# src:12 -> [tgt:12, tgt:13]
# We always need items 10 and 11 from the source collection.
reldata_with_none = self.test_app().get(
'/{}/10'.format(src.collection)
).json['data']['relationships'][src.rel]['data']
reldata_with_one = self.test_app().get(
'/{}/11'.format(src.collection)
).json['data']['relationships'][src.rel]['data']
if tgt.many:
# Empty to_many relationship should hold [].
self.assertEqual(reldata_with_none, [])
# Should be an array with one item.
self.assertEqual(
reldata_with_one[0],
{'type': tgt.collection, 'id': '11'}
)
# We need item 12 for a to_many relationship.
# Note that we sort the list of related items so that they are in a
# known order for later testing.
reldata_with_two = sorted(
self.test_app().get(
'/{}/12'.format(src.collection)
).json['data']['relationships'][src.rel]['data'],
key=lambda item: item['id']
)
# Should be an array with two items.
self.assertEqual(
reldata_with_two[0], {'type': tgt.collection, 'id': '12'}
)
self.assertEqual(
reldata_with_two[1], {'type': tgt.collection, 'id': '13'}
)
else:
# Empty to_one relationship should hold None.
self.assertIsNone(reldata_with_none)
# Otherwise a single item {type: tgt_type, id: 11}.
self.assertEqual(reldata_with_one, {'type': tgt.collection, 'id': '11'})
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_fetch_relationship_link(self, src, tgt, comment):
'''relationships links should return linkage information.
A server MUST support fetching relationship data for every relationship
URL provided as a self link as part of a relationship’s links object
The primary data in the response document MUST match the appropriate
value for resource linkage, as described above for relationship objects.
If [a to-one] relationship is empty, then a GET request to the
[relationship] URL would return:
"data": null
If [a to-many] relationship is empty, then a GET request to the
[relationship] URL would return:
"data": []
'''
for item_id in ['10', '11', '12']:
url = self.test_app().get(
'/{}/{}'.format(src.collection, item_id)
).json['data']['relationships'][src.rel]['links']['self']
reldata = self.test_app().get(url).json['data']
if tgt.many:
if item_id == '10':
self.assertEqual(reldata, [])
elif item_id == '11':
self.assertEqual(reldata[0]['type'], tgt.collection)
self.assertEqual(reldata[0]['id'], '11')
else:
reldata.sort(key=lambda item: item['id'])
self.assertEqual(reldata[0]['type'], tgt.collection)
self.assertEqual(reldata[0]['id'], '12')
self.assertEqual(reldata[1]['type'], tgt.collection)
self.assertEqual(reldata[1]['id'], '13')
else:
if item_id == '10':
self.assertIsNone(reldata)
elif item_id == '11':
self.assertEqual(reldata['type'], tgt.collection)
self.assertEqual(reldata['id'], '11')
else:
continue
def test_rels_fetch_not_found_relationship(self):
'''Should 404 when fetching a relationship that does not exist.
A server MUST return 404 Not Found when processing a request to fetch a
relationship link URL that does not exist.
'''
# Try to get the author of a non-existent post.
r = self.test_app().get('/posts/1000/relationships/author', status=404)
# Try to get data about a non-existent relationship.
self.test_app().get('/posts/1/relationships/no_such_relationship',
status=404)
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_filter(self, src, tgt, comment):
'''Should filter a collection on attributes of related resources.
e.g. GET /posts?filter[author.name:eq]=alice&include=author should
return only posts with at least one matching related item, which
should itself appear in 'included'.
'''
for filter in tgt.filters:
json = self.test_app().get(
'/{}?filter[{}.{}:{}]={}&include={}'.format(
src.collection,
src.rel,
filter.att,
filter.op,
filter.value,
src.rel,
)
).json
included = {
(inc['type'], inc['id']): inc for inc in json['included']
}
# There should be at least one match.
self.assertGreater(len(included), 0)
items = json['data']
# For each returned item, there should be at least one related
# item which matches the filter.
for item in items:
res_ids = item['relationships'][src.rel]['data']
self.assertIsNotNone(res_ids)
if not tgt.many:
res_ids = [res_ids]
found_match = False
for res_id in res_ids:
relitem = included[(res_id['type'], res_id['id'])]
found_match = self.evaluate_filter(
relitem['attributes'][filter.att],
filter.op,
filter.value
)
if found_match:
break
self.assertTrue(found_match)
###############################################
# Relationship POST tests.
###############################################
def test_rels_post_no_such_relationship(self):
"""Should fail to create an invalid relationship."""
created_id = self.test_app().post_json(
'/blogs',
{
'data': {
'type': 'blogs',
'attributes': {
'title': 'test'
},
'relationships': {
'no_such_relationship': {
'data': {'type': 'people', 'id': '1'}
}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=404
)
def test_rels_post_relationship_no_data(self):
"Relationships mentioned in POSTs must have data."
created_id = self.test_app(
options = {
'pyramid_jsonapi.schema_validation': 'false'
}
).post_json(
'/blogs',
{
'data': {
'type': 'blogs',
'attributes': {
'title': 'test'
},
'relationships': {
'owner': {}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_rels_post_relationship_no_id(self):
"Relationship linkage in POST requests must have id."
created_id = self.test_app(
options = {
'pyramid_jsonapi.schema_validation': 'false'
}
).post_json(
'/blogs',
{
'data': {
'type': 'blogs',
'attributes': {
'title': 'test'
},
'relationships': {
'owner': {
'data': {'type': 'people'}
}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_post_to_relationships(self, src, tgt, comment):
'''Should add items to a TOMANY relationship; 403 Error for TOONE.
If a client makes a POST request to a URL from a relationship link, the
server MUST add the specified members to the relationship unless they
are already present. If a given type and id is already in the
relationship, the server MUST NOT add it again.
'''
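# Sketch of the request shape this test exercises; <src>, <rel> and
# <tgt> stand in for the parameterized collection and relationship names:
#
#   POST /<src>/10/relationships/<rel>
#   {"data": [{"type": "<tgt>", "id": "12"}, {"type": "<tgt>", "id": "13"}]}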
if not tgt.many:
# Cannot POST to TOONE relationship. 403 Error.
self.test_app(
options = {
'pyramid_jsonapi.schema_validation': 'false'
}
).post_json(
'/{}/10/relationships/{}'.format(src.collection, src.rel),
{'type': tgt.collection, 'id': '11'},
headers={'Content-Type': 'application/vnd.api+json'},
status=403
)
return
# Add related items 12 and 13 to item 10 (has no related items).
self.test_app().post_json(
'/{}/10/relationships/{}'.format(src.collection, src.rel),
{
'data': [
{ 'type': tgt.collection, 'id': '12'},
{ 'type': tgt.collection, 'id': '13'}
]
},
headers={'Content-Type': 'application/vnd.api+json'},
)
# Make sure they are there.
rel_ids = {
rel_item['id'] for rel_item in
self.test_app().get(
'/{}/10/relationships/{}'.format(src.collection, src.rel)
).json['data']
}
self.assertEqual(rel_ids, {'12', '13'})
# Make sure adding relitem:12 again doesn't result in two relitem:12s
self.test_app().post_json(
'/{}/10/relationships/{}'.format(src.collection, src.rel),
{
'data': [
{ 'type': tgt.collection, 'id': '12'},
]
},
headers={'Content-Type': 'application/vnd.api+json'},
)
rel_ids = [
rel_item['id'] for rel_item in
self.test_app().get(
'/{}/10/relationships/{}'.format(src.collection, src.rel)
).json['data']
]
self.assertEqual(sorted(rel_ids), ['12', '13'])
# Make sure adding relitem:11 adds to the list, rather than replacing
# it.
self.test_app().post_json(
'/{}/10/relationships/{}'.format(src.collection, src.rel),
{
'data': [
{ 'type': tgt.collection, 'id': '11'},
]
},
headers={'Content-Type': 'application/vnd.api+json'},
)
rel_ids = [
rel_item['id'] for rel_item in
self.test_app().get(
'/{}/10/relationships/{}'.format(src.collection, src.rel)
).json['data']
]
self.assertEqual(sorted(rel_ids), ['11', '12', '13'])
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_post_item_with_related(self, src, tgt, comment):
'''Should add a new item with linkage to related resources.
If a relationship is provided in the relationships member of the
resource object, its value MUST be a relationship object with a data
member. The value of this key represents the linkage the new resource is
to have.
'''
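# Illustrative POST document for a to-one target (a to-many target puts
# an array under 'data'); placeholder names stand in for the
# parameterized values:
#
#   POST /<src>
#   {"data": {"type": "<src>",
#             "relationships": {"<rel>": {"data": {"type": "<tgt>", "id": "12"}}}}}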
# Add a new item related to relitem:12 and possibly relitem:13
reldata = {'type': tgt.collection, 'id': '12'}
if tgt.many:
reldata = [ reldata, {'type': tgt.collection, 'id': '13'} ]
item_id = self.test_app().post_json(
'/{}'.format(src.collection),
{
'data': {
'type': src.collection,
'relationships': {
src.rel: {
'data': reldata
}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'}
).json['data']['id']
# GET it back and check that relationship linkage is correct.
item = self.test_app().get(
'/{}/{}'.format(src.collection, item_id)
).json['data']
if tgt.many:
specified_related_ids = {'12', '13'}
found_related_ids = {
thing['id'] for thing in item['relationships'][src.rel]['data']
}
self.assertEqual(specified_related_ids, found_related_ids)
else:
self.assertEqual(item['relationships'][src.rel]['data']['id'], '12')
# Now attempt to add another item with malformed requests.
incorrect_type_data = { 'type': 'frogs', 'id': '12' }
no_id_data = { 'type': tgt.collection, 'id_typo': '12'}
# No data element in rel.
self.test_app().post_json(
'/{}'.format(src.collection),
{
'data': {
'type': src.collection,
'relationships': {
src.rel: {
'meta': 'should fail'
}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
if tgt.many:
incorrect_type_data = [ incorrect_type_data ]
no_id_data = [ no_id_data ]
# Not an array.
self.test_app().post_json(
'/{}'.format(src.collection),
{
'data': {
'type': src.collection,
'relationships': {
src.rel: {
'data': { 'type': tgt.collection, 'id': '12'}
}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
else:
# Data is an array of identifiers when it should be just one.
self.test_app().post_json(
'/{}'.format(src.collection),
{
'data': {
'type': src.collection,
'relationships': {
src.rel: {
'data': [
{ 'type': tgt.collection, 'id': '12'}
]
}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
# Data malformed (not a resource identifier or array of them).
self.test_app().post_json(
'/{}'.format(src.collection),
{
'data': {
'type': src.collection,
'relationships': {
src.rel: {
'data': 'splat'
}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
# Item with incorrect type.
self.test_app().post_json(
'/{}'.format(src.collection),
{
'data': {
'type': src.collection,
'relationships': {
src.rel: {
'data': incorrect_type_data
}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=409
)
# Item with no id.
self.test_app().post_json(
'/{}'.format(src.collection),
{
'data': {
'type': src.collection,
'relationships': {
src.rel: {
'data': no_id_data
}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_rels_post_relationships_nonexistent_relationship(self):
'''Should return 404 error (relationship not found).
'''
# Try to add people/1 to no_such_relationship.
self.test_app().post_json(
'/articles_by_assoc/2/relationships/no_such_relationship',
{
'data': [
{ 'type': 'people', 'id': '1'}
]
},
headers={'Content-Type': 'application/vnd.api+json'},
status=404
)
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_post_relationships_nonexistent_item(self, src, tgt, comment):
'''Should return 403 for a TOONE relationship, HTTPFailedDependency (424) for TOMANY.
'''
# Try to add tgt/99999 (doesn't exist) to src.rel
reldata = { 'type': tgt.collection, 'id': '99999'}
status = 403
if tgt.many:
reldata = [ reldata ]
status = 424
self.test_app().post_json(
'/{}/10/relationships/{}'.format(src.collection, src.rel),
{
'data': reldata
},
headers={'Content-Type': 'application/vnd.api+json'},
status=status
)
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_post_relationships_invalid_id(self, src, tgt, comment):
'''Should return HTTPBadRequest.
'''
if not tgt.many:
return
# Try to add tgt:splat to the relationship.
self.test_app().post_json(
'/{}/10/relationships/{}'.format(src.collection, src.rel),
{
'data': [
{ 'type': tgt.collection, 'id': 'splat'}
]
},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_rels_post_relationships_integrity_error(self):
'''Should return HTTPFailedDependency.
'''
# Try to add blog/1 to people/3 (db constraint precludes this)
self.test_app().post_json(
'/people/3/relationships/blogs',
{
'data': [
{ 'type': 'blogs', 'id': '1'}
]
},
headers={'Content-Type': 'application/vnd.api+json'},
status=424
)
###############################################
# Relationship PATCH tests.
###############################################
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_patch_resources_relationships(self, src, tgt, comment):
'''Should replace src.rel with new contents.
Any or all of a resource’s relationships MAY be included in the resource
object included in a PATCH request.
If a request does not include all of the relationships for a resource,
the server MUST interpret the missing relationships as if they were
included with their current values. It MUST NOT interpret them as null
or empty values.
If a relationship is provided in the relationships member of a resource
object in a PATCH request, its value MUST be a relationship object with
a data member. The relationship’s value will be replaced with the value
specified in this member.
'''
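# Illustrative PATCH document replacing a to-many relationship via the
# resource URL (placeholder names for the parameterized values):
#
#   PATCH /<src>/10
#   {"data": {"id": "10", "type": "<src>",
#             "relationships": {"<rel>": {"data": [
#               {"type": "<tgt>", "id": "12"},
#               {"type": "<tgt>", "id": "13"}]}}}}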
reldata = {'type': tgt.collection, 'id': '12'}
if tgt.many:
reldata = [ reldata, {'type': tgt.collection, 'id': '13'} ]
# PATCH src/10/rels/rel to be reldata
self.test_app().patch_json(
'/{}/10'.format(src.collection),
{
'data': {
'id': '10',
'type': src.collection,
'relationships': {
src.rel: {
'data': reldata
}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
)
# Check that src.rel has the correct linkage.
src_item = self.test_app().get('/{}/10'.format(src.collection)).json['data']
if tgt.many:
for related_item in src_item['relationships'][src.rel]['data']:
self.assertEqual(related_item['type'], tgt.collection)
self.assertIn(related_item['id'], {'12', '13'})
else:
self.assertEqual(src_item['relationships'][src.rel]['data'], reldata)
# Now try PATCHing the relationship back to empty
if tgt.many:
reldata = []
else:
reldata = None
self.test_app().patch_json(
'/{}/10'.format(src.collection),
{
'data': {
'id': '10',
'type': src.collection,
'relationships': {
src.rel: {
'data': reldata
}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
)
src_item = self.test_app().get('/{}/10'.format(src.collection)).json['data']
self.assertEqual(src_item['relationships'][src.rel]['data'], reldata)
# MUST be a relationship object with a data member
# Try without a data member...
self.test_app().patch_json(
'/{}/10'.format(src.collection),
{
'data': {
'id': '10',
'type': src.collection,
'relationships': {
src.rel: reldata
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_patch_relationships(self, src, tgt, comment):
'''Should update a relationship.
A server MUST respond to PATCH requests to a URL from a to-one
relationship link as described below.
The PATCH request MUST include a top-level member named data containing
one of:
* a resource identifier object corresponding to the new related
resource.
* null, to remove the relationship.
If a client makes a PATCH request to a URL from a to-many relationship
link, the server MUST either completely replace every member of the
relationship, return an appropriate error response if some resources can
not be found or accessed, or return a 403 Forbidden response if complete
replacement is not allowed by the server.
'''
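# Illustrative PATCH directly to a relationship URL (to-one form shown;
# to-many sends an array, [] empties a to-many and null a to-one):
#
#   PATCH /<src>/10/relationships/<rel>
#   {"data": {"type": "<tgt>", "id": "12"}}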
if tgt.many:
new_reldata = [
{ 'type': tgt.collection, 'id': '12'},
{ 'type': tgt.collection, 'id': '13'}
]
new_empty = []
else:
new_reldata = { 'type': tgt.collection, 'id': '12'}
new_empty = None
# src:11 should be related to tgt:11. Update the relationship.
self.test_app().patch_json(
'/{}/10/relationships/{}'.format(src.collection, src.rel),
{
'data': new_reldata
},
headers={'Content-Type': 'application/vnd.api+json'},
)
# Check that the change went through
fetched_reldata = self.test_app().get(
'/{}/10/relationships/{}'.format(src.collection, src.rel)
).json['data']
if tgt.many:
expected_length = 2
expected_ids = {'12', '13'}
else:
# Wrap to_one results in an array to make the following code DRY.
fetched_reldata = [ fetched_reldata ]
expected_length = 1
expected_ids = {'12'}
fetched_reldata.sort(key=lambda item: item['id'])
self.assertEqual(len(fetched_reldata), expected_length)
for relitem in fetched_reldata:
self.assertEqual(relitem['type'], tgt.collection)
self.assertIn(relitem['id'], expected_ids)
# Update the relationship to be empty.
self.test_app().patch_json(
'/{}/10/relationships/{}'.format(src.collection, src.rel),
{
'data': new_empty
},
headers={'Content-Type': 'application/vnd.api+json'},
)
# Check that it's empty.
self.assertEqual(
self.test_app().get(
'/{}/10/relationships/{}'.format(src.collection, src.rel)
).json['data'],
new_empty
)
def test_rels_patch_relationships_nonexistent_relationship(self):
'''Should return 404 error (relationship not found).
'''
# Try to set people/1 on no_such_relationship.
self.test_app().patch_json(
'/articles_by_assoc/2/relationships/no_such_relationship',
{
'data': [
{ 'type': 'people', 'id': '1'}
]
},
headers={'Content-Type': 'application/vnd.api+json'},
status=404
)
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_patch_relationships_nonexistent_item(self, src, tgt, comment):
'''Should return HTTPFailedDependency.
'''
reldata = { 'type': tgt.collection, 'id': '99999' }
if tgt.many:
reldata = [ reldata ]
self.test_app().patch_json(
'/{}/10/relationships/{}'.format(src.collection, src.rel),
{
'data': reldata
},
headers={'Content-Type': 'application/vnd.api+json'},
status=424
)
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_patch_relationships_invalid_id(self, src, tgt, comment):
'''Should return HTTPBadRequest.
'''
reldata = { 'type': tgt.collection, 'id': 'splat' }
if tgt.many:
reldata = [ reldata ]
self.test_app().patch_json(
'/{}/10/relationships/{}'.format(src.collection, src.rel),
{
'data': reldata
},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_rels_patch_relationships_integrity_error(self):
'''Should return HTTPFailedDependency.
'''
# Try to add blog/1 to people/3 (db constraint precludes this)
self.test_app().patch_json(
'/people/3/relationships/blogs',
{
'data': [
{ 'type': 'blogs', 'id': '1'}
]
},
headers={'Content-Type': 'application/vnd.api+json'},
status=424
)
# and the other way round
self.test_app().patch_json(
'/blogs/1/relationships/owner',
{
'data': { 'type': 'people', 'id': '3'}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=424
)
###############################################
# Relationship DELETE tests.
###############################################
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_delete_relationships(self, src, tgt, comment):
'''Should remove items from relationship.
If the client makes a DELETE request to a URL from a relationship link
the server MUST delete the specified members from the relationship or
return a 403 Forbidden response. If all of the specified resources are
able to be removed from, or are already missing from, the relationship
then the server MUST return a successful response.
'''
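# Illustrative DELETE from a to-many relationship URL (to-one gets 403):
#
#   DELETE /<src>/12/relationships/<rel>
#   {"data": [{"type": "<tgt>", "id": "13"}]}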
if not tgt.many:
# DELETEing from a to_one relationship is not allowed.
self.test_app().delete(
'/{}/11/relationships/{}'.format(src.collection, src.rel),
status=403
)
return
# Attempt to delete tgt:13 from src:12
self.test_app().delete_json(
'/{}/12/relationships/{}'.format(src.collection, src.rel),
{
'data': [
{'type': tgt.collection, 'id': '13'}
]
},
headers={'Content-Type': 'application/vnd.api+json'},
)
# Test that tgt:13 is no longer in the relationship.
self.assertEqual(
{'12'},
{
item['id'] for item in
self.test_app().get(
'/{}/12/relationships/{}'.format(src.collection, src.rel)
).json['data']
}
)
# Try to DELETE tgt:13 from relationship again. Should return success.
self.test_app().delete_json(
'/{}/12/relationships/{}'.format(src.collection, src.rel),
{
'data': [
{'type': tgt.collection, 'id': '13'}
]
},
headers={'Content-Type': 'application/vnd.api+json'},
)
self.assertEqual(
{'12'},
{
item['id'] for item in
self.test_app().get(
'/{}/12/relationships/{}'.format(src.collection, src.rel)
).json['data']
}
)
def test_rels_delete_relationships_nonexistent_relationship(self):
'''Should return 404 error (relationship not found).
'''
# Delete people/1 from no_such_relationship.
self.test_app().delete_json(
'/articles_by_assoc/2/relationships/no_such_relationship',
{
'data': [
{ 'type': 'people', 'id': '1'}
]
},
headers={'Content-Type': 'application/vnd.api+json'},
status=404
)
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_delete_relationships_nonexistent_item(self, src, tgt, comment):
'''Should return HTTPFailedDependency.
'''
if not tgt.many:
return
self.test_app().delete_json(
'/{}/11/relationships/{}'.format(src.collection, src.rel),
{
'data': [ { 'type': tgt.collection, 'id': '99999' } ]
},
headers={'Content-Type': 'application/vnd.api+json'},
status=424
)
@parameterized.expand(rel_infos, doc_func=rels_doc_func)
def test_rels_delete_relationships_invalid_id(self, src, tgt, comment):
'''Should return HTTPBadRequest.
'''
if not tgt.many:
return
# Try to delete tgt:splat from src:11.
self.test_app().delete_json(
'/{}/11/relationships/{}'.format(src.collection, src.rel),
{
'data': [
{ 'type': tgt.collection, 'id': 'splat'}
]
},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_adjacency_list(self):
'''Should correctly identify parent and children for TreeNode.
'''
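# Assumed tree shape in the test data, inferred from the assertions
# below: treenodes/1 is the root, treenodes/2 is one of its children and
# itself has two children.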
top = self.test_app().get('/treenodes/1').json
top_1 = self.test_app().get('/treenodes/2').json
# top should have no parent.
self.assertIsNone(top['data']['relationships']['parent']['data'])
# top should have multiple children.
self.assertIsInstance(top['data']['relationships']['children']['data'], list)
# top_1 should have top as a parent.
self.assertEqual(
top_1['data']['relationships']['parent']['data'],
{'type': 'treenodes', 'id': '1'}
)
# top_1 should have 2 children.
self.assertIsInstance(top_1['data']['relationships']['children']['data'], list)
self.assertEqual(len(top_1['data']['relationships']['children']['data']), 2)
class TestSpec(DBTestBase):
'''Test compliance against jsonapi spec.
http://jsonapi.org/format/
'''
###############################################
# GET tests.
###############################################
def test_spec_server_content_type(self):
'''Response should have correct content type.
Servers MUST send all JSON API data in response documents with the
header Content-Type: application/vnd.api+json without any media type
parameters.
'''
# Fetch a representative page
r = self.test_app().get('/people')
self.assertEqual(r.content_type, 'application/vnd.api+json')
def test_spec_incorrect_client_content_type(self):
'''Server should return error if we send media type parameters.
Servers MUST respond with a 415 Unsupported Media Type status code if a
request specifies the header Content-Type: application/vnd.api+json with
any media type parameters.
'''
r = self.test_app().get(
'/people',
headers={ 'Content-Type': 'application/vnd.api+json; param=val' },
status=415,
)
def test_spec_accept_not_acceptable(self):
'''Server should respond with 406 if all jsonapi media types have parameters.
Servers MUST respond with a 406 Not Acceptable status code if a
request's Accept header contains the JSON API media type and all
instances of that media type are modified with media type parameters.
'''
# Should work with correct accepts header.
r = self.test_app().get(
'/people',
headers={ 'Accept': 'application/vnd.api+json' },
)
# 406 with one incorrect type.
r = self.test_app().get(
'/people',
headers={ 'Accept': 'application/vnd.api+json; param=val' },
status=406,
)
# 406 with more than one type but none without params.
r = self.test_app().get(
'/people',
headers={ 'Accept': 'application/vnd.api+json; param=val,' +
'application/vnd.api+json; param2=val2' },
status=406,
)
def test_spec_toplevel_must(self):
'''Server response must have one of data, errors or meta.
A JSON object MUST be at the root of every JSON API request and response
containing data.
A document MUST contain at least one of the following top-level members:
* data: the document's “primary data”
* errors: an array of error objects
* meta: a meta object that contains non-standard meta-information.
'''
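# For illustration, the two body shapes checked below (member contents
# assumed, not asserted in full):
#
#   success: {"data": [...], "meta": {...}}
#   error:   {"errors": [{...}], "meta": {...}}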
# Should be object with data member.
r = self.test_app().get('/people')
self.assertIn('data', r.json)
# Should also have a meta member.
self.assertIn('meta', r.json)
# Should be object with an array of errors.
r = self.test_app().get(
'/people',
headers={ 'Content-Type': 'application/vnd.api+json; param=val' },
status=415,
)
self.assertIn('errors', r.json)
self.assertIsInstance(r.json['errors'], list)
def test_spec_get_no_such_item(self):
'''Should fail to get non-existent comments/99999
A server MUST respond with 404 Not Found when processing a request
to fetch a single resource that does not exist
'''
# Get comments/99999
self.test_app().get('/comments/99999', status=404)
def test_spec_get_invalid_item(self):
'''Should fail to get invalid item comments/cat
A server MUST respond with 404 Not Found when processing a request
to fetch a single resource that does not exist
'''
# Get comments/cat
self.test_app().get('/comments/cat', status=404)
def test_spec_get_primary_data_empty(self):
'''Should return an empty list of results.
Primary data MUST be either:
* ...or an empty array ([])
A logical collection of resources MUST be represented as an array, even
if it... is empty.
'''
r = self.test_app().get('/people?filter[name:eq]=doesnotexist')
self.assertEqual(len(r.json['data']), 0)
def test_spec_get_primary_data_array(self):
'''Should return an array of resource objects.
Primary data MUST be either:
* an array of resource objects, an array of resource identifier
objects, or an empty array ([]), for requests that target resource
collections
'''
# Data should be an array of person resource objects or identifiers.
r = self.test_app().get('/people')
self.assertIn('data', r.json)
self.assertIsInstance(r.json['data'], list)
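# Index the first element; this implicitly checks the array is non-empty.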
item = r.json['data'][0]
def test_spec_get_primary_data_array_of_one(self):
'''Should return an array of one resource object.
A logical collection of resources MUST be represented as an array, even
if it only contains one item...
'''
r = self.test_app().get('/people?page[limit]=1')
self.assertIn('data', r.json)
self.assertIsInstance(r.json['data'], list)
self.assertEqual(len(r.json['data']), 1)
def test_spec_get_primary_data_single(self):
'''Should return a single resource object.
Primary data MUST be either:
* a single resource object, a single resource identifier object, or
null, for requests that target single resources
'''
# Find the id of alice.
r = self.test_app().get('/people?filter[name:eq]=alice')
item = r.json['data'][0]
self.assertEqual(item['attributes']['name'], 'alice')
alice_id = item['id']
# Now get alice object.
r = self.test_app().get('/people/' + alice_id)
alice = r.json['data']
self.assertEqual(alice['attributes']['name'], 'alice')
def test_spec_resource_object_must(self):
'''Resource object should have at least id and type.
A resource object MUST contain at least the following top-level members:
* id
* type
The values of the id and type members MUST be strings.
'''
r = self.test_app().get('/people?page[limit]=1')
item = r.json['data'][0]
# item must have at least a type and id.
self.assertEqual(item['type'], 'people')
self.assertIn('id', item)
self.assertIsInstance(item['type'], str)
self.assertIsInstance(item['id'], str)
def test_spec_resource_object_should(self):
'''Fetched resource should have attributes, relationships, links, meta.
a resource object MAY contain any of these top-level members:
* attributes: an attributes object representing some of the
resource’s data.
* relationships: a relationships object describing relationships
between the resource and other JSON API resources.
* links: a links object containing links related to the resource.
* meta: a meta object containing non-standard meta-information about
a resource that can not be represented as an attribute or
relationship.
'''
r = self.test_app().get('/people?page[limit]=1')
item = r.json['data'][0]
self.assertIn('attributes', item)
#self.assertIn('relationships', item)
self.assertIn('links', item)
#self.assertIn('meta', item)
def test_spec_type_id_identify_resource(self):
'''Using type and id should fetch a single resource.
Within a given API, each resource object’s type and id pair MUST
identify a single, unique resource.
'''
# Find the id of alice.
r = self.test_app().get('/people?filter[name:eq]=alice')
item = r.json['data'][0]
self.assertEqual(item['attributes']['name'], 'alice')
alice_id = item['id']
# Search for alice by id. We should get one result whose name is alice.
r = self.test_app().get('/people?filter[id:eq]={}'.format(alice_id))
self.assertEqual(len(r.json['data']), 1)
item = r.json['data'][0]
self.assertEqual(item['id'], alice_id)
self.assertEqual(item['attributes']['name'], 'alice')
def test_spec_attributes(self):
'''attributes key should be an object.
The value of the attributes key MUST be an object (an “attributes
object”). Members of the attributes object (“attributes”) represent
information about the resource object in which it’s defined.
'''
# Fetch a single post.
r = self.test_app().get('/posts?page[limit]=1')
item = r.json['data'][0]
# Check attributes.
self.assertIn('attributes', item)
atts = item['attributes']
self.assertIn('title', atts)
self.assertIn('content', atts)
self.assertIn('published_at', atts)
def test_spec_no_foreign_keys(self):
'''No foreign keys in attributes.
Although has-one foreign keys (e.g. author_id) are often stored
internally alongside other information to be represented in a resource
object, these keys SHOULD NOT appear as attributes.
'''
# posts have author_id and blog_id as has-one foreign keys. Check that
# they don't make it into the JSON representation (they should be in
# relationships instead).
# Fetch a single post.
r = self.test_app().get('/posts?page[limit]=1')
item = r.json['data'][0]
# Check for foreign keys.
self.assertNotIn('author_id', item['attributes'])
self.assertNotIn('blog_id', item['attributes'])
def test_spec_links_self(self):
''''self' link should fetch same object.
The optional links member within each resource object contains links
related to the resource.
If present, this links object MAY contain a self link that identifies
the resource represented by the resource object.
A server MUST respond to a GET request to the specified URL with a
response that includes the resource as the primary data.
'''
person = self.test_app().get('/people/1').json['data']
# Make sure we got the expected person.
self.assertEqual(person['type'], 'people')
self.assertEqual(person['id'], '1')
# Now fetch the self link.
person_again = self.test_app().get(person['links']['self']).json['data']
# Make sure we got the same person.
self.assertEqual(person_again['type'], 'people')
self.assertEqual(person_again['id'], '1')
def test_spec_included_array(self):
'''Included resources should be in an array under 'included' member.
In a compound document, all included resources MUST be represented as an
array of resource objects in a top-level included member.
'''
person = self.test_app().get('/people/1?include=blogs').json
self.assertIsInstance(person['included'], list)
# Each item in the list should be a resource object: we'll look for
# type, id and attributes.
for blog in person['included']:
self.assertIn('id', blog)
self.assertEqual(blog['type'], 'blogs')
self.assertIn('attributes', blog)
def test_spec_bad_include(self):
'''Should 400 error on attempt to fetch non existent relationship path.
If a server is unable to identify a relationship path or does not
support inclusion of resources from a path, it MUST respond with 400 Bad
Request.
'''
# Try to include a relationship that doesn't exist.
r = self.test_app().get('/people/1?include=frogs', status=400)
def test_spec_nested_include(self):
'''Should return includes for nested resources.
In order to request resources related to other resources, a
dot-separated path for each relationship name can be specified:
* GET /articles/1?include=comments.author
'''
r = self.test_app().get('/people/1?include=comments.author')
people_seen = set()
types_expected = {'people', 'comments'}
types_seen = set()
for item in r.json['included']:
# Shouldn't see any types other than comments and people.
self.assertIn(item['type'], types_expected)
types_seen.add(item['type'])
# Each included person should appear only once.
if item['type'] == 'people':
self.assertNotIn(item['id'], people_seen)
people_seen.add(item['id'])
# We should have seen at least one of each type.
self.assertIn('people', types_seen)
self.assertIn('comments', types_seen)
def test_spec_multiple_include(self):
'''Should return multiple related resource types.
Multiple related resources can be requested in a comma-separated list:
* GET /articles/1?include=author,comments.author
'''
# TODO(Colin) implement
def test_spec_compound_full_linkage(self):
'''All included resources should be referenced by a resource link.
Compound documents require "full linkage", meaning that every included
resource MUST be identified by at least one resource identifier object
in the same document. These resource identifier objects could either be
primary data or represent resource linkage contained within primary or
included resources.
'''
# get a person with included blogs and comments.
person = self.test_app().get('/people/1?include=blogs,comments').json
# Find all the resource identifiers.
rids = set()
for rel in person['data']['relationships'].values():
if isinstance(rel['data'], list):
for item in rel['data']:
rids.add((item['type'], item['id']))
else:
rids.add((rel['data']['type'], rel['data']['id']))
# Every included item should have an identifier somewhere.
for item in person['included']:
type_ = item['type']
id_ = item['id']
self.assertIn((type_, id_), rids)
def test_spec_compound_no_linkage_sparse(self):
'''Included resources not referenced if referencing field not included.
The only exception to the full linkage requirement is when relationship
fields that would otherwise contain linkage data are excluded via sparse
fieldsets.
'''
person = self.test_app().get(
'/people/1?include=blogs&fields[people]=name,comments'
).json
# Find all the resource identifiers.
rids = set()
for rel in person['data']['relationships'].values():
for item in rel['data']:
rids.add((item['type'], item['id']))
self.assertGreater(len(person['included']), 0)
for blog in person['included']:
self.assertEqual(blog['type'], 'blogs')
def test_spec_compound_unique_resources(self):
'''Each resource object should appear only once.
A compound document MUST NOT include more than one resource object for
each type and id pair.
'''
# get some people with included blogs and comments.
people = self.test_app().get('/people?include=blogs,comments').json
# Check that each resource only appears once.
seen = set()
# Add the main resource objects.
for person in people['data']:
self.assertNotIn((person['type'], person['id']), seen)
seen.add((person['type'], person['id']))
# Check the included resources.
for obj in people['included']:
self.assertNotIn((obj['type'], obj['id']), seen)
seen.add((obj['type'], obj['id']))
def test_spec_links(self):
'''Links should be an object with URL strings.
Where specified, a links member can be used to represent links. The
value of each links member MUST be an object (a "links object").
Each member of a links object is a “link”. A link MUST be represented as
either:
* a string containing the link’s URL.
* an object ("link object") which can contain the following members:
* href: a string containing the link’s URL.
* meta: a meta object containing non-standard meta-information
about the link.
Note: only URL string links are currently generated by jsonapi.
'''
links = self.test_app().get('/people?pj_include_count=1').json['links']
self.assertIsInstance(links['self'], str)
self.assertIsInstance(links['first'], str)
self.assertIsInstance(links['last'], str)
def test_spec_fetch_non_existent(self):
'''Should 404 when fetching non existent resource.
A server MUST respond with 404 Not Found when processing a request to
fetch a single resource that does not exist.
'''
r = self.test_app().get('/people/1000', status=404)
def test_spec_fetch_non_existent_related(self):
'''Should return primary data of null, not 404.
null is only an appropriate response when the requested URL is one that
might correspond to a single resource, but doesn’t currently.
'''
data = self.test_app().get('/comments/5/author').json['data']
self.assertIsNone(data)
def test_spec_sparse_fields(self):
'''Should return only requested fields.
A client MAY request that an endpoint return only specific fields in the
response on a per-type basis by including a fields[TYPE] parameter.
The value of the fields parameter MUST be a comma-separated (U+002C
COMMA, ",") list that refers to the name(s) of the fields to be
returned.
If a client requests a restricted set of fields for a given resource
type, an endpoint MUST NOT include additional fields in resource objects
of that type in its response.
'''
# Ask for just the title, content and author fields of a post.
r = self.test_app().get('/posts/1?fields[posts]=title,content,author')
data = r.json['data']
atts = data['attributes']
self.assertEqual(len(atts), 2)
self.assertIn('title', atts)
self.assertIn('content', atts)
rels = data['relationships']
self.assertEqual(len(rels), 1)
self.assertIn('author', rels)
def test_spec_empty_fields(self):
"""should return no attributes."""
person = self.test_app().get(
'/people?fields[people]='
).json
self.assertEqual(len(person['data'][0]['attributes']), 0)
def test_spec_single_sort(self):
'''Should return collection sorted by correct field.
An endpoint MAY support requests to sort the primary data with a sort
query parameter. The value for sort MUST represent sort fields.
* GET /people?sort=age
'''
data = self.test_app().get('/posts?sort=content').json['data']
prev = ''
for item in data:
self.assertGreaterEqual(item['attributes']['content'], prev)
prev = item['attributes']['content']
def test_spec_related_sort(self):
'''Should return collection sorted by related field.
Note: It is recommended that dot-separated (U+002E FULL-STOP, “.”) sort
fields be used to request sorting based upon relationship attributes.
For example, a sort field of author.name could be used to request that
the primary data be sorted based upon the name attribute of the author
relationship.
'''
res = self.test_app().get('/posts?sort=author.name')
data = res.json['data']
prev = ''
for item in data:
# author_name is a hybrid attribute that just happens to have
# author.name in it.
self.assertGreaterEqual(item['attributes']['author_name'], prev)
prev = item['attributes']['author_name']
def test_spec_multiple_sort(self):
'''Should return collection sorted by multiple fields, applied in order.
An endpoint MAY support multiple sort fields by allowing comma-separated
(U+002C COMMA, ",") sort fields. Sort fields SHOULD be applied in the
order specified.
* GET /people?sort=age,name
'''
data = self.test_app().get('/posts?sort=content,id').json['data']
prev_content = ''
prev_id = 0
for item in data:
self.assertGreaterEqual(
item['attributes']['content'],
prev_content
)
if item['attributes']['content'] != prev_content:
prev_id = 0
self.assertGreaterEqual(int(item['id']), prev_id)
prev_content = item['attributes']['content']
prev_id = int(item['id'])
def test_spec_descending_sort(self):
'''Should return results sorted by field in reverse order.
The sort order for each sort field MUST be ascending unless it is
prefixed with a minus (U+002D HYPHEN-MINUS, "-"), in which case it MUST
be descending.
* GET /articles?sort=-created,title
'''
data = self.test_app().get('/posts?sort=-content').json['data']
prev = 'zzz'
for item in data:
self.assertLessEqual(item['attributes']['content'], prev)
prev = item['attributes']['content']
# TODO(Colin) repeat sort tests for other collection returning endpoints,
# because: Note: This section applies to any endpoint that responds with a
# resource collection as primary data, regardless of the request type
def test_spec_pagination_links(self):
'''Should provide correct pagination links.
A server MAY provide links to traverse a paginated data set ("pagination
links").
Pagination links MUST appear in the links object that corresponds to a
collection. To paginate the primary data, supply pagination links in the
top-level links object. To paginate an included collection returned in a
compound document, supply pagination links in the corresponding links
object.
The following keys MUST be used for pagination links:
* first: the first page of data
* last: the last page of data
* prev: the previous page of data
* next: the next page of data
'''
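# Illustrative links object for page[limit]=2&page[offset]=2; the URL
# format is an assumption, only the presence of the keys is asserted:
#
#   "links": {
#       "first": ".../posts?page[limit]=2&page[offset]=0",
#       "prev":  ".../posts?page[limit]=2&page[offset]=0",
#       "next":  ".../posts?page[limit]=2&page[offset]=4",
#       "last":  ".../posts?page[limit]=2&page[offset]=<last>"
#   }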
json = self.test_app().get(
'/posts?pj_include_count=1&page[limit]=2&page[offset]=2'
).json
self.assertEqual(len(json['data']), 2)
self.assertIn('first', json['links'])
self.assertIn('last', json['links'])
self.assertIn('prev', json['links'])
self.assertIn('next', json['links'])
def test_spec_pagination_unavailable_links(self):
'''Next page link should not be available
Keys MUST either be omitted or have a null value to indicate that a
particular link is unavailable.
'''
r = self.test_app().get('/posts?pj_include_count=1&page[limit]=1')
available = r.json['meta']['results']['available']
json = self.test_app().get(
'/posts?pj_include_count=1&page[limit]=2&page[offset]=' + str(available - 2)
).json
self.assertEqual(len(json['data']), 2)
self.assertNotIn('next', json['links'])
def test_spec_negative_offset(self):
"""Offset must not be negative"""
self.test_app().get('/posts?page[offset]=-1', status=400)
def test_spec_negative_limit(self):
"""Limit must not be negative"""
self.test_app().get('/posts?page[limit]=-1', status=400)
def test_spec_pagination_order(self):
'''Pages (and results) should order results as per the sort param.
Concepts of order, as expressed in the naming of pagination links, MUST
remain consistent with JSON API’s sorting rules.
'''
data = self.test_app().get(
'/posts?page[limit]=4&sort=content&fields[posts]=content'
).json['data']
self.assertEqual(len(data), 4)
prev = ''
for item in data:
self.assertGreaterEqual(item['attributes']['content'], prev)
prev = item['attributes']['content']
# TODO(Colin) repeat sort tests for other collection returning endpoints,
# because: Note: This section applies to any endpoint that responds with a
# resource collection as primary data, regardless of the request type
def test_spec_filterop_eq(self):
'''Should return collection with just the alice people object.
The filter query parameter is reserved for filtering data. Servers and
clients SHOULD use this key for filtering operations.
'''
data = self.test_app().get('/people?filter[name:eq]=alice').json['data']
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['type'], 'people')
self.assertEqual(data[0]['attributes']['name'], 'alice')
def test_spec_filterop_ne(self):
'''Should return collection of people whose name is not alice.'''
data = self.test_app().get('/people?filter[name:ne]=alice').json['data']
for item in data:
try:
errors = item['meta']['errors']
except KeyError:
self.assertNotEqual('alice', item['attributes']['name'])
def test_spec_filterop_startswith(self):
'''Should return collection where titles start with "post1".'''
data = self.test_app().get(
'/posts?filter[title:startswith]=post1'
).json['data']
for item in data:
self.assertTrue(item['attributes']['title'].startswith('post1'))
def test_spec_filterop_endswith(self):
'''Should return collection where titles end with "main".'''
data = self.test_app().get(
'/posts?filter[title:endswith]=main'
).json['data']
for item in data:
self.assertTrue(item['attributes']['title'].endswith('main'))
def test_spec_filterop_contains(self):
'''Should return collection where titles contain "bob".'''
data = self.test_app().get(
'/posts?filter[title:contains]=bob'
).json['data']
for item in data:
self.assertTrue('bob' in item['attributes']['title'])
def test_spec_filterop_lt(self):
'''Should return posts with published_at less than 2015-01-03.'''
data = self.test_app().get(
'/posts?filter[published_at:lt]=2015-01-03'
).json['data']
ref_date = datetime.datetime(2015,1,3)
for item in data:
#TODO(Colin) investigate more robust way of parsing date.
date = datetime.datetime.strptime(
item['attributes']['published_at'],
"%Y-%m-%dT%H:%M:%S"
)
self.assertLess(date, ref_date)
def test_spec_filterop_gt(self):
'''Should return posts with published_at greater than 2015-01-03.'''
data = self.test_app().get(
'/posts?filter[published_at:gt]=2015-01-03'
).json['data']
ref_date = datetime.datetime(2015,1,3)
for item in data:
#TODO(Colin) investigate more robust way of parsing date.
date = datetime.datetime.strptime(
item['attributes']['published_at'],
"%Y-%m-%dT%H:%M:%S"
)
self.assertGreater(date, ref_date)
def test_spec_filterop_le(self):
'''Should return posts with published_at <= 2015-01-03.'''
data = self.test_app().get(
'/posts?filter[published_at:le]=2015-01-03'
).json['data']
ref_date = datetime.datetime(2015,1,3)
for item in data:
#TODO(Colin) investigate more robust way of parsing date.
date = datetime.datetime.strptime(
item['attributes']['published_at'],
"%Y-%m-%dT%H:%M:%S"
)
self.assertLessEqual(date, ref_date)
def test_spec_filterop_ge(self):
'''Should return posts with published_at >= 2015-01-03.'''
data = self.test_app().get(
'/posts?filter[published_at:ge]=2015-01-03'
).json['data']
ref_date = datetime.datetime(2015,1,3)
for item in data:
#TODO(Colin) investigate more robust way of parsing date.
date = datetime.datetime.strptime(
item['attributes']['published_at'],
"%Y-%m-%dT%H:%M:%S"
)
self.assertGreaterEqual(date, ref_date)
def test_spec_filterop_like(self):
'''Should return collection where content matches "*thing*".'''
data = self.test_app().get(
'/posts?filter[content:like]=*thing*'
).json['data']
for item in data:
self.assertTrue('thing' in item['attributes']['content'])
def test_spec_filterop_ilike(self):
'''Should return collection where content case insensitive matches "*thing*".'''
data = self.test_app().get(
'/posts?filter[content:ilike]=*THING*'
).json['data']
for item in data:
self.assertTrue('thing' in item['attributes']['content'])
def test_spec_filterop_json_contains(self):
'''Should return collection where json_content contains {"b": 2}.'''
data = self.test_app().get(
'/posts?filter[json_content:contains]={"b": 2}'
).json['data']
for item in data:
self.assertIn('b', item['attributes']['json_content'])
def test_spec_filterop_json_contained_by(self):
'''Should return collection where json_content contained by expression.'''
containing_expr = '{"a":1, "b": 2, "c": 3}'
containing_json = json.loads(containing_expr)
data = self.test_app().get(
'/posts?filter[json_content:contained_by]={}'.format(containing_expr)
).json['data']
for item in data:
for key in item['attributes']['json_content']:
self.assertIn(key, containing_json)
def test_spec_filter_related_property(self):
'''Should return collection of posts with author.name=alice.'''
data = self.test_app().get('/posts?filter[author.name:eq]=alice').json['data']
for item in data:
self.assertEqual(item['attributes']['author_name'], 'alice')
###############################################
# POST tests.
###############################################
def test_spec_post_invalid_json(self):
'''Invalid json should raise an error.'''
# Send garbage json
self.test_app().post(
'/people',
'{,,,}',
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_spec_post_no_data_attribute(self):
'''Missing data attribute in json should raise an error.'''
# Send minimal json with no data attribute
self.test_app().post(
'/people',
'{"meta": {}}',
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_spec_post_data_not_item(self):
'''data which is not a single resource object should raise an error.'''
# Send json where data is an array rather than a single resource object
self.test_app().post(
'/people',
'{"data": []}',
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_spec_post_collection(self):
'''Should create a new person object.'''
# Make sure there is no test person.
data = self.test_app().get('/people?filter[name:eq]=test').json['data']
self.assertEqual(len(data),0)
# Try adding a test person.
self.test_app().post_json(
'/people',
{
'data': {
'type': 'people',
'attributes': {
'name': 'test'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'}
)
# Make sure they are there.
data = self.test_app().get('/people?filter[name:eq]=test').json['data']
self.assertEqual(len(data),1)
def test_spec_post_collection_no_attributes(self):
'''Should create a person with no attributes.'''
self.test_app().post_json(
'/people',
{
'data': {
'type': 'people',
}
},
headers={'Content-Type': 'application/vnd.api+json'}
)
def test_spec_post_must_have_type(self):
'''type must be specified.
Note: The type member is required in every resource object throughout
requests and responses in JSON API. There are some cases, such as when
POSTing to an endpoint representing heterogenous data, when the type
could not be inferred from the endpoint. However, picking and choosing
when it is required would be confusing; it would be hard to remember
when it was required and when it was not. Therefore, to improve
consistency and minimize confusion, type is always required.
'''
self.test_app().post_json(
'/people',
{
'data': {
'attributes': {
'name': 'test'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_spec_post_with_id(self):
'''Should create a person object with id 1000.
A server MAY accept a client-generated ID along with a request to create
a resource. An ID MUST be specified with an id key. The client SHOULD
use a properly generated and formatted UUID as described in RFC 4122
If a POST request did not include a Client-Generated ID and the
requested resource has been created successfully, the server MUST return
a 201 Created status code.
The response SHOULD include a Location header identifying the location
of the newly created resource.
The response MUST also include a document that contains the primary
resource created.
If the resource object returned by the response contains a self key in
its links member and a Location header is provided, the value of the
self member MUST match the value of the Location header.
Comment: jsonapi.allow_client_ids is set in the ini file, so we should
be able to create objects with ids. The id strategy in test_project
isn't RFC4122 UUID, but we're not enforcing that since there may be
other globally unique id strategies in use.
'''
r = self.test_app().post_json(
'/people',
{
'data': {
'id': '1000',
'type': 'people',
'attributes': {
'name': 'test'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=201 # Test the status code.
)
# Test for Location header.
location = r.headers.get('Location')
self.assertIsNotNone(location)
# Test that json is a resource object
data = r.json['data']
self.assertEqual(data['id'],'1000')
self.assertEqual(data['type'],'people')
self.assertEqual(data['attributes']['name'], 'test')
# Test that the Location header and the self link match.
self.assertEqual(data['links']['self'], location)
def test_spec_post_with_id_disallowed(self):
'''Should 403 when attempting to create object with id.
A server MUST return 403 Forbidden in response to an unsupported request
to create a resource with a client-generated ID.
'''
# We need a test_app with different settings.
test_app = self.test_app(
options={'pyramid_jsonapi.allow_client_ids': 'false'}
)
res = test_app.post_json(
'/people',
{
'data': {
'id': '1000',
'type': 'people',
'attributes': {
'name': 'test'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=403
)
def test_spec_post_with_id_conflicts(self):
'''Should 409 if id exists.
A server MUST return 409 Conflict when processing a POST request to
create a resource with a client-generated ID that already exists.
'''
self.test_app().post_json(
'/people',
{
'data': {
'id': '1',
'type': 'people',
'attributes': {
'name': 'test'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=409 # Test the status code.
)
def test_spec_post_type_conflicts(self):
'''Should 409 if type conflicts with endpoint.
A server MUST return 409 Conflict when processing a POST request in
which the resource object’s type is not among the type(s) that
constitute the collection represented by the endpoint.
'''
self.test_app().post_json(
'/people',
{
'data': {
'id': '1000',
'type': 'frogs',
'attributes': {
'name': 'test'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=409 # Test the status code.
)
###############################################
# PATCH tests.
###############################################
def test_spec_patch(self):
'''Should change alice's name to alice2'''
# Patch alice.
self.test_app().patch_json(
'/people/1',
{
'data': {
'id': '1',
'type': 'people',
'attributes': {
'name': 'alice2'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
)
# Fetch alice back...
data = self.test_app().get('/people/1').json['data']
# ...should now be alice2.
self.assertEqual(data['attributes']['name'], 'alice2')
def test_spec_patch_invalid_json(self):
'''Invalid json should raise an error.'''
# Send garbage json
self.test_app().patch(
'/people/1',
'{,,,}',
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_spec_patch_no_type_id(self):
'''Should 409 if id or type do not exist.
The PATCH request MUST include a single resource object as primary data.
The resource object MUST contain type and id members.
'''
# No id.
self.test_app().patch_json(
'/people/1',
{
'data': {
'type': 'people',
'attributes': {
'name': 'alice2'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=409
)
# No type.
self.test_app().patch_json(
'/people/1',
{
'data': {
'id': '1',
'attributes': {
'name': 'alice2'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=409
)
# No type or id.
self.test_app().patch_json(
'/people/1',
{
'data': {
'attributes': {
'name': 'alice2'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=409
)
def test_spec_patch_integrity_error(self):
'''Should 409 if PATCH violates a server side constraint.
A server MAY return 409 Conflict when processing a PATCH request to
update a resource if that update would violate other server-enforced
constraints (such as a uniqueness constraint on a property other than
id).
'''
self.test_app().patch_json(
'/blogs/1',
{
'data': {
'id': '1',
'type': 'blogs',
'attributes': {
'title': 'forbidden title'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=409
)
def test_spec_patch_item_on_success(self):
'''Should return a representation of the patched object.
If a server accepts an update but also changes the resource(s) in ways
other than those specified by the request (for example, updating the
updated-at attribute or a computed sha), it MUST return a 200 OK
response. The response document MUST include a representation of the
updated resource(s) as if a GET request was made to the request URL
'''
        doc = self.test_app().patch_json(
            '/people/1',
            {
                'data': {
                    'id': '1',
                    'type': 'people',
                    'attributes': {
                        'name': 'alice2'
                    }
                }
            },
            headers={'Content-Type': 'application/vnd.api+json'},
        ).json
        self.assertIn('meta', doc)
        self.assertEqual(doc['data']['type'], 'people')
        self.assertEqual(doc['data']['id'], '1')
def notused__spec_patch_empty_success(self):
'''Should return only meta, not data or links.
A server MUST return a 200 OK status code if an update is successful,
the client’s current attributes remain up to date, and the server
responds only with top-level meta data. In this case the server MUST NOT
include a representation of the updated resource(s).
'''
        doc = self.test_app().patch_json(
            '/people/1',
            {
                'data': {
                    'id': '1',
                    'type': 'people',
                    'attributes': {
                        'name': 'alice2'
                    }
                }
            },
            headers={'Content-Type': 'application/vnd.api+json'},
        ).json
        self.assertIn('meta', doc)
        self.assertEqual(len(doc), 1)
def test_spec_patch_nonexistent(self):
'''Should 404 when patching non existent resource.
A server MUST return 404 Not Found when processing a request to modify a
resource that does not exist.
'''
self.test_app().patch_json(
'/people/1000',
{
'data': {
'id': '1000',
'type': 'people',
'attributes': {
'name': 'alice2'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=404
)
# Patching non existent attribute
detail = self.test_app().patch_json(
'/people/1',
{
'data': {
'type': 'people',
'id': '1',
'attributes': {
'non_existent': 'splat'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=404
).json['errors'][0]['detail']
self.assertIn('has no attribute',detail)
# Patching non existent relationship
detail = self.test_app().patch_json(
'/people/1',
{
'data': {
'type': 'people',
'id': '1',
'attributes': {
'name': 'splat'
},
'relationships': {
'non_existent': {
'data': None
}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=404
).json['errors'][0]['detail']
self.assertIn('has no relationship',detail)
###############################################
# DELETE tests.
###############################################
def test_spec_delete_item(self):
'''Should delete comments/5
An individual resource can be deleted by making a DELETE request to the
resource’s URL
'''
# Check that comments/5 exists.
self.test_app().get('/comments/5')
# Delete comments/5.
self.test_app().delete('/comments/5')
# Check that comments/5 no longer exists.
self.test_app().get('/comments/5', status=404)
def test_spec_delete_no_such_item(self):
'''Should fail to delete non-existent comments/99999
A server SHOULD return a 404 Not Found status code if
a deletion request fails due to the resource not existing.
'''
# Delete comments/99999.
self.test_app().delete('/comments/99999', status=404)
def test_spec_delete_invalid_item(self):
'''Should fail to delete non-existent comments/invalid
A server SHOULD return a 404 Not Found status code if
a deletion request fails due to the resource not existing.
'''
# Delete comments/invalid
self.test_app().delete('/comments/invalid', status=404)
class TestErrors(DBTestBase):
'''Test that errors are thrown properly.'''
###############################################
# Error tests.
###############################################
def test_errors_structure(self):
'''Errors should be array of objects with code, title, detail members.'''
r = self.test_app().get(
'/people',
headers={ 'Content-Type': 'application/vnd.api+json; param=val' },
status=415,
)
self.assertIn('errors', r.json)
self.assertIsInstance(r.json['errors'], list)
err = r.json['errors'][0]
self.assertIn('code', err)
self.assertIn('title', err)
self.assertIn('detail', err)
def test_errors_only_controlled_paths(self):
'''Error handlers only for controlled paths ('api' and 'metadata')'''
app = self.test_app(
options={'pyramid_jsonapi.route_pattern_api_prefix': 'api'}
)
# Both /api/ and /metadata/ should have json structured errors
for path in ('/api/', '/metadata/'):
            # .json raises if the error document is not json structured
            app.get(path, status=404).json
# Other paths should not have json errors
for path in ('/', '/splat/', '/api_extra/'):
r = app.get(path, status=404)
self.assertRaises(AttributeError, getattr, r, 'json')
def test_errors_composite_key(self):
'''Should raise exception if a model has a composite key.'''
self.assertRaisesRegex(
Exception,
r'^Model \S+ has more than one primary key.$',
self.test_app,
{'pyramid_jsonapi_tests.models_iterable': 'composite_key'}
)
class TestMalformed(DBTestBase):
'''Various malformed POSTs and PATCHes.'''
def test_malformed_collection_post_not_single_item(self):
'''Should complain about data being a list.'''
self.test_app().post_json(
'/people',
{'type': 'people', 'data': []},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_malformed_collection_post_no_data(self):
'''Should complain about lack of data attribute.'''
self.test_app().post_json(
'/people',
{'type': 'people'},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_malformed_item_patch_no_data(self):
'''Should complain about lack of data attribute.'''
self.test_app().patch_json(
'/people/1',
{'type': 'people', 'id': '1'},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_no_filter_operator_defaults_to_eq(self):
'''Missing filter operator should behave as 'eq'.'''
r = self.test_app().get('/people?filter[name:eq]=alice')
op = r.json['data'][0]
r = self.test_app().get('/people?filter[name]=alice')
noop = r.json['data'][0]
self.assertEqual(op, noop)
def test_malformed_filter_unregistered_operator(self):
        '''Unknown filter operator should raise 400 BadRequest.'''
self.test_app().get(
'/people?filter[name:not_an_op]=splat',
status=400
)
def test_malformed_filter_bad_operator(self):
'''Known filter with no comparator should raise 500 InternalServerError.'''
self.test_app().get(
'/people?filter[name:bad_op]=splat',
status=500
)
def test_malformed_filter_unknown_column(self):
        '''Unknown column should raise 400 BadRequest.'''
self.test_app().get(
'/people?filter[unknown_column:eq]=splat',
status=400
)
class TestHybrid(DBTestBase):
'''Test cases for @hybrid_property attributes.'''
def test_hybrid_readonly_get(self):
'''Blog object should have owner_name attribute.'''
atts = self.test_app().get(
'/blogs/1'
).json['data']['attributes']
self.assertIn('owner_name', atts)
self.assertEqual(atts['owner_name'], 'alice')
def test_hybrid_readonly_patch(self):
'''Updating owner_name should fail with 409.'''
self.test_app().patch_json(
'/blogs/1',
{
'data': {
'id': '1',
'type': 'blogs',
'attributes': {
'owner_name': 'alice2'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=409
)
def test_hybrid_writeable_patch(self):
'''Should be able to update author_name of Post object.'''
# Patch post 1 and change author_name to 'alice2'
r = self.test_app().patch_json(
'/posts/1',
{
'data': {
'id': '1',
'type': 'posts',
'attributes': {
'author_name': 'alice2'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
)
# author_name should be in the list of updated attributes.
self.assertIn('author_name', r.json['meta']['updated']['attributes'])
# Fetch alice back...
data = self.test_app().get('/people/1').json['data']
# ...should now be called alice2.
self.assertEqual(data['attributes']['name'], 'alice2')
class TestHybridRelationships(DBTestBase):
'''Test cases for @hybrid_property relationships.'''
def test_hybrid_rel_to_one_get(self):
'''Post should have a relationship called blog_owner'''
data = self.test_app().get('/posts/1').json['data']
# Should have a relationship called blog_owner.
self.assertIn('blog_owner', data['relationships'])
# But not an attribute
self.assertNotIn('blog_owner', data['attributes'])
self.assertEqual(
data['relationships']['blog_owner']['data'],
{'type': 'people', 'id': '1'}
)
def test_hybrid_rel_to_many_get(self):
'''Blog should have a relationship called posts_authors'''
data = self.test_app().get('/blogs/1').json['data']
# Should have a relationship called posts_authors.
self.assertIn('posts_authors', data['relationships'])
# But not an attribute
self.assertNotIn('posts_authors', data['attributes'])
self.assertEqual(
data['relationships']['posts_authors']['data'],
[{'type': 'people', 'id': '1'}]
)
class TestJoinedTableInheritance(DBTestBase):
'''Test cases for sqlalchemy joined table inheritance pattern.'''
def test_joined_benign_create_fetch(self):
'''Should create BenignComment with author people/1 and then fetch it.'''
content = 'Main content.'
fawning_text = 'You are so great.'
created = self.test_app().post_json(
'/benign_comments',
{
'data': {
'type': 'benign_comments',
'attributes': {
'content': content,
'fawning_text': fawning_text
},
'relationships': {
'author': {
'data': {'type': 'people', 'id': '1'}
}
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=201
).json['data']
# Fetch the object back
fetched = self.test_app().get(
'/benign_comments/{}'.format(created['id'])
).json['data']
self.assertEqual(fetched['attributes']['content'], content)
self.assertEqual(
fetched['attributes']['fawning_text'],
fawning_text
)
self.assertEqual(fetched['relationships']['author']['data']['id'],'1')
class TestFeatures(DBTestBase):
'''Test case for features beyond spec.'''
def test_feature_invisible_column(self):
'''people object should not have attribute "invisible".'''
atts = self.test_app().get(
'/people/1'
).json['data']['attributes']
self.assertNotIn('invisible', atts)
self.assertNotIn('invisible_hybrid', atts)
def test_feature_invisible_relationship(self):
'''people object should not have relationship "invisible_comments".'''
rels = self.test_app().get(
'/people/1'
).json['data']['relationships']
self.assertNotIn('invisible_comments', rels)
def test_feature_rename_collection(self):
'''Should be able to fetch from whatsits even though table is things.'''
# There should be whatsits...
self.test_app().get('/whatsits')
# ...but not things.
self.test_app().get('/things', status=404)
def test_feature_construct_with_models_list(self):
'''Should construct an api from a list of models.'''
test_app = self.test_app(
options={'pyramid_jsonapi_tests.models_iterable': 'list'}
)
test_app.get('/blogs/1')
def test_feature_debug_endpoints(self):
'''Should create a set of debug endpoints for manipulating the database.'''
test_app = self.test_app(
options={
'pyramid_jsonapi.debug_endpoints': 'true',
'pyramid_jsonapi.debug_test_data_module': 'test_project.test_data'
}
)
test_app.get('/debug/populate')
def test_feature_disable_schema_validation(self):
'''Should disable schema validation.'''
# Create an app without schema validation.
test_app = self.test_app(
options = {
'pyramid_jsonapi.schema_validation': 'false'
}
)
# Schema validation produces 400 without 'type', without validation we
# get 409 (Unsupported type None)
test_app.post_json(
'/people',
{
'data': {
'not_type': 'people',
'attributes': {
'name': 'test'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=409
)
def test_feature_alternate_schema_file(self):
'''Should load alternate schema file.'''
test_app = self.test_app(
options={'pyramid_jsonapi.schema_file': '{}/test-alt-schema.json'.format(parent_dir)}
)
test_app.post_json(
'/people',
{
'data': {
'not_type': 'people',
'attributes': {
'name': 'test'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'},
status=400
)
def test_feature_debug_meta(self):
'''Should add meta information.'''
test_app = self.test_app(
options={'pyramid_jsonapi.debug_meta': 'true'}
)
self.assertIn('debug',test_app.get('/people/1').json['meta'])
def test_feature_expose_foreign_keys(self):
"""Should return blog with owner_id."""
test_app = self.test_app(
options={'pyramid_jsonapi.expose_foreign_keys': 'true'}
)
self.assertIn('owner_id', test_app.get('/blogs/1').json['data']['attributes'])
class TestBugs(DBTestBase):
def test_19_last_negative_offset(self):
'''last link should not have negative offset.
#19: 'last' link has negative offset if zero results are returned
'''
# Need an empty collection: use a filter that will not match.
last = self.test_app().get(
'/posts?pj_include_count=1&filter[title:eq]=frog'
).json['links']['last']
offset = int(
urllib.parse.parse_qs(
urllib.parse.urlparse(last).query
)['page[offset]'][0]
)
self.assertGreaterEqual(offset, 0)
def test_20_non_string_id(self):
'''Creating single object should not result in integer id.
#20: creating single object returns non string id
'''
data = self.test_app().post_json(
'/people',
{
'data': {
'type': 'people',
'attributes': {
'name': 'test'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'}
).json['data']
self.assertIsInstance(data['id'], str)
def test_56_post_with_non_id_primary_key(self):
'''POST to model with non 'id' primary key should work.
#56: POSTing a new item where the primary key column is not 'id' causes
an error.
'''
data = self.test_app().post_json(
'/comments',
{
'data': {
'id': '1000',
'type': 'comments',
'attributes': {
'content': 'test'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'}
).json['data']
self.assertEqual(data['id'],'1000')
def test_association_proxy(self):
'''Should treat association proxy as a relationship.'''
data = self.test_app().get('/people/1').json['data']
self.assertIn('articles_by_proxy', data['relationships'])
def test_175_head_method(self):
'''Should produce OK for HEAD request.'''
self.test_app().head('/people/1')
class TestEndpoints(DBTestBase):
"""Tests for endpoint configuration."""
def test_api_prefix(self):
"""Test setting api prefix."""
self.test_app(
options={
'pyramid_jsonapi.route_pattern_api_prefix': 'api'
}).get('/api/people')
def test_metadata_endpoints_disable(self):
self.test_app(
options={
'pyramid_jsonapi.metadata_endpoints': 'false'
}).get('/metadata/JSONSchema', status=404)
def test_api_version(self):
"""Test setting api version."""
self.test_app(
options={
'pyramid_jsonapi.api_version': '10'
}).get('/10/people')
self.test_app(
options={
'pyramid_jsonapi.api_version': '10'
}).get('/10/metadata/JSONSchema')
def test_route_pattern_prefix(self):
"""Test setting route_pattern_prefix."""
self.test_app(
options={
'pyramid_jsonapi.route_pattern_prefix': 'SPLAT'
}).get('/SPLAT/people')
self.test_app(
options={
'pyramid_jsonapi.route_pattern_prefix': 'SPLAT'
}).get('/SPLAT/metadata/JSONSchema')
def test_route_pattern_prefix_error(self):
"""Test setting route_pattern_prefix error handling."""
resp = self.test_app(
options={
'pyramid_jsonapi.route_pattern_prefix': 'SPLAT'
}).get('/SPLAT/invalid',
status=404)
        self.assertEqual(resp.content_type, 'application/vnd.api+json')
    def test_api_version_v1(self):
        """Test setting api_version."""
self.test_app(
options={
'pyramid_jsonapi.api_version': 'v1',
}).get('/v1/people')
def test_api_version_error(self):
"""Test setting api_version error handling."""
resp = self.test_app(
options={
'pyramid_jsonapi.api_version': 'v1',
}).get('/v1/invalid',
status=404)
        self.assertEqual(resp.content_type, 'application/vnd.api+json')
def test_route_pattern_api_prefix(self):
"""Test setting route_pattern_api_prefix."""
self.test_app(
options={
'pyramid_jsonapi.route_pattern_api_prefix': 'API'
}).get('/API/people')
def test_route_pattern_api_prefix_error(self):
"""Test setting route_pattern_prefix error handling."""
resp = self.test_app(
options={
'pyramid_jsonapi.route_pattern_api_prefix': 'API'
}).get('/API/invalid',
status=404)
        self.assertEqual(resp.content_type, 'application/vnd.api+json')
def test_route_pattern_metadata_prefix(self):
"""Test setting route_pattern_metadata_prefix."""
self.test_app(
options={
'pyramid_jsonapi.route_pattern_metadata_prefix': 'METADATA'
}).get('/METADATA/JSONSchema')
def test_route_pattern_metadata_prefix_error(self):
"""Test setting route_pattern_prefix error handling."""
resp = self.test_app(
options={
'pyramid_jsonapi.route_pattern_metadata_prefix': 'METADATA'
}).get('/METADATA/invalid',
status=404)
        self.assertEqual(resp.content_type, 'application/vnd.api+json')
def test_route_pattern_all_prefixes(self):
"""Test setting all pattern prefixes."""
api = self.test_app(
options={
'pyramid_jsonapi.route_pattern_prefix': 'SPLAT',
'pyramid_jsonapi.api_version': 'v1',
'pyramid_jsonapi.route_pattern_api_prefix': 'API',
'pyramid_jsonapi.route_pattern_metadata_prefix': 'METADATA'
})
api.get('/SPLAT/v1/API/people')
api.get('/SPLAT/v1/METADATA/JSONSchema')
def test_route_pattern_all_prefixes_error(self):
"""Test setting all pattern prefixes error handling."""
api = self.test_app(
options={
'pyramid_jsonapi.route_pattern_prefix': 'SPLAT',
'pyramid_jsonapi.api_version': 'v1',
'pyramid_jsonapi.route_pattern_api_prefix': 'API',
'pyramid_jsonapi.route_pattern_metadata_prefix': 'METADATA'
})
self.assertEqual(
api.get('/SPLAT/v1/API/invalid', status=404).content_type,
'application/vnd.api+json'
)
self.assertEqual(
api.get('/SPLAT/v1/METADATA/invalid', status=404).content_type,
'application/vnd.api+json'
)
class TestMetaData(DBTestBase):
"""Tests for the metadata plugins."""
@classmethod
def setUpClass(cls):
"""Setup metadata plugins."""
super().setUpClass()
config = Configurator()
cls.api = pyramid_jsonapi.PyramidJSONAPI(config, [])
cls.api.create_jsonapi()
cls.metadata = pyramid_jsonapi.metadata.MetaData(cls.api)
def test_no_jsonschema_module(self):
"""Test how things break if jsonschema is disabled."""
self.test_app(
options={
'pyramid_jsonapi.metadata_modules': ''
}).post('/people', '{}', status=500)
self.test_app(
options={
'pyramid_jsonapi.metadata_modules': ''
}).get('/metadata/JSONSchema', '{}', status=404)
def test_disable_jsonschema_validation(self):
"""Test disabling jsonschema and validation together works."""
self.test_app(
options={
'pyramid_jsonapi.metadata_modules': '',
'pyramid_jsonapi.schema_validation': 'false',
}).post_json(
'/people',
{
'data': {
'type': 'people',
'attributes': {
'name': 'test'
}
}
},
headers={'Content-Type': 'application/vnd.api+json'}
)
def test_jsonschema_template(self):
"""Test that template() returns valid json, and as a view."""
dir_tmpl = json.dumps(self.metadata.JSONSchema.template())
view_tmpl = self.test_app().get('/metadata/JSONSchema', '{}').json
def test_jsonschema_load_schema_file(self):
"""Test loading jsonschema from file."""
path = "/tmp/nosuchfile.json"
schema = {"test": "true"}
self.api.settings.schema_file = path
with patch("builtins.open", mock_open(read_data=json.dumps(schema))) as mock_file:
self.metadata.JSONSchema.load_schema()
mock_file.assert_called_with(path)
self.assertDictEqual(schema, self.metadata.JSONSchema.schema)
def test_jsonschema_resource_attributes_view(self):
"""Test that resource_attributes view returns valid json."""
self.test_app().get('/metadata/JSONSchema/resource/people', status=200).json
def test_jsonschema_resource_attributes_view_not_found(self):
"""Test that view returns 404 for non-existent endpoint."""
self.test_app().get('/metadata/JSONSchema/resource/invalid', status=404)
def test_jsonschema_endpoint_schema_view(self):
"""Check that endpoint_schema returns json with appropriate query params."""
self.test_app().get('/metadata/JSONSchema/endpoint/people',
params='method=get&direction=request&code=200',
status=200).json
self.test_app().get('/metadata/JSONSchema/endpoint/people',
params='method=get&direction=response&code=200',
status=200).json
def test_jsonschema_endpoint_schema_view_failure_schema(self):
"""Test that a reference to the failure schema is returned for code=4xx."""
res = self.test_app().get('/metadata/JSONSchema/endpoint/people',
params='method=get&direction=response&code=404',
status=200).json
self.assertEqual(res, {"$ref" : "#/definitions/failure"})
def test_jsonschema_endpoint_schema_view_bad_params(self):
"""Test that 400 returned if missing/bad query params specified."""
self.test_app().get('/metadata/JSONSchema/endpoint/people', status=400).json
self.test_app().get('/metadata/JSONSchema/endpoint/people', params='cat=1', status=400).json
def test_jsonschema_endpoint_schema_view_not_found(self):
self.test_app().get('/metadata/JSONSchema/endpoint/invalid',
params='method=get&direction=request&code=200',
status=404).json
def test_jsonschema_invalid_schema(self):
"""Invalid schema mappings generate empty resource attrs."""
# posts has JSONB field
res = self.test_app().get('/metadata/JSONSchema/resource/posts').json
self.assertEqual(res, {})
def test_openapi_swagger_ui_view(self):
"""Test that swagger_ui view returns html."""
html = self.test_app().get('/metadata/OpenAPI', status=200).html
def test_openapi_specification_view(self):
"""Test that specification view returns valid json."""
self.test_app().get('/metadata/OpenAPI/specification', status=200).json
# def test_openapi_specification_valid(self):
# """Test that the openapi specification returned is valid."""
# validate_spec(self.test_app().get('/metadata/OpenAPI/specification', status=200).json)
# print(json.dumps(self.test_app().get('/metadata/OpenAPI/specification', status=200).json, indent=4))
def test_openapi_file(self):
"""Test providing openapi spec updates in a file."""
path = os.path.dirname(os.path.realpath(__file__))
res = self.test_app(
options={
'pyramid_jsonapi.openapi_file': os.path.join(path, 'test-openapi.json'),
}).get('/metadata/OpenAPI/specification', status=200).json
# Check that openapi file merge has overridden version string
self.assertEqual("999", res['openapi'])
if __name__ == "__main__":
unittest.main()
| colinhiggs/pyramid-jsonapi | test_project/test_project/tests.py | Python | agpl-3.0 | 143,952 |
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import colander
import venusian
from BTrees.OOBTree import OOBTree
from persistent.list import PersistentList
from persistent.dict import PersistentDict
from webob.multidict import MultiDict
from zope.interface import implementer
from pyramid.threadlocal import get_current_request
from substanced.util import get_oid
from substanced.util import renamer
from substanced.content import content
from dace.objectofcollaboration.principal.role import DACE_ROLES
from dace.objectofcollaboration.principal.util import get_access_keys
from dace.objectofcollaboration.entity import Entity
from dace.descriptors import (
SharedUniqueProperty,
CompositeUniqueProperty,
SharedMultipleProperty,
CompositeMultipleProperty)
from dace.util import getSite, get_obj, find_catalog
from pontus.schema import Schema
from pontus.core import VisualisableElement
from pontus.widget import (
Select2Widget)
from novaideo import _, ACCESS_ACTIONS
from novaideo.content.interface import (
IVersionableEntity,
IDuplicableEntity,
ISearchableEntity,
ICommentable,
IPrivateChannel,
IChannel,
ICorrelableEntity,
IPresentableEntity,
INode,
IEmojiable,
IPerson,
ISignalableEntity,
ISustainable,
IDebatable,
ITokenable)
BATCH_DEFAULT_SIZE = 8
SEARCHABLE_CONTENTS = {}
SUSTAINABLE_CONTENTS = {}
NOVAIDO_ACCES_ACTIONS = {}
ADVERTISING_CONTAINERS = {}
ON_LOAD_VIEWS = {}
class AnonymisationKinds(object):
anonymity = 'anonymity'
pseudonymity = 'pseudonymity'
@classmethod
def get_items(cls):
return {
cls.anonymity: _('Anonymity'),
cls.pseudonymity: _('Pseudonymity')
}
@classmethod
def get_title(cls, item):
items = cls.get_items()
return items.get(item, None)
class Evaluations(object):
    """Evaluation (token) kinds."""
support = 'support'
oppose = 'oppose'
def get_searchable_content(request=None):
if request is None:
request = get_current_request()
return getattr(request, 'searchable_contents', {})
class advertising_banner_config(object):
""" A function, class or method decorator which allows a
developer to create advertising banner registrations.
Advertising banner is a panel. See pyramid_layout.panel_config.
"""
def __init__(self, name='', context=None, renderer=None, attr=None):
self.name = name
self.context = context
self.renderer = renderer
self.attr = attr
def __call__(self, wrapped):
settings = self.__dict__.copy()
def callback(context, name, ob):
config = context.config.with_package(info.module)
config.add_panel(panel=ob, **settings)
ADVERTISING_CONTAINERS[self.name] = {'title': ob.title,
'description': ob.description,
'order': ob.order,
'validator': ob.validator,
'tags': ob.tags
#TODO add validator ob.validator
}
info = venusian.attach(wrapped, callback, category='pyramid_layout')
if info.scope == 'class':
# if the decorator was attached to a method in a class, or
# otherwise executed at class scope, we need to set an
# 'attr' into the settings if one isn't already in there
if settings['attr'] is None:
settings['attr'] = wrapped.__name__
settings['_info'] = info.codeinfo # fbo "action_method"
return wrapped
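# Illustrative use of advertising_banner_config (a sketch; the panel name and
# template are hypothetical). The venusian callback above reads ob.title,
# ob.description, ob.order, ob.validator and ob.tags, so the decorated
# callable is expected to carry those attributes:
#
#   @advertising_banner_config(name='footer_banner',
#                              renderer='templates/footer_banner.pt')
#   def footer_banner(context, request):
#       return {}
#
#   footer_banner.title = 'Footer banner'
#   footer_banner.description = 'Banner displayed in the footer'
#   footer_banner.order = 0
#   footer_banner.validator = None
#   footer_banner.tags = []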
class access_action(object):
""" Decorator for creationculturelle access actions.
An access action allows to view an object"""
def __init__(self, access_key=None):
self.access_key = access_key
def __call__(self, wrapped):
def callback(scanner, name, ob):
if ob.context in ACCESS_ACTIONS:
ACCESS_ACTIONS[ob.context].append({'action': ob,
'access_key': self.access_key})
else:
ACCESS_ACTIONS[ob.context] = [{'action': ob,
'access_key': self.access_key}]
venusian.attach(wrapped, callback)
return wrapped
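# Illustrative registration of an access action (a sketch; SeeIdea and IIdea
# are hypothetical names). can_access() below looks actions up through the
# interface declared on the context and calls processsecurity_validation:
#
#   @access_action(access_key=my_access_key_function)
#   class SeeIdea(SomeProcessAction):
#       context = IIdea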
def can_access(user, context, request=None, root=None):
""" Return 'True' if the user can access to the context"""
declared = getattr(getattr(context, '__provides__', None),
'declared', [None])[0]
for data in ACCESS_ACTIONS.get(declared, []):
if data['action'].processsecurity_validation(None, context):
return True
return False
_marker = object()
def serialize_roles(roles, root=None):
result = []
principal_root = getSite()
if principal_root is None:
return []
if root is None:
root = principal_root
root_oid = str(get_oid(root, ''))
principal_root_oid = str(get_oid(principal_root, ''))
for role in roles:
if isinstance(role, tuple):
obj_oid = str(get_oid(role[1], ''))
result.append((role[0]+'_'+obj_oid).lower())
superiors = getattr(DACE_ROLES.get(role[0], _marker),
'all_superiors', [])
result.extend([(r.name+'_'+obj_oid).lower()
for r in superiors])
else:
result.append(role.lower()+'_'+root_oid)
superiors = getattr(DACE_ROLES.get(role, _marker),
'all_superiors', [])
result.extend([(r.name+'_'+root_oid).lower() for r in
superiors])
for superior in superiors:
if superior.name == 'Admin':
result.append('admin_'+principal_root_oid)
break
return list(set(result))
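# Illustrative output of serialize_roles (oids are hypothetical):
#   serialize_roles([('Moderator', some_obj), 'Member'])
#   -> ['moderator_123', 'member_456', ...]
# where 123 is the oid of some_obj, 456 the oid of the site root, plus one
# lower-cased entry per superior role found in DACE_ROLES.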
def generate_access_keys(user, root):
return get_access_keys(
user, root=root)
@implementer(ICommentable)
class Commentable(VisualisableElement, Entity):
""" A Commentable entity is an entity that can be comment"""
name = renamer()
comments = CompositeMultipleProperty('comments')
def __init__(self, **kwargs):
super(Commentable, self).__init__(**kwargs)
self.len_comments = 0
def update_len_comments(self):
result = len(self.comments)
result += sum([c.update_len_comments() for c in self.comments])
self.len_comments = result
return self.len_comments
def addtoproperty(self, name, value, moving=None):
super(Commentable, self).addtoproperty(name, value, moving)
if name == 'comments':
channel = getattr(self, 'channel', self)
channel.len_comments += 1
if self is not channel:
self.len_comments += 1
def delfromproperty(self, name, value, moving=None):
super(Commentable, self).delfromproperty(name, value, moving)
if name == 'comments':
channel = getattr(self, 'channel', self)
channel.len_comments -= 1
if self is not channel:
self.len_comments -= 1
@implementer(IDebatable)
class Debatable(VisualisableElement, Entity):
""" A Debatable entity is an entity that can be comment"""
channels = CompositeMultipleProperty('channels', 'subject')
def __init__(self, **kwargs):
super(Debatable, self).__init__(**kwargs)
@property
def channel(self):
channels = getattr(self, 'channels', [])
return channels[0] if channels else None
def get_channel(self, user):
return self.channel
def get_title(self, user=None):
return getattr(self, 'title', '')
def subscribe_to_channel(self, user):
channel = getattr(self, 'channel', None)
if channel and (user not in channel.members):
channel.addtoproperty('members', user)
def add_new_channel(self):
self.addtoproperty('channels', Channel())
@content(
'channel',
icon='icon novaideo-icon icon-idea',
)
@implementer(IChannel)
class Channel(Commentable):
"""Channel class"""
type_title = _('Channel')
icon = 'icon novaideo-icon icon-idea'
templates = {'default': 'novaideo:views/templates/channel_result.pt'}
name = renamer()
members = SharedMultipleProperty('members', 'following_channels')
subject = SharedUniqueProperty('subject', 'channels')
def __init__(self, **kwargs):
super(Channel, self).__init__(**kwargs)
self.set_data(kwargs)
self._comments_at = OOBTree()
def add_comment(self, comment):
self._comments_at[comment.created_at] = get_oid(comment)
def remove_comment(self, comment):
self._comments_at.pop(comment.created_at)
def get_comments_between(self, start, end):
return list(self._comments_at.values(
min=start, max=end))
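    # _comments_at is an OOBTree keyed by creation date, so
    # get_comments_between() is an efficient ordered range scan over oids.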
def get_subject(self, user=None):
subject = self.subject
return subject if subject else getattr(self, '__parent__', None)
def get_title(self, user=None):
title = getattr(self, 'title', '')
if not title:
return getattr(self.get_subject(user), 'title', None)
return title
def is_discuss(self):
return self.subject.__class__.__name__.lower() == 'person'
@implementer(IEmojiable)
class Emojiable(Entity):
def __init__(self, **kwargs):
super(Emojiable, self).__init__(**kwargs)
self.emojis = OOBTree()
self.users_emoji = OOBTree()
def add_emoji(self, emoji, user):
user_oid = get_oid(user)
current_emoji = self.get_user_emoji(user)
if current_emoji:
self.remove_emoji(current_emoji, user)
if emoji:
self.emojis.setdefault(emoji, PersistentList())
self.emojis[emoji].append(user_oid)
self.users_emoji[user_oid] = emoji
def remove_emoji(self, emoji, user):
user_oid = get_oid(user)
if emoji in self.emojis and \
user_oid in self.emojis[emoji]:
self.emojis[emoji].remove(user_oid)
self.users_emoji.pop(user_oid)
def get_user_emoji(self, user):
user_oid = get_oid(user)
return self.users_emoji.get(user_oid, None)
def can_add_reaction(self, user, process):
return False
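# Illustrative use of Emojiable (a sketch; the emoji strings are arbitrary):
#   entity.add_emoji('+1', user)     # records the reaction, one emoji per user
#   entity.get_user_emoji(user)      # -> '+1'
#   entity.add_emoji('-1', user)     # replaces the previous reaction
#   entity.remove_emoji('-1', user)  # withdraws it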
@content(
'privatechannel',
icon='icon novaideo-icon icon-idea',
)
@implementer(IPrivateChannel)
class PrivateChannel(Channel):
"""Channel class"""
def __init__(self, **kwargs):
super(PrivateChannel, self).__init__(**kwargs)
self.set_data(kwargs)
def get_subject(self, user=None):
subject = None
for member in self.members:
if member is not user:
subject = member
break
return subject if subject else getattr(self, '__parent__', None)
def get_title(self, user=None):
title = getattr(self, 'title', '')
if not title:
return getattr(self.get_subject(user), 'title', None)
return title
@implementer(IVersionableEntity)
class VersionableEntity(Entity):
""" A Versionable entity is an entity that can be versioned"""
version = CompositeUniqueProperty('version', 'nextversion')
nextversion = SharedUniqueProperty('nextversion', 'version')
@property
def current_version(self):
""" Return the current version"""
if self.nextversion is None:
return self
else:
return self.nextversion.current_version
@property
def history(self):
""" Return all versions"""
result = []
if self.version is None:
return [self]
else:
result.append(self)
result.extend(self.version.history)
return result
def destroy(self):
"""Remove branch"""
if self.version:
self.version.destroy()
if self.nextversion:
self.nextversion.delfromproperty('version', self)
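# Version chains form a linked list: with versions v1 <- v2 <- v3 (each
# 'version' pointing at its predecessor), v1.current_version is v3 and
# v3.history == [v3, v2, v1]; destroy() prunes the branch of older versions.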
@implementer(IDuplicableEntity)
class DuplicableEntity(Entity):
""" A Duplicable entity is an entity that can be duplicated"""
originalentity = SharedUniqueProperty('originalentity', 'duplicates')
duplicates = SharedMultipleProperty('duplicates', 'originalentity')
@colander.deferred
def keywords_choice(node, kw):
root = getSite()
values = [(i, i) for i in sorted(root.keywords)]
create = getattr(root, 'can_add_keywords', True)
return Select2Widget(max_len=5,
values=values,
create=create,
multiple=True)
class SearchableEntitySchema(Schema):
keywords = colander.SchemaNode(
colander.Set(),
widget=keywords_choice,
title=_('Keywords'),
description=_("To add keywords, you need to separate them by commas "
"and then tap the « Enter » key to validate your selection.")
)
@implementer(ISearchableEntity)
class SearchableEntity(VisualisableElement, Entity):
""" A Searchable entity is an entity that can be searched"""
templates = {'default': 'novaideo:templates/views/default_result.pt',
'bloc': 'novaideo:templates/views/default_result.pt'}
def __init__(self, **kwargs):
super(SearchableEntity, self).__init__(**kwargs)
self.keywords = PersistentList()
@property
def is_published(self):
return 'published' in self.state
@property
def is_workable(self):
return self.is_published
@property
def relevant_data(self):
return [getattr(self, 'title', ''),
getattr(self, 'description', ''),
', '.join(getattr(self, 'keywords', []))]
def set_source_data(self, source_data):
if not hasattr(self, 'source_data'):
self.source_data = PersistentDict({})
app_name = source_data.get('app_name')
self.source_data.setdefault(app_name, {})
self.source_data[app_name] = source_data
def get_source_data(self, app_id):
if not hasattr(self, 'source_data'):
return {}
return self.source_data.get(app_id, {})
def is_managed(self, root):
return True
def get_title(self, user=None):
return getattr(self, 'title', '')
def _init_presentation_text(self):
pass
def get_release_date(self):
return getattr(self, 'release_date', self.modified_at)
def presentation_text(self, nb_characters=400):
return getattr(self, 'description', "")[:nb_characters]+'...'
def get_more_contents_criteria(self):
"return specific query, filter values"
return None, {
'metadata_filter': {
'states': ['published'],
'keywords': list(self.keywords)
}
}
@implementer(IPresentableEntity)
class PresentableEntity(Entity):
""" A Presentable entity is an entity that can be presented"""
def __init__(self, **kwargs):
super(PresentableEntity, self).__init__(**kwargs)
self._email_persons_contacted = PersistentList()
@property
def len_contacted(self):
return len(self._email_persons_contacted)
@property
def persons_contacted(self):
""" Return all contacted persons"""
dace_catalog = find_catalog('dace')
novaideo_catalog = find_catalog('novaideo')
identifier_index = novaideo_catalog['identifier']
object_provides_index = dace_catalog['object_provides']
result = []
for email in self._email_persons_contacted:
query = object_provides_index.any([IPerson.__identifier__]) &\
identifier_index.any([email])
users = list(query.execute().all())
user = users[0] if users else None
if user is not None:
result.append(user)
else:
result.append(email.split('@')[0].split('+')[0])
return set(result)
@implementer(ICorrelableEntity)
class CorrelableEntity(Entity):
"""
A Correlable entity is an entity that can be correlated.
A correlation is an abstract association between source entity
and targets entities.
"""
source_correlations = SharedMultipleProperty('source_correlations',
'source')
target_correlations = SharedMultipleProperty('target_correlations',
'targets')
@property
def correlations(self):
"""Return all source correlations and target correlations"""
result = [c.target for c in self.source_correlations]
result.extend([c.source for c in self.target_correlations])
return list(set(result))
@property
def all_source_related_contents(self):
lists_targets = [(c.targets, c) for c in self.source_correlations]
return [(target, c) for targets, c in lists_targets
for target in targets]
@property
def all_target_related_contents(self):
return [(c.source, c) for c in self.target_correlations]
@property
def all_related_contents(self):
related_contents = self.all_source_related_contents
related_contents.extend(self.all_target_related_contents)
return related_contents
@property
def contextualized_contents(self):
lists_contents = [(c.targets, c) for c in
self.contextualized_correlations]
lists_contents = [(target, c) for targets, c in lists_contents
for target in targets]
lists_contents.extend([(c.source, c) for c in
self.contextualized_correlations])
return lists_contents
def get_related_contents(self, type_=None, tags=[]):
if type_ is None and not tags:
return self.all_related_contents
return [(content, c) for content, c in self.all_related_contents
if (type_ is None or c.type == type_) and
(not tags or any(t in tags for t in c.tags))]
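# Illustrative shape of the helpers above: every entry is a
# (content, correlation) pair, e.g.
#   entity.get_related_contents(type_='solution', tags=['tag1'])
#   -> [(<related content>, <Correlation>), ...]
# filtered on the correlation's type and tags ('solution' and 'tag1' are
# hypothetical values).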
class ExaminableEntity(Entity):
"""
    An Examinable entity is an entity that can be examined.
"""
opinions_base = {}
@property
def opinion_value(self):
return self.opinions_base.get(
getattr(self, 'opinion', {}).get('opinion', ''), None)
@implementer(INode)
class Node(Entity):
def __init__(self, **kwargs):
super(Node, self).__init__(**kwargs)
self.graph = PersistentDict()
def get_node_id(self):
return str(self.__oid__).replace('-', '_')
def get_node_descriminator(self):
return 'node'
def init_graph(self, calculated=[]):
result = self.get_nodes_data()
self.graph = PersistentDict(result[0])
oid = self.get_node_id()
newcalculated = list(calculated)
newcalculated.append(oid)
for node in self.graph:
if node not in newcalculated:
node_obj = get_obj(self.graph[node]['oid'])
if node_obj:
graph, newcalculated = node_obj.init_graph(
newcalculated)
return self.graph, newcalculated
def get_nodes_data(self, calculated=[]):
oid = self.get_node_id()
newcalculated = list(calculated)
if oid in calculated:
return {}, newcalculated
all_target_contents = [r for r in self.all_target_related_contents
if isinstance(r[0], Node)]
targets = [{'id': t.get_node_id(),
'type': c.type_name,
'oid': getattr(t, '__oid__', 0)}
for (t, c) in all_target_contents]
all_source_contents = [r for r in self.all_source_related_contents
if r[0] not in all_target_contents
and isinstance(r[0], Node)]
targets.extend([{'id': t.get_node_id(),
'type': c.type_name,
'oid': getattr(t, '__oid__', 0)}
for (t, c) in all_source_contents])
result = {oid: {
'oid': self.__oid__,
'title': self.title,
'descriminator': self.get_node_descriminator(),
'targets': targets
}}
all_source_contents.extend(all_target_contents)
newcalculated.append(oid)
for r_content in all_source_contents:
sub_result, newcalculated = r_content[0].get_nodes_data(newcalculated)
result.update(sub_result)
return result, newcalculated
def get_all_sub_nodes(self):
oid = self.get_node_id()
return set([get_obj(self.graph[id_]['oid']) for id_ in self.graph
if id_ != oid])
def get_sub_nodes(self):
oid = self.get_node_id()
return set([get_obj(node['oid']) for
node in self.graph[oid]['targets']])
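# Illustrative shape of the graph built above (oids are hypothetical):
#   {'1234': {'oid': 1234, 'title': '...', 'descriminator': 'node',
#             'targets': [{'id': '5678', 'type': 'solve', 'oid': 5678}]},
#    '5678': {...}}
# Node ids are oids rendered as strings with '-' replaced by '_'.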
@implementer(ISignalableEntity)
class SignalableEntity(Entity):
reports = CompositeMultipleProperty('reports')
censoring_reason = CompositeUniqueProperty('censoring_reason')
def __init__(self, **kwargs):
super(SignalableEntity, self).__init__(**kwargs)
self.len_reports = 0
self.init_len_current_reports()
@property
def subject(self):
return self.__parent__
def init_len_current_reports(self):
self.len_current_reports = 0
def addtoproperty(self, name, value, moving=None):
super(SignalableEntity, self).addtoproperty(name, value, moving)
if name == 'reports':
self.len_current_reports = getattr(self, 'len_current_reports', 0)
self.len_reports = getattr(self, 'len_reports', 0)
self.len_current_reports += 1
self.len_reports += 1
@implementer(ISustainable)
class Sustainable(Entity):
"""Question class"""
def __init__(self, **kwargs):
super(Sustainable, self).__init__(**kwargs)
self.set_data(kwargs)
self.votes_positive = OOBTree()
self.votes_negative = OOBTree()
@property
def len_support(self):
return len(self.votes_positive)
@property
def len_opposition(self):
return len(self.votes_negative)
def add_vote(self, user, date, kind='positive'):
oid = get_oid(user)
if kind == 'positive':
self.votes_positive[oid] = date
else:
self.votes_negative[oid] = date
def withdraw_vote(self, user):
oid = get_oid(user)
if oid in self.votes_positive:
self.votes_positive.pop(oid)
elif oid in self.votes_negative:
self.votes_negative.pop(oid)
def has_vote(self, user):
oid = get_oid(user)
return oid in self.votes_positive or \
oid in self.votes_negative
def has_negative_vote(self, user):
oid = get_oid(user)
return oid in self.votes_negative
def has_positive_vote(self, user):
oid = get_oid(user)
return oid in self.votes_positive
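# Illustrative use of Sustainable (a sketch):
#   entity.add_vote(user, datetime.datetime.now(), kind='positive')
#   entity.len_support      # -> 1
#   entity.withdraw_vote(user)
#   entity.has_vote(user)   # -> False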
@implementer(ITokenable)
class Tokenable(Entity):
"""Question class"""
tokens_opposition = CompositeMultipleProperty('tokens_opposition')
tokens_support = CompositeMultipleProperty('tokens_support')
def __init__(self, **kwargs):
super(Tokenable, self).__init__(**kwargs)
self.set_data(kwargs)
self.allocated_tokens = OOBTree()
self.len_allocated_tokens = PersistentDict({})
def add_token(self, user, evaluation_type):
user_oid = get_oid(user)
if user_oid in self.allocated_tokens:
self.remove_token(user)
self.allocated_tokens[user_oid] = evaluation_type
self.len_allocated_tokens.setdefault(evaluation_type, 0)
self.len_allocated_tokens[evaluation_type] += 1
def remove_token(self, user):
user_oid = get_oid(user)
if user_oid in self.allocated_tokens:
evaluation_type = self.allocated_tokens.pop(user_oid)
self.len_allocated_tokens.setdefault(evaluation_type, 0)
self.len_allocated_tokens[evaluation_type] -= 1
def evaluators(self, evaluation_type=None):
if evaluation_type:
return [get_obj(key) for value, key
in self.allocated_tokens.byValue(evaluation_type)]
return [get_obj(key) for key
in self.allocated_tokens.keys()]
def evaluation(self, user):
user_oid = get_oid(user, None)
return self.allocated_tokens.get(user_oid, None)
def remove_tokens(self, force=False):
evaluators = self.evaluators()
for user in evaluators:
user.remove_token(self)
if force:
self.remove_token(user)
def user_has_token(self, user, root=None):
if hasattr(user, 'has_token'):
return user.has_token(self, root)
return False
def init_support_history(self):
# [(user_oid, date, support_type), ...], support_type = {1:support, 0:oppose, -1:withdraw}
if not hasattr(self, '_support_history'):
setattr(self, '_support_history', PersistentList())
@property
def len_support(self):
return self.len_allocated_tokens.get(Evaluations.support, 0)
@property
def len_opposition(self):
return self.len_allocated_tokens.get(Evaluations.oppose, 0)
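# Illustrative use of Tokenable (a sketch):
#   entity.add_token(user, Evaluations.support)  # re-allocates any prior token
#   entity.len_support                           # -> 1
#   entity.evaluation(user)                      # -> 'support'
#   entity.remove_token(user)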
| ecreall/nova-ideo | novaideo/core.py | Python | agpl-3.0 | 25,754 |
import urllib.parse
import sys
from ccs import core
from ccs import constants
from . import response
def ticker():
s = __name__.split(".")[1]
r = sys._getframe().f_code.co_name
# complete request
cr = core.request(s, r)
return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s))
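# Note: every call in this module follows the same pattern. The exchange name
# is the second component of the package path (__name__.split(".")[1], here
# presumably "bitnz") and the request name is the calling function's own name,
# so ticker() effectively performs core.get() for core.request("bitnz",
# "ticker") against the configured hostname.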
def trades():
s = __name__.split(".")[1]
r = sys._getframe().f_code.co_name
# complete request
cr = core.request(s, r)
return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s))
# some problem with the encoding
# def trades_chart():
# s = __name__.split(".")[1]
# r = sys._getframe().f_code.co_name
#
# # complete request
# cr = core.request(s, r)
#
# return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s))
def orderbook():
s = __name__.split(".")[1]
r = sys._getframe().f_code.co_name
# complete request
cr = core.request(s, r)
return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s)) | Honzin/ccs | dev/bitnz/public/__init__.py | Python | agpl-3.0 | 1,090 |
__author__ = 'tbri'
import logging
from openerp import models, fields, api, _
_logger = logging.getLogger(__name__)
class add_sponsorship_wizard(models.TransientModel):
_name = 'add_sponsorship_wizard'
def _get_all_children(self):
c = []
children = self.env['res.partner'].search([('sponsored_child', '=', 'True')])
for n in children:
child_ref = '%s %s' % (n.child_ident, n.name)
c.append( (n.id, child_ref) )
return c
#sponsor_id = fields.Many2one('sponsor')
# see partner.py...........
## child_id = fields.Many2one('sponsored_child', domain=[('active','=',True)])
    child_id = fields.Selection(_get_all_children, string=_('Child'))
sub_sponsor = fields.Many2one('res.partner', _('Sub Sponsor'), domain=[('sub_sponsor','=',True)])
start_date = fields.Date(_('Start date'))
end_date = fields.Date(_('End date'))
@api.one
def data_save(self):
print "DATA_SAVE 1", self._context
"""
DATA_SAVAE! {'lang': 'en_US', 'search_disable_custom_filters': True, 'tz': False, 'uid': 1, 'active_model': 'sponsor', 'active_ids': [1], 'active_id': 1}
"""
model = self._context['active_model']
active_id = self._context['active_id']
assert model == 'res.partner'
sponsor = self.env['res.partner'].browse(active_id)
assert sponsor.sponsor
print "DATA_SAVE 2", sponsor
print "DATA_SAVE 3", self.child_id
sponsorship = {'sponsor_id' : active_id,
'sponsored_child' : int(self.child_id),
'start_date' : self.start_date,
'end_date' : self.end_date,
'sub_sponsor' : self.sub_sponsor}
print "CREATING SPONSORSHP"
self.env['sponsorship'].create( sponsorship)
return {'type': 'ir.actions.act_window_close'}
| bringsvor/sponsor | wizards/add_sponsorship.py | Python | agpl-3.0 | 1,922 |
#!/usr/bin/env python2
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <joseph@artefactual.com>
from __future__ import print_function
import os
import sys
import shutil
import django
django.setup()
# dashboard
from main.models import Job, SIP
# archivematicaCommon
from custom_handlers import get_script_logger
from databaseFunctions import createSIP
if __name__ == '__main__':
logger = get_script_logger("archivematica.mcp.client.generateDIPFromAIPGenerateDIP")
    # Move the METS file into the DIP, then move the DIP directory into the
    # uploadDIP watched directory and register the SIP in the dashboard.
    fauxUUID = sys.argv[1]  # not used below; documents the expected argv layout
unitPath = sys.argv[2]
date = sys.argv[3]
basename = os.path.basename(unitPath[:-1])
uuidLen = 36
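    # The unit basename is expected to be of the form
    #   <originalSIPName>-<originalSIPUUID>-<currentUUID>
    # so stripping one "-<uuid>" suffix (uuidLen + 1 characters) exposes the
    # original SIP UUID, and stripping two exposes the original SIP name.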
originalSIPName = basename[:-(uuidLen+1)*2]
originalSIPUUID = basename[:-(uuidLen+1)][-uuidLen:]
METSPath = os.path.join(unitPath, "metadata/submissionDocumentation/data/", "METS.%s.xml" % (originalSIPUUID))
if not os.path.isfile(METSPath):
print("Mets file not found: ", METSPath, file=sys.stderr)
exit(-1)
# move mets to DIP
src = METSPath
dst = os.path.join(unitPath, "DIP", os.path.basename(METSPath))
shutil.move(src, dst)
# Move DIP
src = os.path.join(unitPath, "DIP")
dst = os.path.join("/var/archivematica/sharedDirectory/watchedDirectories/uploadDIP/", originalSIPName + "-" + originalSIPUUID)
shutil.move(src, dst)
try:
SIP.objects.get(uuid=originalSIPUUID)
except SIP.DoesNotExist:
# otherwise doesn't appear in dashboard
createSIP(unitPath, UUID=originalSIPUUID)
Job.objects.create(jobtype="Hack to make DIP Jobs appear",
directory=unitPath,
sip_id=originalSIPUUID,
currentstep="Completed successfully",
unittype="unitSIP",
microservicegroup="Upload DIP")
| sevein/archivematica | src/MCPClient/lib/clientScripts/generateDIPFromAIPGenerateDIP.py | Python | agpl-3.0 | 2,681 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from openerp.exceptions import ValidationError
class AccountDocumentTax(models.AbstractModel):
_name = 'account.document.tax'
currency_id = fields.Many2one('res.currency')
amount = fields.Monetary('Importe', currency_field='currency_id', required=True)
base = fields.Monetary('Base', currency_field='currency_id')
jurisdiction = fields.Selection(
[
('nacional', 'Nacional'),
('provincial', 'Provincial'),
('municipal', 'Municipal')
],
string='Jurisdiccion',
required=True,
)
name = fields.Char('Nombre', required=True)
company_id = fields.Many2one(
'res.company',
string='Compania',
required=True,
default=lambda self: self.env.user.company_id,
)
@api.constrains('amount')
def check_amount(self):
for tax in self:
if tax.amount <= 0:
raise ValidationError('El monto del impuesto debe ser mayor a 0')
@api.constrains('base')
def check_base(self):
for tax in self:
if tax.base < 0:
raise ValidationError('La base del impuesto no puede ser negativa')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| odoo-arg/odoo_l10n_ar | l10n_ar_taxes/models/account_document_tax.py | Python | agpl-3.0 | 2,137 |
#-*- coding: utf-8 -*-
"""
Certificates Tests.
"""
import itertools
import json
import ddt
import mock
import six
from django.conf import settings
from django.test.utils import override_settings
from opaque_keys.edx.keys import AssetKey
from six.moves import range
from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from cms.djangoapps.contentstore.utils import get_lms_link_for_certificate_web_view, reverse_course_url
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import CourseInstructorRole, CourseStaffRole
from common.djangoapps.student.tests.factories import UserFactory
from common.djangoapps.util.testing import EventTestMixin, UrlResetMixin
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from ..certificates import CERTIFICATE_SCHEMA_VERSION, CertificateManager
FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True
CERTIFICATE_JSON = {
u'name': u'Test certificate',
u'description': u'Test description',
u'is_active': True,
u'version': CERTIFICATE_SCHEMA_VERSION,
}
CERTIFICATE_JSON_WITH_SIGNATORIES = {
u'name': u'Test certificate',
u'description': u'Test description',
u'version': CERTIFICATE_SCHEMA_VERSION,
u'course_title': 'Course Title Override',
u'is_active': True,
u'signatories': [
{
"name": "Bob Smith",
"title": "The DEAN.",
"signature_image_path": "/c4x/test/CSS101/asset/Signature.png"
}
]
}
C4X_SIGNATORY_PATH = '/c4x/test/CSS101/asset/Signature{}.png'
SIGNATORY_PATH = 'asset-v1:test+CSS101+SP2017+type@asset+block@Signature{}.png'
# pylint: disable=no-member
class HelperMethods(object):
"""
Mixin that provides useful methods for certificate configuration tests.
"""
def _create_fake_images(self, asset_keys):
"""
Creates fake image files for a list of asset_keys.
"""
for asset_key_string in asset_keys:
asset_key = AssetKey.from_string(asset_key_string)
content = StaticContent(
asset_key, "Fake asset", "image/png", "data",
)
contentstore().save(content)
def _add_course_certificates(self, count=1, signatory_count=0, is_active=False,
asset_path_format=C4X_SIGNATORY_PATH):
"""
Create certificate for the course.
"""
signatories = [
{
'name': 'Name ' + str(i),
'title': 'Title ' + str(i),
'signature_image_path': asset_path_format.format(i),
'id': i
} for i in range(signatory_count)
]
# create images for signatory signatures except the last signatory
self._create_fake_images(signatory['signature_image_path'] for signatory in signatories[:-1])
certificates = [
{
'id': i,
'name': 'Name ' + str(i),
'description': 'Description ' + str(i),
'signatories': signatories,
'version': CERTIFICATE_SCHEMA_VERSION,
'is_active': is_active
} for i in range(count)
]
self.course.certificates = {'certificates': certificates}
self.save_course()
# pylint: disable=no-member
class CertificatesBaseTestCase(object):
"""
Mixin with base test cases for the certificates.
"""
def _remove_ids(self, content):
"""
Remove ids from the response. We cannot predict IDs, because they're
generated randomly.
        We use this method to clean up the response when creating a new certificate.
"""
certificate_id = content.pop("id")
return certificate_id
def test_required_fields_are_absent(self):
"""
        Test that requests missing required fields are rejected.
"""
bad_jsons = [
# must have name of the certificate
{
u'description': 'Test description',
u'version': CERTIFICATE_SCHEMA_VERSION
},
# an empty json
{},
]
for bad_json in bad_jsons:
response = self.client.post(
self._url(),
data=json.dumps(bad_json),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 400)
self.assertNotIn("Location", response)
content = json.loads(response.content.decode('utf-8'))
self.assertIn("error", content)
def test_invalid_json(self):
"""
Test invalid json handling.
"""
# Invalid JSON.
invalid_json = u"{u'name': 'Test Name', u'description': 'Test description'," \
u" u'version': " + str(CERTIFICATE_SCHEMA_VERSION) + ", []}"
response = self.client.post(
self._url(),
data=invalid_json,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 400)
self.assertNotIn("Location", response)
content = json.loads(response.content.decode('utf-8'))
self.assertIn("error", content)
def test_certificate_data_validation(self):
        # Test certificate schema version
json_data_1 = {
u'version': 100,
u'name': u'Test certificate',
u'description': u'Test description'
}
with self.assertRaises(Exception) as context:
CertificateManager.validate(json_data_1)
self.assertIn(
"Unsupported certificate schema version: 100. Expected version: 1.",
str(context.exception)
)
        # Test certificate name is missing
json_data_2 = {
u'version': CERTIFICATE_SCHEMA_VERSION,
u'description': u'Test description'
}
with self.assertRaises(Exception) as context:
CertificateManager.validate(json_data_2)
self.assertIn('must have name of the certificate', str(context.exception))
@ddt.ddt
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
class CertificatesListHandlerTestCase(
EventTestMixin, CourseTestCase, CertificatesBaseTestCase, HelperMethods, UrlResetMixin
):
"""
Test cases for certificates_list_handler.
"""
def setUp(self): # lint-amnesty, pylint: disable=arguments-differ
"""
Set up CertificatesListHandlerTestCase.
"""
super(CertificatesListHandlerTestCase, self).setUp('cms.djangoapps.contentstore.views.certificates.tracker') # lint-amnesty, pylint: disable=super-with-arguments
self.reset_urls()
def _url(self):
"""
Return url for the handler.
"""
return reverse_course_url('certificates_list_handler', self.course.id)
def test_can_create_certificate(self):
"""
Test that you can create a certificate.
"""
expected = {
u'version': CERTIFICATE_SCHEMA_VERSION,
u'name': u'Test certificate',
u'description': u'Test description',
u'is_active': True,
u'signatories': []
}
response = self.client.ajax_post(
self._url(),
data=CERTIFICATE_JSON
)
self.assertEqual(response.status_code, 201)
self.assertIn("Location", response)
content = json.loads(response.content.decode('utf-8'))
certificate_id = self._remove_ids(content)
self.assertEqual(content, expected)
self.assert_event_emitted(
'edx.certificate.configuration.created',
course_id=six.text_type(self.course.id),
configuration_id=certificate_id,
)
def test_cannot_create_certificate_if_user_has_no_write_permissions(self):
"""
        Tests that a user without write permissions on the course is not able to create a certificate.
"""
user = UserFactory()
self.client.login(username=user.username, password='test')
response = self.client.ajax_post(
self._url(),
data=CERTIFICATE_JSON
)
self.assertEqual(response.status_code, 403)
@override_settings(LMS_BASE=None)
def test_no_lms_base_for_certificate_web_view_link(self):
test_link = get_lms_link_for_certificate_web_view(
course_key=self.course.id,
mode='honor'
)
self.assertEqual(test_link, None)
@override_settings(LMS_BASE="lms_base_url")
def test_lms_link_for_certificate_web_view(self):
test_url = "//lms_base_url/certificates/" \
"course/" + six.text_type(self.course.id) + '?preview=honor'
link = get_lms_link_for_certificate_web_view(
course_key=self.course.id,
mode='honor'
)
self.assertEqual(link, test_url)
@mock.patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
def test_certificate_info_in_response(self):
"""
        Test that the certificate is created and rendered properly for a non-audit course mode.
"""
CourseModeFactory.create(course_id=self.course.id, mode_slug='verified')
response = self.client.ajax_post(
self._url(),
data=CERTIFICATE_JSON_WITH_SIGNATORIES
)
self.assertEqual(response.status_code, 201)
# in html response
result = self.client.get_html(self._url())
self.assertContains(result, 'Test certificate')
self.assertContains(result, 'Test description')
# in JSON response
response = self.client.get_json(self._url())
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['name'], 'Test certificate')
self.assertEqual(data[0]['description'], 'Test description')
self.assertEqual(data[0]['version'], CERTIFICATE_SCHEMA_VERSION)
@mock.patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
def test_certificate_info_not_in_response(self):
"""
        Test that the certificate is not rendered for an audit-only course mode.
"""
response = self.client.ajax_post(
self._url(),
data=CERTIFICATE_JSON_WITH_SIGNATORIES
)
self.assertEqual(response.status_code, 201)
# in html response
result = self.client.get_html(self._url())
self.assertNotContains(result, 'Test certificate')
def test_unsupported_http_accept_header(self):
"""
Test if not allowed header present in request.
"""
response = self.client.get(
self._url(),
HTTP_ACCEPT="text/plain",
)
self.assertEqual(response.status_code, 406)
def test_certificate_unsupported_method(self):
"""
Unit Test: test_certificate_unsupported_method
"""
resp = self.client.put(self._url())
self.assertEqual(resp.status_code, 405)
def test_not_permitted(self):
"""
        Test that a permission-denied error is raised when the user does not have read access to the course.
"""
test_user_client, test_user = self.create_non_staff_authed_user_client()
CourseEnrollment.enroll(test_user, self.course.id)
response = test_user_client.ajax_post(
self._url(),
data=CERTIFICATE_JSON
)
self.assertContains(response, "error", status_code=403)
def test_audit_course_mode_is_skipped(self):
"""
Tests audit course mode is skipped when rendering certificates page.
"""
CourseModeFactory.create(course_id=self.course.id)
CourseModeFactory.create(course_id=self.course.id, mode_slug='verified')
response = self.client.get_html(
self._url(),
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'verified')
self.assertNotContains(response, 'audit')
def test_audit_only_disables_cert(self):
"""
        Tests that certificates are disabled when the course has only the audit mode.
"""
CourseModeFactory.create(course_id=self.course.id, mode_slug='audit')
response = self.client.get_html(
self._url(),
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This course does not use a mode that offers certificates.')
self.assertNotContains(response, 'This module is not enabled.')
self.assertNotContains(response, 'Loading')
@ddt.data(
['audit', 'verified'],
['verified'],
['audit', 'verified', 'credit'],
['verified', 'credit'],
['professional']
)
def test_non_audit_enables_cert(self, slugs):
"""
        Tests that certificates are enabled when the course has at least one non-audit mode.
"""
for slug in slugs:
CourseModeFactory.create(course_id=self.course.id, mode_slug=slug)
response = self.client.get_html(
self._url(),
)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'This course does not use a mode that offers certificates.')
self.assertNotContains(response, 'This module is not enabled.')
self.assertContains(response, 'Loading')
def test_assign_unique_identifier_to_certificates(self):
"""
Test certificates have unique ids
"""
self._add_course_certificates(count=2)
json_data = {
u'version': CERTIFICATE_SCHEMA_VERSION,
u'name': u'New test certificate',
u'description': u'New test description',
u'is_active': True,
u'signatories': []
}
response = self.client.post(
self._url(),
data=json.dumps(json_data),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
new_certificate = json.loads(response.content.decode('utf-8'))
for prev_certificate in self.course.certificates['certificates']:
self.assertNotEqual(new_certificate.get('id'), prev_certificate.get('id'))
@ddt.ddt
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
class CertificatesDetailHandlerTestCase(
EventTestMixin, CourseTestCase, CertificatesBaseTestCase, HelperMethods, UrlResetMixin
):
"""
Test cases for CertificatesDetailHandlerTestCase.
"""
_id = 0
def setUp(self): # pylint: disable=arguments-differ
"""
Set up CertificatesDetailHandlerTestCase.
"""
super(CertificatesDetailHandlerTestCase, self).setUp('cms.djangoapps.contentstore.views.certificates.tracker') # lint-amnesty, pylint: disable=super-with-arguments
self.reset_urls()
def _url(self, cid=-1):
"""
Return url for the handler.
"""
cid = cid if cid > 0 else self._id
return reverse_course_url(
'certificates_detail_handler',
self.course.id,
kwargs={'certificate_id': cid},
)
def test_can_create_new_certificate_if_it_does_not_exist(self):
"""
PUT/POST new certificate.
"""
expected = {
u'id': 666,
u'version': CERTIFICATE_SCHEMA_VERSION,
u'name': u'Test certificate',
u'description': u'Test description',
u'is_active': True,
u'course_title': u'Course Title Override',
u'signatories': []
}
response = self.client.put(
self._url(cid=666),
data=json.dumps(expected),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, expected)
self.assert_event_emitted(
'edx.certificate.configuration.created',
course_id=six.text_type(self.course.id),
configuration_id=666,
)
def test_can_edit_certificate(self):
"""
Edit certificate, check its id and modified fields.
"""
self._add_course_certificates(count=2)
expected = {
u'id': 1,
u'version': CERTIFICATE_SCHEMA_VERSION,
u'name': u'New test certificate',
u'description': u'New test description',
u'is_active': True,
u'course_title': u'Course Title Override',
u'signatories': []
}
response = self.client.put(
self._url(cid=1),
data=json.dumps(expected),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, expected)
self.assert_event_emitted(
'edx.certificate.configuration.modified',
course_id=six.text_type(self.course.id),
configuration_id=1,
)
self.reload_course()
# Verify that certificate is properly updated in the course.
course_certificates = self.course.certificates['certificates']
self.assertEqual(len(course_certificates), 2)
self.assertEqual(course_certificates[1].get('name'), u'New test certificate')
self.assertEqual(course_certificates[1].get('description'), 'New test description')
def test_can_edit_certificate_without_is_active(self):
"""
        Tests that a user can edit a certificate even if the is_active attribute is not present
        for the given certificate. Old courses might not have an is_active attribute in their certificate data.
"""
certificates = [
{
'id': 1,
'name': 'certificate with is_active',
'description': 'Description ',
'signatories': [],
'version': CERTIFICATE_SCHEMA_VERSION,
}
]
self.course.certificates = {'certificates': certificates}
self.save_course()
expected = {
u'id': 1,
u'version': CERTIFICATE_SCHEMA_VERSION,
u'name': u'New test certificate',
u'description': u'New test description',
u'is_active': True,
u'course_title': u'Course Title Override',
u'signatories': []
}
response = self.client.post(
self._url(cid=1),
data=json.dumps(expected),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 201)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, expected)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_can_delete_certificate_with_signatories(self, signatory_path):
"""
Delete certificate
"""
self._add_course_certificates(count=2, signatory_count=1, asset_path_format=signatory_path)
response = self.client.delete(
self._url(cid=1),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 204)
self.assert_event_emitted(
'edx.certificate.configuration.deleted',
course_id=six.text_type(self.course.id),
configuration_id='1',
)
self.reload_course()
# Verify that certificates are properly updated in the course.
certificates = self.course.certificates['certificates']
self.assertEqual(len(certificates), 1)
self.assertEqual(certificates[0].get('name'), 'Name 0')
self.assertEqual(certificates[0].get('description'), 'Description 0')
def test_can_delete_certificate_with_slash_prefix_signatory(self):
"""
Delete certificate
"""
self._add_course_certificates(count=2, signatory_count=1, asset_path_format="/" + SIGNATORY_PATH)
response = self.client.delete(
self._url(cid=1),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 204)
self.assert_event_emitted(
'edx.certificate.configuration.deleted',
course_id=six.text_type(self.course.id),
configuration_id='1',
)
self.reload_course()
# Verify that certificates are properly updated in the course.
certificates = self.course.certificates['certificates']
self.assertEqual(len(certificates), 1)
self.assertEqual(certificates[0].get('name'), 'Name 0')
self.assertEqual(certificates[0].get('description'), 'Description 0')
@ddt.data("not_a_valid_asset_key{}.png", "/not_a_valid_asset_key{}.png")
def test_can_delete_certificate_with_invalid_signatory(self, signatory_path):
"""
Delete certificate
"""
self._add_course_certificates(count=2, signatory_count=1, asset_path_format=signatory_path)
response = self.client.delete(
self._url(cid=1),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 204)
self.assert_event_emitted(
'edx.certificate.configuration.deleted',
course_id=six.text_type(self.course.id),
configuration_id='1',
)
self.reload_course()
# Verify that certificates are properly updated in the course.
certificates = self.course.certificates['certificates']
self.assertEqual(len(certificates), 1)
self.assertEqual(certificates[0].get('name'), 'Name 0')
self.assertEqual(certificates[0].get('description'), 'Description 0')
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_delete_certificate_without_write_permissions(self, signatory_path):
"""
Tests certificate deletion without write permission on course.
"""
self._add_course_certificates(count=2, signatory_count=1, asset_path_format=signatory_path)
user = UserFactory()
self.client.login(username=user.username, password='test')
response = self.client.delete(
self._url(cid=1),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 403)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_delete_certificate_without_global_staff_permissions(self, signatory_path):
"""
Tests deletion of an active certificate without global staff permission on course.
"""
self._add_course_certificates(count=2, signatory_count=1, is_active=True, asset_path_format=signatory_path)
user = UserFactory()
for role in [CourseInstructorRole, CourseStaffRole]:
role(self.course.id).add_users(user)
self.client.login(username=user.username, password='test')
response = self.client.delete(
self._url(cid=1),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 403)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_update_active_certificate_without_global_staff_permissions(self, signatory_path):
"""
Tests update of an active certificate without global staff permission on course.
"""
self._add_course_certificates(count=2, signatory_count=1, is_active=True, asset_path_format=signatory_path)
cert_data = {
u'id': 1,
u'version': CERTIFICATE_SCHEMA_VERSION,
u'name': u'New test certificate',
u'description': u'New test description',
u'course_title': u'Course Title Override',
u'org_logo_path': '',
u'is_active': False,
u'signatories': []
}
user = UserFactory()
for role in [CourseInstructorRole, CourseStaffRole]:
role(self.course.id).add_users(user)
self.client.login(username=user.username, password='test')
response = self.client.put(
self._url(cid=1),
data=json.dumps(cert_data),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 403)
def test_delete_non_existing_certificate(self):
"""
        Try to delete a non-existing certificate. It should return status code 404 Not Found.
"""
self._add_course_certificates(count=2)
response = self.client.delete(
self._url(cid=100),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 404)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_can_delete_signatory(self, signatory_path):
"""
Delete an existing certificate signatory
"""
self._add_course_certificates(count=2, signatory_count=3, asset_path_format=signatory_path)
certificates = self.course.certificates['certificates']
signatory = certificates[1].get("signatories")[1]
image_asset_location = AssetKey.from_string(signatory['signature_image_path'])
content = contentstore().find(image_asset_location)
self.assertIsNotNone(content)
test_url = '{}/signatories/1'.format(self._url(cid=1))
response = self.client.delete(
test_url,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 204)
self.reload_course()
# Verify that certificates are properly updated in the course.
certificates = self.course.certificates['certificates']
self.assertEqual(len(certificates[1].get("signatories")), 2)
# make sure signatory signature image is deleted too
self.assertRaises(NotFoundError, contentstore().find, image_asset_location)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_deleting_signatory_without_signature(self, signatory_path):
"""
        Delete a signatory whose signature image has already been removed or does not exist
"""
self._add_course_certificates(count=2, signatory_count=4, asset_path_format=signatory_path)
test_url = '{}/signatories/3'.format(self._url(cid=1))
response = self.client.delete(
test_url,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 204)
def test_delete_signatory_non_existing_certificate(self):
"""
        Try to delete a signatory of a non-existing certificate. It should return status code 404 Not Found.
"""
self._add_course_certificates(count=2)
test_url = '{}/signatories/1'.format(self._url(cid=100))
response = self.client.delete(
test_url,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 404)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_certificate_activation_success(self, signatory_path):
"""
Activate and Deactivate the course certificate
"""
test_url = reverse_course_url('certificate_activation_handler', self.course.id)
self._add_course_certificates(count=1, signatory_count=2, asset_path_format=signatory_path)
is_active = True
for i in range(2):
if i == 1:
is_active = not is_active
response = self.client.post(
test_url,
data=json.dumps({"is_active": is_active}),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 200)
course = self.store.get_course(self.course.id)
certificates = course.certificates['certificates']
self.assertEqual(certificates[0].get('is_active'), is_active)
cert_event_type = 'activated' if is_active else 'deactivated'
self.assert_event_emitted(
'.'.join(['edx.certificate.configuration', cert_event_type]),
course_id=six.text_type(self.course.id),
)
@ddt.data(*itertools.product([True, False], [C4X_SIGNATORY_PATH, SIGNATORY_PATH]))
@ddt.unpack
def test_certificate_activation_without_write_permissions(self, activate, signatory_path):
"""
        Tests that certificate activation and deactivation are not allowed if the user
        does not have write permissions on the course.
"""
test_url = reverse_course_url('certificate_activation_handler', self.course.id)
self._add_course_certificates(count=1, signatory_count=2, asset_path_format=signatory_path)
user = UserFactory()
self.client.login(username=user.username, password='test')
response = self.client.post(
test_url,
data=json.dumps({"is_active": activate}),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 403)
@ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
def test_certificate_activation_failure(self, signatory_path):
"""
        Certificate activation should fail with a permission-denied error when the user
        does not have read access to the course.
"""
test_url = reverse_course_url('certificate_activation_handler', self.course.id)
test_user_client, test_user = self.create_non_staff_authed_user_client()
CourseEnrollment.enroll(test_user, self.course.id)
self._add_course_certificates(count=1, signatory_count=2, asset_path_format=signatory_path)
response = test_user_client.post(
test_url,
data=json.dumps({"is_active": True}),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 403)
course = self.store.get_course(self.course.id)
certificates = course.certificates['certificates']
self.assertEqual(certificates[0].get('is_active'), False)
| stvstnfrd/edx-platform | cms/djangoapps/contentstore/views/tests/test_certificates.py | Python | agpl-3.0 | 31,931 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("popolo", "0002_update_models_from_upstream"),
("results", "0010_resultevent_winner_party_new"),
]
operations = [
migrations.AddField(
model_name="resultevent",
name="post_new",
field=models.ForeignKey(
blank=True,
to="popolo.Post",
null=True,
on_delete=models.CASCADE,
),
)
]
| DemocracyClub/yournextrepresentative | ynr/apps/results/migrations/0011_resultevent_post_new.py | Python | agpl-3.0 | 539 |
import os
from importlib import import_module
def import_module_attr(path):
package, module = path.rsplit('.', 1)
return getattr(import_module(package), module)
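# Illustrative sketch (not part of the original module): the helper splits a
# dotted path on its last dot, imports the left part, and returns the named
# attribute, e.g.:
#   import_module_attr('collections.OrderedDict')  # -> the OrderedDict class
#   import_module_attr('django.conf.settings')     # -> Django's settings object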
settings = import_module_attr(
os.getenv('COMMENTS_MODERATION_SETTINGS_MODULE', 'django.conf.settings')
)
MODERATION_MODE = getattr(settings, 'COMMENTS_MODERATION_MODE', 'approve')
| PetrDlouhy/django-comments-moderation | comments_moderation/settings.py | Python | agpl-3.0 | 358 |
# coding=utf-8
from __future__ import unicode_literals, print_function
from flask import request, jsonify, url_for
from flask_login import current_user
import bugsnag
from . import load
from webhookdb.tasks.pull_request_file import spawn_page_tasks_for_pull_request_files
@load.route('/repos/<owner>/<repo>/pulls/<int:number>/files', methods=["POST"])
def pull_request_files(owner, repo, number):
"""
Queue tasks to load the pull request files (diffs) for a single pull request
into WebhookDB.
:statuscode 202: task successfully queued
"""
bugsnag_ctx = {"owner": owner, "repo": repo, "number": number}
bugsnag.configure_request(meta_data=bugsnag_ctx)
children = bool(request.args.get("children", False))
result = spawn_page_tasks_for_pull_request_files.delay(
owner, repo, number, children=children,
requestor_id=current_user.get_id(),
)
resp = jsonify({"message": "queued"})
resp.status_code = 202
resp.headers["Location"] = url_for("tasks.status", task_id=result.id)
return resp
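# Illustrative request/response sketch (the "/load" mount point and the
# owner/repo/number values are assumptions, not shown in this file):
#   POST /load/repos/octocat/hello-world/pulls/42/files?children=true
#   -> 202 Accepted, body {"message": "queued"}, with a Location header
#      pointing at the task-status endpoint for the spawned Celery task.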
| singingwolfboy/webhookdb | webhookdb/load/pull_request_file.py | Python | agpl-3.0 | 1,058 |
"""
Tests for SGA utility functions
"""
import pytest
import pytz
from edx_sga.tests.common import is_near_now
from edx_sga.utils import is_finalized_submission, utcnow
@pytest.mark.parametrize(
"submission_data,expected_value",
[
({"answer": {"finalized": True}}, True),
({"answer": {"filename": "file.txt"}}, True),
({"answer": {}}, True),
({"answer": {"finalized": False}}, False),
({"answer": None}, False),
({}, False),
],
)
def test_is_finalized_submission(submission_data, expected_value):
"""Test for is_finalized_submission"""
assert is_finalized_submission(submission_data) is expected_value
def test_utcnow():
"""
    utcnow should return a datetime object in UTC
"""
now = utcnow()
assert is_near_now(now)
assert now.tzinfo.zone == pytz.utc.zone
| mitodl/edx-sga | edx_sga/tests/test_utils.py | Python | agpl-3.0 | 851 |
"""
Django REST Framework serializers for the User API Accounts sub-application
"""
import json
import logging
from rest_framework import serializers
from django.contrib.auth.models import User
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from six import text_type
from lms.djangoapps.badges.utils import badges_enabled
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.user_api import errors
from openedx.core.djangoapps.user_api.models import (
RetirementState,
UserPreference,
UserRetirementStatus
)
from openedx.core.djangoapps.user_api.serializers import ReadOnlyFieldsSerializerMixin
from student.models import UserProfile, LanguageProficiency, SocialLink
from . import (
NAME_MIN_LENGTH, ACCOUNT_VISIBILITY_PREF_KEY, PRIVATE_VISIBILITY,
ALL_USERS_VISIBILITY,
)
from .image_helpers import get_profile_image_urls_for_user
from .utils import validate_social_link, format_social_link
PROFILE_IMAGE_KEY_PREFIX = 'image_url'
LOGGER = logging.getLogger(__name__)
class LanguageProficiencySerializer(serializers.ModelSerializer):
"""
Class that serializes the LanguageProficiency model for account
information.
"""
class Meta(object):
model = LanguageProficiency
fields = ("code",)
def get_identity(self, data):
"""
This is used in bulk updates to determine the identity of an object.
The default is to use the id of an object, but we want to override that
and consider the language code to be the canonical identity of a
LanguageProficiency model.
"""
try:
return data.get('code', None)
except AttributeError:
return None
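    # Illustrative sketch (not part of the original serializer): with this
    # identity override, bulk-updating [{'code': 'en'}, {'code': 'fr'}] matches
    # existing rows by language code ('en'/'fr') rather than by primary key, so
    # resubmitting the same codes updates rows instead of duplicating them.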
class SocialLinkSerializer(serializers.ModelSerializer):
"""
Class that serializes the SocialLink model for the UserProfile object.
"""
class Meta(object):
model = SocialLink
fields = ("platform", "social_link")
class UserReadOnlySerializer(serializers.Serializer):
"""
Class that serializes the User model and UserProfile model together.
"""
def __init__(self, *args, **kwargs):
# Don't pass the 'configuration' arg up to the superclass
self.configuration = kwargs.pop('configuration', None)
if not self.configuration:
self.configuration = settings.ACCOUNT_VISIBILITY_CONFIGURATION
# Don't pass the 'custom_fields' arg up to the superclass
self.custom_fields = kwargs.pop('custom_fields', [])
super(UserReadOnlySerializer, self).__init__(*args, **kwargs)
def to_representation(self, user):
"""
        Overwrite to_representation to handle custom logic since we are serializing two models as one here
:param user: User object
:return: Dict serialized account
"""
try:
user_profile = user.profile
except ObjectDoesNotExist:
user_profile = None
LOGGER.warning("user profile for the user [%s] does not exist", user.username)
accomplishments_shared = badges_enabled()
data = {
"username": user.username,
"url": self.context.get('request').build_absolute_uri(
reverse('accounts_api', kwargs={'username': user.username})
),
"email": user.email,
# For backwards compatibility: Tables created after the upgrade to Django 1.8 will save microseconds.
# However, mobile apps are not expecting microsecond in the serialized value. If we set it to zero the
# DRF JSONEncoder will not include it in the serialized value.
# https://docs.djangoproject.com/en/1.8/ref/databases/#fractional-seconds-support-for-time-and-datetime-fields
"date_joined": user.date_joined.replace(microsecond=0),
"is_active": user.is_active,
"bio": None,
"country": None,
"profile_image": None,
"language_proficiencies": None,
"name": None,
"name_en": None,
"gender": None,
"goals": None,
"year_of_birth": None,
"level_of_education": None,
"mailing_address": None,
"requires_parental_consent": None,
"accomplishments_shared": accomplishments_shared,
"account_privacy": self.configuration.get('default_visibility'),
"social_links": None,
"extended_profile_fields": None,
}
if user_profile:
data.update(
{
"bio": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.bio),
"country": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.country.code),
"profile_image": AccountLegacyProfileSerializer.get_profile_image(
user_profile, user, self.context.get('request')
),
"language_proficiencies": LanguageProficiencySerializer(
user_profile.language_proficiencies.all(), many=True
).data,
"name": user_profile.name,
"name_en": user_profile.name_en,
"gender": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.gender),
"goals": user_profile.goals,
"year_of_birth": user_profile.year_of_birth,
"level_of_education": AccountLegacyProfileSerializer.convert_empty_to_None(
user_profile.level_of_education
),
"mailing_address": user_profile.mailing_address,
"requires_parental_consent": user_profile.requires_parental_consent(),
"account_privacy": get_profile_visibility(user_profile, user, self.configuration),
"social_links": SocialLinkSerializer(
user_profile.social_links.all(), many=True
).data,
"extended_profile": get_extended_profile(user_profile),
}
)
if self.custom_fields:
fields = self.custom_fields
elif user_profile:
fields = _visible_fields(user_profile, user, self.configuration)
else:
fields = self.configuration.get('public_fields')
return self._filter_fields(
fields,
data
)
def _filter_fields(self, field_whitelist, serialized_account):
"""
Filter serialized account Dict to only include whitelisted keys
"""
visible_serialized_account = {}
for field_name in field_whitelist:
visible_serialized_account[field_name] = serialized_account.get(field_name, None)
return visible_serialized_account
class AccountUserSerializer(serializers.HyperlinkedModelSerializer, ReadOnlyFieldsSerializerMixin):
"""
Class that serializes the portion of User model needed for account information.
"""
class Meta(object):
model = User
fields = ("username", "email", "date_joined", "is_active")
read_only_fields = ("username", "email", "date_joined", "is_active")
explicit_read_only_fields = ()
class AccountLegacyProfileSerializer(serializers.HyperlinkedModelSerializer, ReadOnlyFieldsSerializerMixin):
"""
Class that serializes the portion of UserProfile model needed for account information.
"""
profile_image = serializers.SerializerMethodField("_get_profile_image")
requires_parental_consent = serializers.SerializerMethodField()
language_proficiencies = LanguageProficiencySerializer(many=True, required=False)
social_links = SocialLinkSerializer(many=True, required=False)
name_en = serializers.CharField(required=False)
class Meta(object):
model = UserProfile
fields = (
"name", "name_en", "gender", "goals", "year_of_birth", "level_of_education", "country", "social_links",
"mailing_address", "bio", "profile_image", "requires_parental_consent", "language_proficiencies"
)
# Currently no read-only field, but keep this so view code doesn't need to know.
read_only_fields = ()
explicit_read_only_fields = ("profile_image", "requires_parental_consent")
def validate_name(self, new_name):
""" Enforce minimum length for name. """
if len(new_name) < NAME_MIN_LENGTH:
raise serializers.ValidationError(
"The name field must be at least {} characters long.".format(NAME_MIN_LENGTH)
)
return new_name
def validate_language_proficiencies(self, value):
"""
Enforce all languages are unique.
"""
        language_proficiencies = list(value)
unique_language_proficiencies = set(language["code"] for language in language_proficiencies)
if len(language_proficiencies) != len(unique_language_proficiencies):
raise serializers.ValidationError("The language_proficiencies field must consist of unique languages.")
return value
def validate_social_links(self, value):
"""
Enforce only one entry for a particular social platform.
"""
        social_links = list(value)
unique_social_links = set(social_link["platform"] for social_link in social_links)
if len(social_links) != len(unique_social_links):
raise serializers.ValidationError("The social_links field must consist of unique social platforms.")
return value
def transform_gender(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_country(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_level_of_education(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_bio(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
@staticmethod
def convert_empty_to_None(value):
"""
Helper method to convert empty string to None (other values pass through).
"""
return None if value == "" else value
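    # Illustrative behaviour (hypothetical inputs, not part of the original class):
    #   convert_empty_to_None("")     -> None
    #   convert_empty_to_None("m")    -> "m"
    #   convert_empty_to_None(None)   -> None (non-empty-string values pass through)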
@staticmethod
def get_profile_image(user_profile, user, request=None):
"""
Returns metadata about a user's profile image.
"""
data = {'has_image': user_profile.has_profile_image}
urls = get_profile_image_urls_for_user(user, request)
data.update({
'{image_key_prefix}_{size}'.format(image_key_prefix=PROFILE_IMAGE_KEY_PREFIX, size=size_display_name): url
for size_display_name, url in urls.items()
})
return data
def get_requires_parental_consent(self, user_profile):
"""
Returns a boolean representing whether the user requires parental controls.
"""
return user_profile.requires_parental_consent()
def _get_profile_image(self, user_profile):
"""
Returns metadata about a user's profile image
This protected method delegates to the static 'get_profile_image' method
because 'serializers.SerializerMethodField("_get_profile_image")' will
call the method with a single argument, the user_profile object.
"""
return AccountLegacyProfileSerializer.get_profile_image(user_profile, user_profile.user)
def update(self, instance, validated_data):
"""
Update the profile, including nested fields.
Raises:
errors.AccountValidationError: the update was not attempted because validation errors were found with
the supplied update
"""
language_proficiencies = validated_data.pop("language_proficiencies", None)
# Update all fields on the user profile that are writeable,
# except for "language_proficiencies" and "social_links", which we'll update separately
        update_fields = set(self.get_writeable_fields()) - {"language_proficiencies", "social_links"}
for field_name in update_fields:
default = getattr(instance, field_name)
field_value = validated_data.get(field_name, default)
setattr(instance, field_name, field_value)
# Update the related language proficiency
if language_proficiencies is not None:
instance.language_proficiencies.all().delete()
instance.language_proficiencies.bulk_create([
LanguageProficiency(user_profile=instance, code=language["code"])
for language in language_proficiencies
])
# Update the user's social links
social_link_data = self._kwargs['data']['social_links'] if 'social_links' in self._kwargs['data'] else None
if social_link_data and len(social_link_data) > 0:
new_social_link = social_link_data[0]
current_social_links = list(instance.social_links.all())
instance.social_links.all().delete()
try:
# Add the new social link with correct formatting
validate_social_link(new_social_link['platform'], new_social_link['social_link'])
formatted_link = format_social_link(new_social_link['platform'], new_social_link['social_link'])
instance.social_links.bulk_create([
SocialLink(user_profile=instance, platform=new_social_link['platform'], social_link=formatted_link)
])
except ValueError as err:
# If we have encountered any validation errors, return them to the user.
raise errors.AccountValidationError({
'social_links': {
"developer_message": u"Error thrown from adding new social link: '{}'".format(text_type(err)),
"user_message": text_type(err)
}
})
# Add back old links unless overridden by new link
for current_social_link in current_social_links:
if current_social_link.platform != new_social_link['platform']:
instance.social_links.bulk_create([
SocialLink(user_profile=instance, platform=current_social_link.platform,
social_link=current_social_link.social_link)
])
instance.save()
return instance
class RetirementUserProfileSerializer(serializers.ModelSerializer):
"""
Serialize a small subset of UserProfile data for use in RetirementStatus APIs
"""
class Meta(object):
model = UserProfile
fields = ('id', 'name')
class RetirementUserSerializer(serializers.ModelSerializer):
"""
Serialize a small subset of User data for use in RetirementStatus APIs
"""
profile = RetirementUserProfileSerializer(read_only=True)
class Meta(object):
model = User
fields = ('id', 'username', 'email', 'profile')
class RetirementStateSerializer(serializers.ModelSerializer):
"""
Serialize a small subset of RetirementState data for use in RetirementStatus APIs
"""
class Meta(object):
model = RetirementState
fields = ('id', 'state_name', 'state_execution_order')
class UserRetirementStatusSerializer(serializers.ModelSerializer):
"""
Perform serialization for the RetirementStatus model
"""
user = RetirementUserSerializer(read_only=True)
current_state = RetirementStateSerializer(read_only=True)
last_state = RetirementStateSerializer(read_only=True)
class Meta(object):
model = UserRetirementStatus
exclude = ['responses', ]
class UserRetirementPartnerReportSerializer(serializers.Serializer):
"""
Perform serialization for the UserRetirementPartnerReportingStatus model
"""
original_username = serializers.CharField()
original_email = serializers.EmailField()
original_name = serializers.CharField()
orgs = serializers.ListField(child=serializers.CharField())
# Required overrides of abstract base class methods, but we don't use them
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
def get_extended_profile(user_profile):
"""
Returns the extended user profile fields stored in user_profile.meta
"""
# pick the keys from the site configuration
extended_profile_field_names = configuration_helpers.get_value('extended_profile_fields', [])
try:
extended_profile_fields_data = json.loads(user_profile.meta)
except ValueError:
extended_profile_fields_data = {}
extended_profile = []
for field_name in extended_profile_field_names:
extended_profile.append({
"field_name": field_name,
"field_value": extended_profile_fields_data.get(field_name, "")
})
return extended_profile
def get_profile_visibility(user_profile, user, configuration=None):
"""
Returns the visibility level for the specified user profile.
"""
if user_profile.requires_parental_consent():
return PRIVATE_VISIBILITY
if not configuration:
configuration = settings.ACCOUNT_VISIBILITY_CONFIGURATION
# Calling UserPreference directly because the requesting user may be different from existing_user
# (and does not have to be is_staff).
profile_privacy = UserPreference.get_value(user, ACCOUNT_VISIBILITY_PREF_KEY)
return profile_privacy if profile_privacy else configuration.get('default_visibility')
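# Illustrative resolution order for get_profile_visibility (a restatement of
# the logic above, not new behaviour):
#   1. parental consent required        -> PRIVATE_VISIBILITY
#   2. explicit account-privacy pref    -> that preference value
#   3. otherwise                        -> configuration['default_visibility']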
def _visible_fields(user_profile, user, configuration=None):
"""
Return what fields should be visible based on user settings
:param user_profile: User profile object
:param user: User object
:param configuration: A visibility configuration dictionary.
:return: whitelist List of fields to be shown
"""
if not configuration:
configuration = settings.ACCOUNT_VISIBILITY_CONFIGURATION
profile_visibility = get_profile_visibility(user_profile, user, configuration)
if profile_visibility == ALL_USERS_VISIBILITY:
return configuration.get('shareable_fields')
else:
return configuration.get('public_fields')
| Edraak/edraak-platform | openedx/core/djangoapps/user_api/accounts/serializers.py | Python | agpl-3.0 | 19,240 |
"""
This test case test the REST API
api/applications.py
"""
import json
from .base import MyApiTestCase
class APIApplicationsResolverTestCase(MyApiTestCase):
def test_get_applications(self):
with self.app.test_request_context('/application/',
method='GET',
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = res.json.get("result")
detail = res.json.get("detail")
value = result.get("value")
self.assertTrue("ssh" in value)
self.assertTrue("luks" in value)
self.assertTrue(value["ssh"]["options"]["optional"] == ["user"])
| privacyidea/privacyidea | tests/test_api_applications.py | Python | agpl-3.0 | 799 |
from django import forms
from django.utils.translation import ugettext_lazy as _
from astrobin.models import SolarSystem_Acquisition
class SolarSystem_AcquisitionForm(forms.ModelForm):
error_css_class = 'error'
date = forms.DateField(
required=False,
input_formats=['%Y-%m-%d'],
widget=forms.TextInput(attrs={'class': 'datepickerclass', 'autocomplete': 'off'}),
help_text=_("Please use the following format: yyyy-mm-dd"),
label=_("Date"),
)
def clean_seeing(self):
data = self.cleaned_data['seeing']
        if data and data not in range(1, 6):
raise forms.ValidationError(_("Please enter a value between 1 and 5."))
return data
def clean_transparency(self):
data = self.cleaned_data['transparency']
        if data and data not in range(1, 11):
raise forms.ValidationError(_("Please enter a value between 1 and 10."))
return data
class Meta:
model = SolarSystem_Acquisition
fields = (
'date',
'time',
'frames',
'fps',
'exposure_per_frame',
'focal_length',
'cmi',
'cmii',
'cmiii',
'seeing',
'transparency',
)
widgets = {
'date': forms.TextInput(attrs={'class': 'datepickerclass', 'autocomplete': 'off'}),
'time': forms.TextInput(attrs={'class': 'timepickerclass', 'autocomplete': 'off'}),
}
| astrobin/astrobin | astrobin/forms/solar_system_acquisition_form.py | Python | agpl-3.0 | 1,528 |
"""
Data models managers for the user strike app.
"""
from django.db import models
from django.db.models import Q
from django.utils import timezone
class UserStrikeManager(models.Manager):
"""
Manager class for the ``UserStrike`` data model.
"""
use_for_related_fields = True
def search_for_strike(self, user, ip_address):
"""
        Search for the latest (non-expired) strike for the given user or IP address.
:param user: The user instance to search strike for.
:param ip_address: The IP address to search strike for.
"""
# Compute the lookup expression
if not user and not ip_address:
return None
elif user and not ip_address:
strike_lookup = Q(target_user=user)
elif not user and ip_address:
strike_lookup = Q(target_ip_address=ip_address)
else:
strike_lookup = Q(target_user=user) | Q(target_ip_address=ip_address)
# Do the search
return self.filter(Q(expiration_date__isnull=True) | Q(expiration_date__isnull=False,
expiration_date__gte=timezone.now()),
strike_lookup).order_by('-block_access', '-creation_date').first()
| TamiaLab/carnetdumaker | apps/userstrike/managers.py | Python | agpl-3.0 | 1,277 |
from . import base
from . import mainMenu
from . import utils
| Aiacos/DevPyLib | mayaLib/guiLib/__init__.py | Python | agpl-3.0 | 62 |
# -*- coding: utf-8 -*-
"""Reforms controller"""
import collections
from .. import contexts, conv, model, wsgihelpers
@wsgihelpers.wsgify
def api1_reforms(req):
ctx = contexts.Ctx(req)
headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
assert req.method == 'GET', req.method
params = req.GET
inputs = dict(
context = params.get('context'),
)
data, errors = conv.pipe(
conv.struct(
dict(
context = conv.noop, # For asynchronous calls
),
default = 'drop',
),
)(inputs, state = ctx)
if errors is not None:
return wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(dict(
apiVersion = 1,
context = inputs.get('context'),
error = collections.OrderedDict(sorted(dict(
code = 400, # Bad Request
errors = [conv.jsonify_value(errors)],
message = ctx._(u'Bad parameters in request'),
).iteritems())),
method = req.script_name,
params = inputs,
url = req.url.decode('utf-8'),
).iteritems())),
headers = headers,
)
build_reform_function_by_key = model.build_reform_function_by_key
declared_reforms_key = build_reform_function_by_key.keys() \
if build_reform_function_by_key is not None \
else None
reforms = collections.OrderedDict(sorted({
reform_key: reform.name
for reform_key, reform in model.reform_by_full_key.iteritems()
}.iteritems())) if declared_reforms_key is not None else None
return wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(dict(
apiVersion = 1,
context = data['context'],
method = req.script_name,
params = inputs,
reforms = reforms,
url = req.url.decode('utf-8'),
).iteritems())),
headers = headers,
)
| sgmap/openfisca-web-api | openfisca_web_api/controllers/reforms.py | Python | agpl-3.0 | 2,088 |
__doc__ = """
Preprocess input data, including
`video` format should be `avi` or `mp4`
`slide` format should be `pdf` (auto-converted to `jpg`) or
    a set of `jpg` images
`summary` saves `video` and `slide` information to the summary store
`reducer` reduces video frames to a more distinguishable subset
`reducer.probe` probes the video to compute positions of likely distinguishable frames
"""
__all__ = ["probe"]
import numpy as np
from lib.exp.base import ExpCommon
from probe import Probe
class Const(object):
__doc__ = "Constants for preprocessing data"
Names = np.array(
["Avg 2", "Avg 10", "Avg 15", "Avg 20",
"Avg 30", "Avg 60", "Avg 300", "Bkg Model"])
Rkeys = np.array(
["diff_next/size_2", "dn/size_10",
"diff_next/size_15", "dn/size_20",
"diff_next/size_30", "diff_next/size_60",
"diff_next/size_300", "diff_bkg"])
Doffsets = np.array([1, 9, 14, 19, 29, 59, 299, 1])
class Reducer(ExpCommon):
def __init__(self, root, name):
ExpCommon.__init__(self, root, name)
def __compress(self, data, keep_last=False):
"""
        Compress runs of consecutive frames, returning the reduced dataset and the before/after row counts.
"""
before = len(data)
maxv = data.iloc[0].name
prev = data.iloc[0].name
for curv in data.index[1:]:
if (curv - prev) == 1: # continuous sequence
if data.ix[maxv]['diff'] < data.ix[curv]['diff']:
# drop previous max if its diff smaller
data = data.drop([maxv])
maxv = curv
else: # drop continuous item
data = data.drop([curv])
            else:  # separate sequence start
maxv = curv
prev = curv
return data, before, len(data)
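    # Illustrative behaviour (hypothetical indices/diffs): given rows at
    # indices [3, 4, 5, 9] with diffs [0.2, 0.7, 0.4, 0.5], the run 3-4-5
    # keeps only index 4 (largest diff), so __compress returns the rows at
    # [4, 9] together with the counts (4, 2).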
def __get_data(self, key):
"""
        Get probed `difference` values and `frame` data
"""
df = self.load(key)
dmean = df["diff"].mean()
can = df[df["diff"].gt(dmean)]
return can, len(df)
def __results(self, **opts):
oo = opts["origin"]
ff = opts["final"]
info = "Original: {}, thresed: {}, final: {}, ratio: {}".\
format(oo, opts["thresed"], ff, ff*1.0/oo)
istr = "Reduce data from {}".format(info)
print istr
self.elog.info(istr)
self.save_rtlog(opts.keys(), opts.values())
def reduce(self, key, save=False):
can, orl = self.__get_data(key)
red, thresed, after = self.__compress(can)
self.__results(key=key, origin=orl, thresed=thresed,
final=after)
if save:
self.save("/nr/{}".format(key), red)
return red
def clear(self):
self.delete_log()
# self.delete_store() # be careful
def frame_ids(self, ikey=4, delta=True):
rk = "/nr/" + Const.Rkeys[ikey]
dfv = self.load(rk).frame_id
if delta:
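            # shift each candidate frame id back by a fixed 30-frame offset;
            # ids that become non-positive are dropped by the filter below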
dfv -= 30
return dfv[dfv > 0].values.astype(np.int32)
def probing(self, qs=2):
pb = Probe(self.root, self.name)
for frame_id, opt in pb.diff_next(qs=qs):
self.elog.info("Qs: {}, FrameID: {}, time: {}".
format(qs, frame_id, opt))
self.save("dn/size_{}".format(qs), pb.pdf)
def batch_probing(self, qss=[2]):
"""
qss: query size list
"""
for qs in qss:
self.probing(qs)
def zipkey(self, keys=[]):
if len(keys) == 0:
keys = range(len(Const.Names))
inxs = range(len(keys))
iz = zip(inxs, Const.Names[keys],
Const.Rkeys[keys], Const.Doffsets[keys])
return iz
| speed-of-light/pyslider | lib/exp/pre/__init__.py | Python | agpl-3.0 | 3,713 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
class GazelleTest(BackendTest):
BACKEND = 'gazelle'
def test_torrent(self):
l = list(self.backend.iter_torrents('sex'))
if len(l) > 0:
self.backend.get_torrent_file(l[0].id)
| eirmag/weboob | modules/gazelle/test.py | Python | agpl-3.0 | 989 |