code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/python -tt
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import subprocess
import os
import shutil
import sys
import hashlib
def _doRunCommand(command, logger, rundir='/tmp', output=subprocess.PIPE, error=subprocess.PIPE, env=None):
"""Run a command and log the output. Error out if we get something on stderr"""
logger.info("Running %s" % subprocess.list2cmdline(command))
p1 = subprocess.Popen(command, cwd=rundir, stdout=output, stderr=error, universal_newlines=True, env=env)
(out, err) = p1.communicate()
if out:
logger.debug(out)
if p1.returncode != 0:
logger.error("Got an error from %s" % command[0])
logger.error(err)
raise OSError, "Got an error from %s: %s" % (command[0], err)
def _link(local, target, logger, force=False):
"""Simple function to link or copy a package, removing target optionally."""
if os.path.exists(target) and force:
os.remove(target)
#check for broken links
if force and os.path.islink(target):
if not os.path.exists(os.readlink(target)):
os.remove(target)
try:
os.link(local, target)
except OSError, e:
if e.errno != 18: # EXDEV
logger.error('Got an error linking from cache: %s' % e)
raise OSError, e
# Can't hardlink cross file systems
shutil.copy2(local, target)
def _ensuredir(target, logger, force=False, clean=False):
"""Ensure that a directory exists, if it already exists, only continue
if force is set."""
# We have to check existance of a logger, as setting the logger could
# itself cause an issue.
def whoops(func, path, exc_info):
message = 'Could not remove %s' % path
if logger:
logger.error(message)
else:
sys.stderr(message)
sys.exit(1)
if os.path.exists(target) and not os.path.isdir(target):
message = '%s exists but is not a directory.' % target
if logger:
logger.error(message)
else:
sys.stderr(message)
sys.exit(1)
if not os.path.isdir(target):
os.makedirs(target)
elif force and clean:
shutil.rmtree(target, onerror=whoops)
os.makedirs(target)
elif force:
return
else:
message = 'Directory %s already exists. Use --force to overwrite.' % target
if logger:
logger.error(message)
else:
sys.stderr(message)
sys.exit(1)
def _doCheckSum(path, hash, logger):
"""Generate a checksum hash from a provided path.
Return a string of type:hash"""
# Try to figure out what hash we want to do
try:
sum = hashlib.new(hash)
except ValueError:
logger.error("Invalid hash type: %s" % hash)
return False
# Try to open the file, using binary flag.
try:
myfile = open(path, 'rb')
except IOError, e:
logger.error("Could not open file %s: %s" % (path, e))
return False
# Loop through the file reading chunks at a time as to not
# put the entire file in memory. That would suck for DVDs
while True:
chunk = myfile.read(8192) # magic number! Taking suggestions for better blocksize
if not chunk:
break # we're done with the file
sum.update(chunk)
myfile.close()
return '%s:%s' % (hash, sum.hexdigest())
| ykhodorkovskiy/clip | packages/pungi/pungi-2.13/src/pypungi/util.py | Python | apache-2.0 | 4,027 |
from math import log
import time
import sys
import glob
from serial import Serial
from serial import SerialException
# -*- coding:utf-8 -*-
# NOTE(review): a coding cookie only takes effect on the first or second
# line of a file; placed here (after the imports) it is inert.
__author__ = 'Jerry'
# Module-level connection state, mutated by connect_serial().
connected = 0  # set to 1 once a port has been opened
serial_port = 0  # replaced by a Serial instance on successful connect
frame_head = 0xffaa # frame header
def serial_ports():
    """Lists serial port names

    :raises EnvironmentError:
        On unsupported or unknown platforms
    :returns:
        A list of the serial ports available on the system
    """
    platform = sys.platform
    if platform.startswith('win'):
        candidates = ['COM%s' % (i + 1) for i in range(20)]
    elif platform.startswith('linux') or platform.startswith('cygwin'):
        # this excludes your current terminal "/dev/tty"
        candidates = glob.glob('/dev/tty[A-Za-z]*')
    elif platform.startswith('darwin'):
        candidates = glob.glob('/dev/tty.*')
    else:
        raise EnvironmentError('Unsupported platform')

    available = []
    for name in candidates:
        # A port counts as available when it can be opened and closed
        # without error.
        try:
            Serial(name).close()
            available.append(name)
        except (OSError, SerialException):
            pass
    return available
def connect_serial(port_set="COM1", baud_rate_set=9600):
    """Open *port_set* at *baud_rate_set* and store it module-wide.

    Returns True on success; False when the port cannot be opened."""
    global serial_port, connected
    try:
        serial_port = Serial(port=port_set, baudrate=baud_rate_set)
    except (OSError, SerialException):
        return False
    connected = 1
    return True
def send_frame(data, repeat=1):
    """Build a frame (header + payload + XOR checksum) and write it to
    the open serial port *repeat* times.

    The 0xffaa header and the payload are emitted most-significant byte
    first, one chr() per byte; the final byte is the XOR of every
    preceding byte.  Nothing is sent unless connect_serial() succeeded.
    """
    # payload byte count (log base 256 of the value, minimum 1)
    if data == 0:
        length = 1
    else:
        length = int(log(data, 256)) + 1
    lister = []
    # add the data, least significant byte first (reversed below)
    for i in range(length):
        bitwise = 0xff << i*8
        lister.append(chr((data & bitwise) >> 8*i))  # 8 bit convert
    # add the head
    head_length = int(log(frame_head, 256)) + 1
    for i in range(head_length):
        bitwise = 0xff << i*8
        lister.append(chr((frame_head & bitwise) >> 8*i))
    lister.reverse()
    # add the checksum 1byte: XOR of all framed bytes.
    # BUG FIX: the original skipped elements with "c is not lister[0]".
    # CPython interns single-character strings, so any later byte equal
    # to the first header byte was also (wrongly) excluded from the
    # checksum.  Skip by position instead.
    check_sum = ord(lister[0])
    for c in lister[1:]:
        check_sum ^= ord(c)
    lister.append(chr(check_sum))
    # send to the serial
    if connected == 1:
        for i in range(int(repeat)):
            serial_port.write(lister)
            time.sleep(0.001)
def receive_data():
    """Drain all bytes currently waiting on the serial port.

    Returns the bytes read as a list of integers; empty when no
    connection is open or nothing is pending."""
    incoming = []
    if connected:
        while serial_port.inWaiting() > 0:
            if serial_port.inWaiting() > 0:
                incoming.append(ord(serial_port.read(1)))
                time.sleep(0.001)
    return incoming
| jerrymomo10/Rfid_PCTest | parase.py | Python | gpl-2.0 | 2,439 |
import pytest
from pexpect import TIMEOUT
from tests.functional.utils import spawn, functional, bare
# (shell, docker image tag, dockerfile) triples used to parametrize the
# installation test below -- one Debian-family and one Fedora-family image.
envs = ((u'bash', 'ubuntu-bash', u'''
FROM ubuntu:latest
RUN apt-get update
RUN apt-get install -yy bash
'''), (u'bash', 'generic-bash', u'''
FROM fedora:latest
RUN dnf install -yy python-devel sudo which gcc
'''))
@functional
@pytest.mark.skipif(
    bool(bare), reason="Can't be tested in bare run")
@pytest.mark.parametrize('shell, tag, dockerfile', envs)
def test_installation(request, shell, tag, dockerfile):
    """Run install.sh inside a container and check thefuck responds."""
    proc = spawn(request, tag, dockerfile, shell, install=False)
    # Install from the mounted source, then re-exec the shell ($0) so
    # the freshly installed alias is picked up.
    proc.sendline(u'cat /src/install.sh | sh - && $0')
    proc.sendline(u'thefuck --version')
    # expect() returns the index of the matched pattern; asserting its
    # truthiness means TIMEOUT (index 0) fails the test.
    assert proc.expect([TIMEOUT, u'The Fuck'], timeout=600)
    proc.sendline(u'fuck')
    assert proc.expect([TIMEOUT, u'No fucks given'])
| bigplus/thefuck | tests/functional/test_install.py | Python | mit | 819 |
# -*- coding: utf-8 -*-
from haproxy import filters
import pytest
@pytest.mark.parametrize(
    'to_filter, to_check, result',
    [
        ('1.2.3.4', '1.2.3.4', True),
        ('2.3.4.5', '5.3.5.4', False),
        ('2001:db8::8a2e:370:7334', '2001:db8::8a2e:370:7334', True),
        ('2001:db8::8a2e:370:7334', '2001:db8::8a2e:456:7321', False),
    ],
)
def test_filter_ip(line_factory, to_filter, to_check, result):
    """filter_ip must match only the exact captured client IP."""
    line = line_factory(headers=f' {{{to_check}}}')
    assert filters.filter_ip(to_filter)(line) is result
@pytest.mark.parametrize(
    'to_filter, to_check, result',
    [
        ('1.2.3', '1.2.3.4', True),
        ('1.2.3', '1.2.3.78', True),
        ('2.3.4.5', '5.3.5.4', False),
        ('2001:db8', '2001:db8::8a2e:370:7334', True),
        ('2001:db8', '2001:db8::8a2e:456:7321', True),
        ('2134:db8', '2001:db8::8a2e:456:7321', False),
    ],
)
def test_filter_ip_range(line_factory, to_filter, to_check, result):
    """filter_ip_range must match any IP that starts with the prefix."""
    line = line_factory(headers=f' {{{to_check}}}')
    assert filters.filter_ip_range(to_filter)(line) is result
@pytest.mark.parametrize(
    'path, result',
    [
        ('/path/to/image', True),
        ('/something/else', False),
        ('/another/image/here', True),
    ],
)
def test_filter_path(line_factory, path, result):
    """filter_path must flag any request whose path contains '/image'."""
    line = line_factory(http_request=f'GET {path} HTTP/1.1')
    assert filters.filter_path('/image')(line) is result
@pytest.mark.parametrize(
    'path, result',
    [
        ('/ssl_path:443/image', True),
        ('/something/else', False),
        ('/another:443/ssl', True),
    ],
)
def test_filter_ssl(line_factory, path, result):
    """filter_ssl must flag requests whose path carries the :443 port."""
    line = line_factory(http_request=f'GET {path} HTTP/1.1')
    assert filters.filter_ssl()(line) is result
@pytest.mark.parametrize(
    'tr, result',
    [
        (45, False),
        (13000, True),
        (4566, False),
    ],
)
def test_filter_slow_requests(line_factory, tr, result):
    """filter_slow_requests must flag responses slower than the limit."""
    assert filters.filter_slow_requests('10000')(line_factory(tr=tr)) is result
@pytest.mark.parametrize(
    'tw, result',
    [
        (45, True),
        (13000, False),
        (4566, False),
    ],
)
def test_filter_wait_on_queues(line_factory, tw, result):
    """filter_wait_on_queues must pass only requests queued briefly."""
    assert filters.filter_wait_on_queues('50')(line_factory(tw=tw)) is result
@pytest.mark.parametrize(
    'to_filter, to_check, result',
    [
        ('200', '200', True),
        ('200', '230', False),
        ('300', '300', True),
        ('300', '400', False),
    ],
)
def test_filter_status_code(line_factory, to_filter, to_check, result):
    """filter_status_code must match the HTTP status exactly."""
    line = line_factory(status=to_check)
    assert filters.filter_status_code(to_filter)(line) is result
@pytest.mark.parametrize(
    'to_filter, to_check, result',
    [
        ('2', '200', True),
        ('2', '230', True),
        ('2', '300', False),
        ('3', '300', True),
        ('3', '330', True),
        ('3', '400', False),
    ],
)
def test_filter_status_code_family(line_factory, to_filter, to_check, result):
    """filter_status_code_family must match on the leading digit(s)."""
    line = line_factory(status=to_check)
    assert filters.filter_status_code_family(to_filter)(line) is result
@pytest.mark.parametrize(
    'to_filter, to_check, result',
    [
        ('GET', 'GET', True),
        ('GET', 'POST', False),
        ('GET', 'PUT', False),
        ('GET', 'PATCH', False),
        ('GET', 'DELETE', False),
        ('PATCH', 'PATCH', True),
        ('DELETE', 'DELETE', True),
    ],
)
def test_filter_http_method(line_factory, to_filter, to_check, result):
    """filter_http_method must match the request verb exactly."""
    line = line_factory(http_request=f'{to_check} /path HTTP/1.1')
    assert filters.filter_http_method(to_filter)(line) is result
@pytest.mark.parametrize(
    'to_filter, to_check, result',
    [
        ('default', 'default', True),
        ('default', 'backend', False),
        ('backend', 'backend', True),
        ('backend', 'default', False),
    ],
)
def test_filter_backend(line_factory, to_filter, to_check, result):
    """filter_backend must match the backend name exactly."""
    line = line_factory(backend_name=to_check)
    assert filters.filter_backend(to_filter)(line) is result
@pytest.mark.parametrize(
    'to_filter, to_check, result',
    [
        ('varnish', 'varnish', True),
        ('varnish', 'nginx', False),
        ('nginx', 'nginx', True),
        ('nginx', 'varnish', False),
    ],
)
def test_filter_frontend(line_factory, to_filter, to_check, result):
    """filter_frontend must match the frontend name exactly."""
    line = line_factory(frontend_name=to_check)
    assert filters.filter_frontend(to_filter)(line) is result
@pytest.mark.parametrize(
    'to_filter, to_check, result',
    [
        ('server1', 'server1', True),
        ('server1', 'backend23', False),
        ('backend23', 'backend23', True),
        ('backend23', 'server1', False),
    ],
)
def test_filter_server(line_factory, to_filter, to_check, result):
    """filter_server must match the server name exactly."""
    line = line_factory(server_name=to_check)
    assert filters.filter_server(to_filter)(line) is result
@pytest.mark.parametrize(
    'to_filter, to_check, result',
    [
        ('400', '500', True),
        ('400', '+500', True),
        ('+400', '500', True),
        ('+400', '+500', True),
        ('400', '300', False),
        ('400', '+300', False),
        ('+400', '300', False),
        ('+400', '+300', False),
    ],
)
def test_filter_response_size(line_factory, to_filter, to_check, result):
    """filter_response_size must flag responses larger than the limit.

    Both the filter argument and the logged byte count may carry a
    leading plus sign.
    """
    line = line_factory(bytes=to_check)
    assert filters.filter_response_size(to_filter)(line) is result
| gforcada/haproxy_log_analysis | haproxy/tests/test_filters.py | Python | gpl-3.0 | 6,733 |
from __future__ import unicode_literals
import datetime
import re
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
F, Aggregate, Avg, Count, DecimalField, FloatField, Func, IntegerField,
Max, Min, Sum, Value,
)
from django.test import TestCase, ignore_warnings
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from .models import Author, Book, Publisher, Store
class AggregateTestCase(TestCase):
    @classmethod
    def setUpTestData(cls):
        """Build the authors/publishers/books/stores fixture shared by
        every test in this case.  Ages, prices and relations below are
        load-bearing: many tests assert exact aggregate values."""
        # Nine authors; ages feed the Avg/Sum/Max assertions.
        cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
        cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
        cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
        cls.a4 = Author.objects.create(name='James Bennett', age=29)
        cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
        cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
        cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
        cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
        cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
        # Symmetric-looking m2m friendships (a3 has none).
        cls.a1.friends.add(cls.a2, cls.a4)
        cls.a2.friends.add(cls.a1, cls.a7)
        cls.a4.friends.add(cls.a1)
        cls.a5.friends.add(cls.a6, cls.a7)
        cls.a6.friends.add(cls.a5, cls.a7)
        cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
        cls.a8.friends.add(cls.a9)
        cls.a9.friends.add(cls.a8)
        # Five publishers; p5 deliberately has no books.
        cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
        cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
        cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
        cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
        cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
        # Six books with distinct ratings/prices/pubdates.
        cls.b1 = Book.objects.create(
            isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
            pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
            pubdate=datetime.date(2007, 12, 6)
        )
        cls.b2 = Book.objects.create(
            isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
            pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
            pubdate=datetime.date(2008, 3, 3)
        )
        cls.b3 = Book.objects.create(
            isbn='159059996', name='Practical Django Projects',
            pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
            pubdate=datetime.date(2008, 6, 23)
        )
        cls.b4 = Book.objects.create(
            isbn='013235613', name='Python Web Development with Django',
            pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
            pubdate=datetime.date(2008, 11, 3)
        )
        cls.b5 = Book.objects.create(
            isbn='013790395', name='Artificial Intelligence: A Modern Approach',
            pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
            pubdate=datetime.date(1995, 1, 15)
        )
        cls.b6 = Book.objects.create(
            isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
            pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
            pubdate=datetime.date(1991, 10, 15)
        )
        # Book authorship (m2m); a8 wrote two books.
        cls.b1.authors.add(cls.a1, cls.a2)
        cls.b2.authors.add(cls.a3)
        cls.b3.authors.add(cls.a4)
        cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
        cls.b5.authors.add(cls.a8, cls.a9)
        cls.b6.authors.add(cls.a8)
        # Three stores with overlapping stock.
        s1 = Store.objects.create(
            name='Amazon.com',
            original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
            friday_night_closing=datetime.time(23, 59, 59)
        )
        s2 = Store.objects.create(
            name='Books.com',
            original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
            friday_night_closing=datetime.time(23, 59, 59)
        )
        s3 = Store.objects.create(
            name="Mamma and Pappa's Books",
            original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
            friday_night_closing=datetime.time(21, 30)
        )
        s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
        s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
        s3.books.add(cls.b3, cls.b4, cls.b6)
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
    def test_related_aggregate(self):
        """Aggregates can traverse m2m, reverse m2m, reverse fk and fk
        relations via the double-underscore lookup syntax."""
        vals = Author.objects.aggregate(Avg("friends__age"))
        self.assertEqual(len(vals), 1)
        self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
        vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
        self.assertEqual(len(vals), 1)
        self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
        vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["book__rating__avg"], 4.0)
        vals = Book.objects.aggregate(Sum("publisher__num_awards"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["publisher__num_awards__sum"], 30)
        vals = Publisher.objects.aggregate(Sum("book__price"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
    def test_annotate_basic(self):
        """annotate() with no arguments is a no-op; with an aggregate it
        attaches the computed value to each returned object."""
        self.assertQuerysetEqual(
            Book.objects.annotate().order_by('pk'), [
                "The Definitive Guide to Django: Web Development Done Right",
                "Sams Teach Yourself Django in 24 Hours",
                "Practical Django Projects",
                "Python Web Development with Django",
                "Artificial Intelligence: A Modern Approach",
                "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
            ],
            lambda b: b.name
        )
        books = Book.objects.annotate(mean_age=Avg("authors__age"))
        b = books.get(pk=self.b1.pk)
        self.assertEqual(
            b.name,
            'The Definitive Guide to Django: Web Development Done Right'
        )
        self.assertEqual(b.mean_age, 34.5)
    def test_annotate_defer(self):
        """Annotations coexist with defer(); the deferred field is still
        loadable on access."""
        qs = Book.objects.annotate(
            page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
        rows = [
            (1, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right")
        ]
        self.assertQuerysetEqual(
            qs.order_by('pk'), rows,
            lambda r: (r.id, r.isbn, r.page_sum, r.name)
        )
    def test_annotate_defer_select_related(self):
        """Annotations work together with select_related() and defer()."""
        qs = Book.objects.select_related('contact').annotate(
            page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
        rows = [
            (1, "159059725", 447, "Adrian Holovaty",
             "The Definitive Guide to Django: Web Development Done Right")
        ]
        self.assertQuerysetEqual(
            qs.order_by('pk'), rows,
            lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name)
        )
    def test_annotate_m2m(self):
        """Annotations can aggregate across an m2m relation, both with
        the default name (authors__age__avg) and an explicit alias."""
        books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
        self.assertQuerysetEqual(
            books, [
                ('Artificial Intelligence: A Modern Approach', 51.5),
                ('Practical Django Projects', 29.0),
                ('Python Web Development with Django', Approximate(30.3, places=1)),
                ('Sams Teach Yourself Django in 24 Hours', 45.0)
            ],
            lambda b: (b.name, b.authors__age__avg),
        )
        books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
        self.assertQuerysetEqual(
            books, [
                ('Artificial Intelligence: A Modern Approach', 2),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
                ('Practical Django Projects', 1),
                ('Python Web Development with Django', 3),
                ('Sams Teach Yourself Django in 24 Hours', 1),
                ('The Definitive Guide to Django: Web Development Done Right', 2)
            ],
            lambda b: (b.name, b.num_authors)
        )
    def test_backwards_m2m_annotate(self):
        """Annotations can traverse the reverse side of an m2m (author ->
        book)."""
        authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
        self.assertQuerysetEqual(
            authors, [
                ('Adrian Holovaty', 4.5),
                ('Brad Dayley', 3.0),
                ('Jacob Kaplan-Moss', 4.5),
                ('James Bennett', 4.0),
                ('Paul Bissex', 4.0),
                ('Stuart Russell', 4.0)
            ],
            lambda a: (a.name, a.book__rating__avg)
        )
        authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
        self.assertQuerysetEqual(
            authors, [
                ('Adrian Holovaty', 1),
                ('Brad Dayley', 1),
                ('Jacob Kaplan-Moss', 1),
                ('James Bennett', 1),
                ('Jeffrey Forcier', 1),
                ('Paul Bissex', 1),
                ('Peter Norvig', 2),
                ('Stuart Russell', 1),
                ('Wesley J. Chun', 1)
            ],
            lambda a: (a.name, a.num_books)
        )
    def test_reverse_fkey_annotate(self):
        """Annotations can follow a forward fk (book -> publisher) and a
        reverse fk (publisher -> book); publishers without books get
        None."""
        books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
        self.assertQuerysetEqual(
            books, [
                ('Artificial Intelligence: A Modern Approach', 7),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
                ('Practical Django Projects', 3),
                ('Python Web Development with Django', 7),
                ('Sams Teach Yourself Django in 24 Hours', 1),
                ('The Definitive Guide to Django: Web Development Done Right', 3)
            ],
            lambda b: (b.name, b.publisher__num_awards__sum)
        )
        publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
        self.assertQuerysetEqual(
            publishers, [
                ('Apress', Decimal("59.69")),
                ("Jonno's House of Books", None),
                ('Morgan Kaufmann', Decimal("75.00")),
                ('Prentice Hall', Decimal("112.49")),
                ('Sams', Decimal("23.09"))
            ],
            lambda p: (p.name, p.book__price__sum)
        )
    def test_annotate_values(self):
        """Annotations interact correctly with values()/values_list():
        before or after annotate(), restricted to chosen fields, grouped
        by a values() field, and with auto-named aggregates."""
        # values() after annotate(): annotation included in each dict.
        books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values())
        self.assertEqual(
            books, [
                {
                    "contact_id": 1,
                    "id": 1,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                    "name": "The Definitive Guide to Django: Web Development Done Right",
                    "pages": 447,
                    "price": Approximate(Decimal("30")),
                    "pubdate": datetime.date(2007, 12, 6),
                    "publisher_id": 1,
                    "rating": 4.5,
                }
            ]
        )
        # values() restricted to selected fields plus the annotation.
        books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg('authors__age')).values('pk', 'isbn', 'mean_age')
        self.assertEqual(
            list(books), [
                {
                    "pk": 1,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                }
            ]
        )
        # values() naming only a model field drops the annotation.
        books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name")
        self.assertEqual(
            list(books), [
                {
                    "name": "The Definitive Guide to Django: Web Development Done Right"
                }
            ]
        )
        # values() before annotate(): annotation still appears.
        books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age'))
        self.assertEqual(
            list(books), [
                {
                    "contact_id": 1,
                    "id": 1,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                    "name": "The Definitive Guide to Django: Web Development Done Right",
                    "pages": 447,
                    "price": Approximate(Decimal("30")),
                    "pubdate": datetime.date(2007, 12, 6),
                    "publisher_id": 1,
                    "rating": 4.5,
                }
            ]
        )
        # values() before annotate() groups the aggregates per value.
        books = Book.objects.values("rating").annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")).order_by("rating")
        self.assertEqual(
            list(books), [
                {
                    "rating": 3.0,
                    "n_authors": 1,
                    "mean_age": 45.0,
                },
                {
                    "rating": 4.0,
                    "n_authors": 6,
                    "mean_age": Approximate(37.16, places=1)
                },
                {
                    "rating": 4.5,
                    "n_authors": 2,
                    "mean_age": 34.5,
                },
                {
                    "rating": 5.0,
                    "n_authors": 1,
                    "mean_age": 57.0,
                }
            ]
        )
        # Auto-named aggregate over an m2m; friendless authors get None.
        authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
        self.assertEqual(len(authors), 9)
        self.assertQuerysetEqual(
            authors, [
                ('Adrian Holovaty', 32.0),
                ('Brad Dayley', None),
                ('Jacob Kaplan-Moss', 29.5),
                ('James Bennett', 34.0),
                ('Jeffrey Forcier', 27.0),
                ('Paul Bissex', 31.0),
                ('Peter Norvig', 46.0),
                ('Stuart Russell', 57.0),
                ('Wesley J. Chun', Approximate(33.66, places=1))
            ],
            lambda a: (a.name, a.friends__age__avg)
        )
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
    def test_annotate_ordering(self):
        """Querysets can be ordered by an annotation, ascending and
        descending."""
        books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
        self.assertEqual(
            list(books), [
                {
                    "rating": 4.5,
                    "oldest": 35,
                },
                {
                    "rating": 3.0,
                    "oldest": 45
                },
                {
                    "rating": 4.0,
                    "oldest": 57,
                },
                {
                    "rating": 5.0,
                    "oldest": 57,
                }
            ]
        )
        books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
        self.assertEqual(
            list(books), [
                {
                    "rating": 5.0,
                    "oldest": 57,
                },
                {
                    "rating": 4.0,
                    "oldest": 57,
                },
                {
                    "rating": 3.0,
                    "oldest": 45,
                },
                {
                    "rating": 4.5,
                    "oldest": 35,
                }
            ]
        )
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
    def test_filtering(self):
        """filter() on annotations combines correctly with filters on
        related fields, in either order, and supports range/in/isnull
        lookups on the annotation."""
        # Extra fixture: a publisher with three more books.
        p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
        Book.objects.create(
            name='ExpensiveBook1',
            pages=1,
            isbn='111',
            rating=3.5,
            price=Decimal("1000"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008, 12, 1)
        )
        Book.objects.create(
            name='ExpensiveBook2',
            pages=1,
            isbn='222',
            rating=4.0,
            price=Decimal("1000"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008, 12, 2)
        )
        Book.objects.create(
            name='ExpensiveBook3',
            pages=1,
            isbn='333',
            rating=4.5,
            price=Decimal("35"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008, 12, 3)
        )
        # Filter purely on the annotation.
        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )
        # A plain related filter duplicates rows per matching book.
        publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Apress",
                "Sams",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name
        )
        # Annotation filter and related filter in the same filter() call:
        # both conditions apply to the same (unrestricted) count.
        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1, book__price__lt=Decimal("40.0")).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )
        # Related filter BEFORE annotate: the count only covers the
        # filtered books, so the result set shrinks.
        publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
            ],
            lambda p: p.name
        )
        # Range / in / isnull lookups against the annotation.
        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Sams",
                "Prentice Hall",
                "Morgan Kaufmann",
                "Expensive Publisher",
            ],
            lambda p: p.name
        )
        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Sams",
                "Prentice Hall",
                "Morgan Kaufmann",
            ],
            lambda p: p.name
        )
        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Sams",
                "Morgan Kaufmann",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )
        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
        self.assertEqual(len(publishers), 0)
    def test_annotation(self):
        """Filtering on annotated counts works across m2m and fk
        relations, including distinct counts and mixed annotation +
        related-field filters."""
        vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
        self.assertEqual(vals, {"friends__id__count": 2})
        books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk")
        self.assertQuerysetEqual(
            books, [
                "The Definitive Guide to Django: Web Development Done Right",
                "Artificial Intelligence: A Modern Approach",
            ],
            lambda b: b.name
        )
        authors = Author.objects.annotate(num_friends=Count("friends__id", distinct=True)).filter(num_friends=0).order_by("pk")
        self.assertQuerysetEqual(
            authors, [
                "Brad Dayley",
            ],
            lambda a: a.name
        )
        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Prentice Hall",
            ],
            lambda p: p.name
        )
        # Filtering on the relation before annotating restricts the count.
        publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1)
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
            ],
            lambda p: p.name
        )
        books = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1)
        self.assertQuerysetEqual(
            books, [
                "Artificial Intelligence: A Modern Approach",
            ],
            lambda b: b.name
        )
    def test_more_aggregation(self):
        """Aggregating over a queryset that is itself filtered on an
        annotation (after making Norvig a co-author of b1)."""
        a = Author.objects.get(name__contains='Norvig')
        b = Book.objects.get(name__contains='Done Right')
        b.authors.add(a)
        b.save()
        vals = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1).aggregate(Avg("rating"))
        self.assertEqual(vals, {"rating__avg": 4.25})
    def test_even_more_aggregate(self):
        """Annotations may be used in exclude() and order_by(); date and
        time fields aggregate as well."""
        publishers = Publisher.objects.annotate(earliest_book=Min("book__pubdate")).exclude(earliest_book=None).order_by("earliest_book").values()
        self.assertEqual(
            list(publishers), [
                {
                    'earliest_book': datetime.date(1991, 10, 15),
                    'num_awards': 9,
                    'id': 4,
                    'name': 'Morgan Kaufmann'
                },
                {
                    'earliest_book': datetime.date(1995, 1, 15),
                    'num_awards': 7,
                    'id': 3,
                    'name': 'Prentice Hall'
                },
                {
                    'earliest_book': datetime.date(2007, 12, 6),
                    'num_awards': 3,
                    'id': 1,
                    'name': 'Apress'
                },
                {
                    'earliest_book': datetime.date(2008, 3, 3),
                    'num_awards': 1,
                    'id': 2,
                    'name': 'Sams'
                }
            ]
        )
        vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
        self.assertEqual(
            vals,
            {
                "friday_night_closing__max": datetime.time(23, 59, 59),
                "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
            }
        )
    def test_annotate_values_list(self):
        """Annotations appear in values_list() output: multiple fields,
        a single field, flat=True, and values_list() before annotate()
        (grouping by the listed field)."""
        books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("pk", "isbn", "mean_age")
        self.assertEqual(
            list(books), [
                (1, "159059725", 34.5),
            ]
        )
        books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn")
        self.assertEqual(
            list(books), [
                ('159059725',)
            ]
        )
        books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
        self.assertEqual(
            list(books), [
                (34.5,)
            ]
        )
        books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age", flat=True)
        self.assertEqual(list(books), [34.5])
        books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
        self.assertEqual(
            list(books), [
                (Decimal("29.69"), 2),
                (Decimal('23.09'), 1),
                (Decimal('30'), 1),
                (Decimal('75'), 1),
                (Decimal('82.8'), 1),
            ]
        )
    def test_dates_with_aggregation(self):
        """
        Test that .dates() returns a distinct set of dates when applied to a
        QuerySet with aggregation.

        Refs #18056. Previously, .dates() would return distinct (date_kind,
        aggregation) sets, in this case (year, num_authors), so 2008 would be
        returned twice because there are books from 2008 with a different
        number of authors.
        """
        dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
        self.assertQuerysetEqual(
            dates, [
                "datetime.date(1991, 1, 1)",
                "datetime.date(1995, 1, 1)",
                "datetime.date(2007, 1, 1)",
                "datetime.date(2008, 1, 1)"
            ]
        )
def test_values_aggregation(self):
    """Aggregation on top of a values() queryset works (refs #20782)."""
    rating_qs = Book.objects.values('rating')
    result = rating_qs.aggregate(max_rating=Max('rating'))
    self.assertEqual(result['max_rating'], 5)
    # Group by rating, count books per rating, then take the maximum count.
    grouped = rating_qs.annotate(books_per_rating=Count('id'))
    result = grouped.aggregate(Max('books_per_rating'))
    self.assertEqual(result, {'books_per_rating__max': 3})
def test_ticket17424(self):
    """
    Check that doing exclude() on a foreign model after annotate()
    doesn't crash.
    """
    all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk'))
    annotated_books = Book.objects.order_by('pk').annotate(one=Count("id"))
    # The value doesn't matter, we just need any negative
    # constraint on a related model that's a noop.
    excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
    # Try to generate query tree; this used to crash before the fix.
    str(excluded_books.query)
    # The no-op exclude must leave the result set unchanged.
    self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
    # Check internal state: the alias's join_type must stay None.
    self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
    self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
    """
    Check that aggregation over sliced queryset works correctly.
    """
    top_three = Book.objects.all().order_by('-rating')[0:3]
    result = top_three.aggregate(average_top3_rating=Avg('rating'))
    self.assertAlmostEqual(result['average_top3_rating'], 4.5, places=2)
def test_ticket11881(self):
    """
    Check that subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE
    or select_related() stuff.
    """
    qs = Book.objects.all().select_for_update().order_by(
        'pk').select_related('publisher').annotate(max_pk=Max('pk'))
    with CaptureQueriesContext(connection) as captured_queries:
        qs.aggregate(avg_pk=Avg('max_pk'))
    self.assertEqual(len(captured_queries), 1)
    qstr = captured_queries[0]['sql'].lower()
    # SELECT FOR UPDATE must not leak into the aggregation subquery.
    self.assertNotIn('for update', qstr)
    forced_ordering = connection.ops.force_no_ordering()
    if forced_ordering:
        # If the backend needs to force an ordering we make sure it's
        # the only "ORDER BY" clause present in the query.
        self.assertEqual(
            re.findall(r'order by (\w+)', qstr),
            [', '.join(f[1][0] for f in forced_ordering).lower()]
        )
    else:
        self.assertNotIn('order by', qstr)
    # select_related() must not add any joins to the subquery.
    self.assertEqual(qstr.count(' join '), 0)
def test_decimal_max_digits_has_no_effect(self):
    """Summing a DecimalField can exceed the field's own max_digits."""
    Book.objects.all().delete()
    author = Author.objects.first()
    publisher = Publisher.objects.first()
    today = timezone.now()
    # Ten books at 9999.98 each: the sum needs more digits than one price.
    for index in range(10):
        Book.objects.create(
            isbn="abcde{}".format(index), name="none", pages=10,
            rating=4.0, price=9999.98, contact=author,
            publisher=publisher, pubdate=today)
    totals = Book.objects.aggregate(price_sum=Sum('price'))
    self.assertEqual(totals['price_sum'], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
    """aggregate() rejects non-aggregate expressions such as a bare F()."""
    with six.assertRaisesRegex(self, TypeError, 'fail is not an aggregate expression'):
        Book.objects.aggregate(fail=F('price'))
def test_nonfield_annotation(self):
    """Max() accepts a Value() or a raw literal; output_field may be set
    on the Value or on the aggregate itself."""
    book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first()
    self.assertEqual(book.val, 2)
    book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first()
    self.assertEqual(book.val, 2)
    # A bare literal is also accepted when output_field is given.
    book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
    self.assertEqual(book.val, 2)
def test_missing_output_field_raises_error(self):
    """Annotating a raw literal without output_field raises FieldError."""
    with six.assertRaisesRegex(self, FieldError, 'Cannot resolve expression type, unknown output_field'):
        Book.objects.annotate(val=Max(2)).first()
def test_annotation_expressions(self):
    """Sum(F('age') + F('friends__age')) and Sum('age') + Sum('friends__age')
    produce the same per-author totals."""
    authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name')
    authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name')
    for qs in (authors, authors2):
        self.assertEqual(len(qs), 9)
        # None for Brad Dayley: no friends rows to join against.
        self.assertQuerysetEqual(
            qs, [
                ('Adrian Holovaty', 132),
                ('Brad Dayley', None),
                ('Jacob Kaplan-Moss', 129),
                ('James Bennett', 63),
                ('Jeffrey Forcier', 128),
                ('Paul Bissex', 120),
                ('Peter Norvig', 103),
                ('Stuart Russell', 103),
                ('Wesley J. Chun', 176)
            ],
            lambda a: (a.name, a.combined_ages)
        )
def test_aggregation_expressions(self):
    """Sum/Count arithmetic agrees with the built-in Avg aggregate."""
    sum_over_star = Author.objects.aggregate(av_age=Sum('age') / Count('*'))
    self.assertEqual(sum_over_star, {'av_age': 37})
    sum_over_count = Author.objects.aggregate(av_age=Sum('age') / Count('age'))
    self.assertEqual(sum_over_count, {'av_age': 37})
    builtin_avg = Author.objects.aggregate(av_age=Avg('age'))
    self.assertEqual(builtin_avg, {'av_age': Approximate(37.4, places=1)})
def test_order_of_precedence(self):
    """Parentheses control evaluation order in aggregate expressions."""
    rated_four = Book.objects.filter(rating=4)
    # (avg + 2) * 3
    grouped_first = rated_four.aggregate(avg_price=(Avg('price') + 2) * 3)
    self.assertEqual(grouped_first, {'avg_price': Approximate(148.18, places=2)})
    # avg + (2 * 3) -- multiplication binds tighter than addition.
    natural_order = rated_four.aggregate(avg_price=Avg('price') + 2 * 3)
    self.assertEqual(natural_order, {'avg_price': Approximate(53.39, places=2)})
def test_combine_different_types(self):
    """Combining differently-typed fields requires an explicit output_field."""
    with six.assertRaisesRegex(self, FieldError, 'Expression contains mixed types. You must set output_field'):
        Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price')).get(pk=self.b4.pk)
    # With output_field given, the combined sum coerces to that type.
    b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
                               output_field=IntegerField())).get(pk=self.b4.pk)
    self.assertEqual(b1.sums, 383)
    b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
                               output_field=FloatField())).get(pk=self.b4.pk)
    self.assertEqual(b2.sums, 383.69)
    b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
                               output_field=DecimalField())).get(pk=self.b4.pk)
    self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
def test_complex_aggregations_require_kwarg(self):
    """Composite annotations/aggregates cannot auto-generate an alias,
    so they must be passed as keyword arguments."""
    with six.assertRaisesRegex(self, TypeError, 'Complex annotations require an alias'):
        Author.objects.annotate(Sum(F('age') + F('friends__age')))
    with six.assertRaisesRegex(self, TypeError, 'Complex aggregates require an alias'):
        Author.objects.aggregate(Sum('age') / Count('age'))
    with six.assertRaisesRegex(self, TypeError, 'Complex aggregates require an alias'):
        Author.objects.aggregate(Sum(1))
def test_aggregate_over_complex_annotation(self):
    """aggregate() can consume and combine a complex annotation."""
    qs = Author.objects.annotate(
        combined_ages=Sum(F('age') + F('friends__age')))
    age = qs.aggregate(max_combined_age=Max('combined_ages'))
    self.assertEqual(age['max_combined_age'], 176)
    # The annotation can take part in further arithmetic...
    age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2)
    self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
    age = qs.aggregate(
        max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'))
    self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
    # ...and several aggregates over it may be requested at once.
    age = qs.aggregate(
        max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
        sum_combined_age=Sum('combined_ages'))
    self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
    self.assertEqual(age['sum_combined_age'], 954)
    age = qs.aggregate(
        max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
        sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages'))
    self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
    self.assertEqual(age['sum_combined_age_doubled'], 954 * 2)
def test_values_annotation_with_expression(self):
    """Annotations mixing aggregates with F() work on values() querysets."""
    # ensure the F() is promoted to the group by clause
    qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age'))
    a = qs.get(name="Adrian Holovaty")
    self.assertEqual(a['another_age'], 68)
    qs = qs.annotate(friend_count=Count('friends'))
    a = qs.get(name="Adrian Holovaty")
    self.assertEqual(a['friend_count'], 2)
    # Grouping on friends__age yields one row per friend, hence two results.
    qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter(
        name="Adrian Holovaty").order_by('-combined_age')
    self.assertEqual(
        list(qs), [
            {
                "name": 'Adrian Holovaty',
                "another_age": 68,
                "friend_count": 1,
                "combined_age": 69
            },
            {
                "name": 'Adrian Holovaty',
                "another_age": 68,
                "friend_count": 1,
                "combined_age": 63
            }
        ]
    )
    # A subsequent values() call narrows the returned dicts.
    vals = qs.values('name', 'combined_age')
    self.assertEqual(
        list(vals), [
            {
                "name": 'Adrian Holovaty',
                "combined_age": 69
            },
            {
                "name": 'Adrian Holovaty',
                "combined_age": 63
            }
        ]
    )
def test_annotate_values_aggregate(self):
    """Aggregating over an F() alias matches aggregating the raw field."""
    via_alias = (
        Author.objects.annotate(age_alias=F('age'))
        .values('age_alias')
        .aggregate(sum_age=Sum('age_alias'))
    )
    direct = Author.objects.values('age').aggregate(sum_age=Sum('age'))
    self.assertEqual(via_alias['sum_age'], direct['sum_age'])
def test_annotate_over_annotate(self):
    """A second annotate() can aggregate over an earlier F() annotation."""
    via_alias = Author.objects.annotate(age_alias=F('age')).annotate(
        sum_age=Sum('age_alias')).get(name="Adrian Holovaty")
    direct = Author.objects.annotate(sum_age=Sum('age')).get(
        name="Adrian Holovaty")
    self.assertEqual(via_alias.sum_age, direct.sum_age)
def test_annotated_aggregate_over_annotated_aggregate(self):
    """Aggregating an annotation that is itself an aggregate raises FieldError."""
    # Raw string: the pattern contains \( and \) regex escapes, which are
    # invalid escape sequences in a plain string literal (warnings on 3.6+).
    with six.assertRaisesRegex(self, FieldError, r"Cannot compute Sum\('id__max'\): 'id__max' is an aggregate"):
        Book.objects.annotate(Max('id')).annotate(Sum('id__max'))
def test_add_implementation(self):
    """Backends can override an aggregate's SQL via an as_<vendor> method."""
    try:
        # test completely changing how the output is rendered
        def lower_case_function_override(self, compiler, connection):
            sql, params = compiler.compile(self.source_expressions[0])
            substitutions = dict(function=self.function.lower(), expressions=sql)
            substitutions.update(self.extra)
            return self.template % substitutions, params
        setattr(Sum, 'as_' + connection.vendor, lower_case_function_override)
        qs = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
                                   output_field=IntegerField()))
        # The override renders the function name in lower case.
        self.assertEqual(str(qs.query).count('sum('), 1)
        b1 = qs.get(pk=self.b4.pk)
        self.assertEqual(b1.sums, 383)
        # test changing the dict and delegating
        def lower_case_function_super(self, compiler, connection):
            self.extra['function'] = self.function.lower()
            return super(Sum, self).as_sql(compiler, connection)
        setattr(Sum, 'as_' + connection.vendor, lower_case_function_super)
        qs = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
                                   output_field=IntegerField()))
        self.assertEqual(str(qs.query).count('sum('), 1)
        b1 = qs.get(pk=self.b4.pk)
        self.assertEqual(b1.sums, 383)
        # test overriding all parts of the template
        def be_evil(self, compiler, connection):
            substitutions = dict(function='MAX', expressions='2')
            substitutions.update(self.extra)
            return self.template % substitutions, ()
        setattr(Sum, 'as_' + connection.vendor, be_evil)
        qs = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
                                   output_field=IntegerField()))
        # The evil override computes MAX(2) regardless of the expression.
        self.assertEqual(str(qs.query).count('MAX('), 1)
        b1 = qs.get(pk=self.b4.pk)
        self.assertEqual(b1.sums, 2)
    finally:
        # Always restore Sum so the monkeypatch cannot leak into other tests.
        delattr(Sum, 'as_' + connection.vendor)
def test_complex_values_aggregation(self):
    """Arithmetic on aggregates works in both aggregate() and annotate()."""
    by_rating = Book.objects.values('rating')
    doubled = by_rating.aggregate(double_max_rating=Max('rating') + Max('rating'))
    self.assertEqual(doubled['double_max_rating'], 5 * 2)
    shifted = by_rating.annotate(books_per_rating=Count('id') + 5).aggregate(
        Max('books_per_rating'))
    self.assertEqual(shifted, {'books_per_rating__max': 3 + 5})
def test_expression_on_aggregation(self):
    """Plain Func expressions can wrap aggregates and mix in F() references."""
    # Create a plain expression
    class Greatest(Func):
        function = 'GREATEST'
        def as_sqlite(self, compiler, connection):
            # SQLite has no GREATEST; its varargs MAX() plays that role.
            return super(Greatest, self).as_sql(compiler, connection, function='MAX')
    qs = Publisher.objects.annotate(
        price_or_median=Greatest(Avg('book__rating'), Avg('book__price'))
    ).filter(price_or_median__gte=F('num_awards')).order_by('num_awards')
    self.assertQuerysetEqual(
        qs, [1, 3, 7, 9], lambda v: v.num_awards)
    # Mixed aggregate/field arguments require an explicit output_field.
    qs2 = Publisher.objects.annotate(
        rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'),
                             output_field=FloatField())
    ).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards')
    self.assertQuerysetEqual(
        qs2, [1, 3], lambda v: v.num_awards)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_backwards_compatibility(self):
    """Old-style sql.aggregates classes still plug in via add_to_query()."""
    from django.db.models.sql import aggregates as sql_aggregates
    class SqlNewSum(sql_aggregates.Aggregate):
        sql_function = 'SUM'
    class NewSum(Aggregate):
        name = 'Sum'
        def add_to_query(self, query, alias, col, source, is_summary):
            # Legacy hook: build the SQL-layer aggregate and register it
            # directly on the query's annotations.
            klass = SqlNewSum
            aggregate = klass(
                col, source=source, is_summary=is_summary, **self.extra)
            query.annotations[alias] = aggregate
    qs = Author.objects.values('name').annotate(another_age=NewSum('age') + F('age'))
    a = qs.get(name="Adrian Holovaty")
    self.assertEqual(a['another_age'], 68)
| RevelSystems/django | tests/aggregation/tests.py | Python | bsd-3-clause | 41,959 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class CholeskyOpTest(XLATestCase):
  """Tests for the XLA compilation of linalg_ops.cholesky."""

  def _verifyCholeskyBase(self, sess, placeholder, x, chol, verification, atol):
    """Runs the graph and checks the decomposition of x.

    Verifies that `verification` (L @ L^T) reproduces x, that the result
    shape matches, and that each Cholesky factor is lower triangular with
    a strictly positive diagonal.
    """
    chol_np, verification_np = sess.run([chol, verification], {placeholder: x})
    self.assertAllClose(x, verification_np, atol=atol)
    self.assertShapeEqual(x, chol)
    # Check that the cholesky is lower triangular, and has positive diagonal
    # elements.
    if chol_np.shape[-1] > 0:
      chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2],
                                           chol_np.shape[-1]))
      for chol_matrix in chol_reshaped:
        self.assertAllClose(chol_matrix, np.tril(chol_matrix), atol=atol)
        self.assertTrue((np.diag(chol_matrix) > 0.0).all())

  def _verifyCholesky(self, x, atol=1e-6):
    """Computes cholesky(x) under the XLA test scope and checks LL^T == x."""
    with self.test_session() as sess:
      placeholder = array_ops.placeholder(
          dtypes.as_dtype(x.dtype), shape=x.shape)
      with self.test_scope():
        chol = linalg_ops.cholesky(placeholder)
      verification = math_ops.matmul(chol, chol, adjoint_b=True)
      self._verifyCholeskyBase(sess, placeholder, x, chol, verification, atol)

  def testBasic(self):
    data = np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])
    for dtype in self.float_types:
      self._verifyCholesky(data.astype(dtype))

  def testBatch(self):
    for dtype in self.float_types:
      simple_array = np.array(
          [[[1., 0.], [0., 5.]]], dtype=dtype)  # shape (1, 2, 2)
      self._verifyCholesky(simple_array)
      self._verifyCholesky(np.vstack((simple_array, simple_array)))
      odd_sized_array = np.array(
          [[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]], dtype=dtype)
      self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array)))
      # Generate random positive-definite matrices.
      matrices = np.random.rand(10, 5, 5).astype(dtype)
      for i in xrange(10):
        matrices[i] = np.dot(matrices[i].T, matrices[i])
      self._verifyCholesky(matrices, atol=1e-4)

  def testNonSquareMatrix(self):
    for dtype in self.float_types:
      with self.assertRaises(ValueError):
        linalg_ops.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]], dtype=dtype))
      with self.assertRaises(ValueError):
        linalg_ops.cholesky(
            np.array(
                [[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]],
                dtype=dtype))

  def testWrongDimensions(self):
    # A rank-1 tensor is not a valid cholesky input. (The original test
    # repeated the identical assertion twice; the duplicate was removed.)
    for dtype in self.float_types:
      vector = constant_op.constant([1., 2.], dtype=dtype)
      with self.assertRaises(ValueError):
        linalg_ops.cholesky(vector)

  @unittest.skip("Test is slow")
  def testLarge(self):
    n = 200
    shape = (n, n)
    # Diagonally dominant, hence positive definite.
    data = np.ones(shape).astype(np.float32) / (2.0 * n) + np.diag(
        np.ones(n).astype(np.float32))
    self._verifyCholesky(data, atol=1e-4)

  def testMatrixConditionNumbers(self):
    for dtype in self.float_types:
      condition_number = 1000
      size = 20
      # Generate random positive-definite symmetric matrices, and take their
      # Eigendecomposition.
      matrix = np.random.rand(size, size)
      matrix = np.dot(matrix.T, matrix)
      _, w = np.linalg.eigh(matrix)
      # Build new Eigenvalues exponentially distributed between 1 and
      # 1/condition_number
      v = np.exp(-np.log(condition_number) * np.linspace(0, size, size) / size)
      matrix = np.dot(np.dot(w, np.diag(v)), w.T).astype(dtype)
      self._verifyCholesky(matrix, atol=1e-4)
if __name__ == "__main__":
  # Run the test suite when this file is executed directly.
  test.main()
| Kongsea/tensorflow | tensorflow/compiler/tests/cholesky_op_test.py | Python | apache-2.0 | 4,872 |
# -*- coding: utf-8 -*-
################################################################################
#
# WeatherLog: dialogs/calendar_dialog.py
# This dialog enters a date using calendars.
#
################################################################################
# Import GTK for the dialog.
from gi.repository import Gtk
class CalendarDialog(Gtk.Dialog):
    """Dialog for entering a date via a calendar widget."""

    def __init__(self, parent, title, label, day=None, month=None, year=None):
        """Create the dialog.

        parent -- parent window
        title -- header bar title
        label -- header bar subtitle
        day, month, year -- optional initial date for the calendar
        (NOTE(review): Gtk.Calendar.select_month() expects a 0-based
        month -- confirm callers pass it that way)
        """
        Gtk.Dialog.__init__(self, title, parent, Gtk.DialogFlags.MODAL, use_header_bar=True)
        self.add_button("Cancel", Gtk.ResponseType.CANCEL)
        self.add_button("OK", Gtk.ResponseType.OK)
        # Create the header bar.
        header = self.get_header_bar()
        header.set_title(title)
        header.set_subtitle(label)
        # Create the calendar widget.
        info_box = self.get_content_area()
        self.info_cal = Gtk.Calendar()
        info_box.add(self.info_cal)
        # Set the default date -- only when a complete date was supplied.
        # Previously a call with day set but month/year left as None would
        # pass None into select_month().
        if day is not None and month is not None and year is not None:
            self.info_cal.select_month(month, year)
            self.info_cal.select_day(day)
        # Connect 'Enter' key to the OK button.
        ok_btn = self.get_widget_for_response(response_id=Gtk.ResponseType.OK)
        ok_btn.set_can_default(True)
        ok_btn.grab_default()
        # Show the dialog.
        self.show_all()
| achesak/weatherlog | resources/dialogs/calendar_dialog.py | Python | gpl-3.0 | 1,429 |
from pyomo.environ import *
def build_model(model):
    """Attach unit-commitment components to *model*.

    Adds the t=0 generator output variable and the cost-curve
    linearization parameter, then builds the load and line components.
    """
    ####################################################################
    # generator power output at t=0 (initial condition). units are MW. #
    ####################################################################
    model.PowerGeneratedT0 = Var(model.Generators, within=NonNegativeReals)
    ##############################################################################################
    # number of pieces in the linearization of each generator's quadratic cost production curve. #
    ##############################################################################################
    # NL_j: more pieces give a better approximation but a harder problem.
    # The same piece count is used for all genco offer curves.
    model.NumGeneratorCostCurvePieces = Param(within=PositiveIntegers, default=3, mutable=True)
    _build_load(model)
    _build_lines(model)
def _build_load(model):
    """Add the system-wide demand variable and its defining constraint."""
    # Total demand across all buses, used for the reserve requirement.
    model.TotalDemand = Var(model.TimePeriods, within=NonNegativeReals)

    def calculate_total_demand(m, t):
        per_bus = [m.Demand[b, t] for b in m.Buses]
        return m.TotalDemand[t] == sum(per_bus)
    model.CalculateTotalDemand = Constraint(model.TimePeriods, rule=calculate_total_demand)
def _build_lines(model):
    """Add transmission line flow variables and thermal-limit constraints."""
    # Power flowing along each line, at each time period.
    model.LinePower = Var(model.TransmissionLines, model.TimePeriods, initialize=0)

    def lower_line_power_bounds_rule(m, l, t):
        # Limits are only enforced on flagged lines; others are unconstrained.
        if not m.EnforceLine[l]:
            return Constraint.Skip
        return -m.ThermalLimit[l] <= m.LinePower[l, t]
    model.LinePowerConstraintLower = Constraint(model.TransmissionLines, model.TimePeriods, rule=lower_line_power_bounds_rule)

    def upper_line_power_bounds_rule(m, l, t):
        if not m.EnforceLine[l]:
            return Constraint.Skip
        return m.ThermalLimit[l] >= m.LinePower[l, t]
    model.LinePowerConstraintHigher = Constraint(model.TransmissionLines, model.TimePeriods, rule=upper_line_power_bounds_rule)
def _build_non_dispatchable(model):
    """Add the curtailable non-dispatchable (e.g. wind) power variable."""
    def nd_bounds_rule(m, n, t):
        # Wind can be curtailed, so its output is a bounded decision variable.
        lower = m.MinNondispatchablePower[n, t]
        upper = m.MaxNondispatchablePower[n, t]
        return (lower, upper)
    model.NondispatchablePowerUsed = Var(model.NondispatchableGenerators, model.TimePeriods, within=NonNegativeReals, bounds=nd_bounds_rule)
def _build_bus(model):
    """Add bus voltage-angle variables and pin the reference-bus angle."""
    # voltage angles at the buses (S) (lock the first bus at 0) in radians
    model.Angle = Var(model.Buses, model.TimePeriods, within=Reals, bounds=(-3.14159265, 3.14159265))
    def fix_first_angle_rule(m, t):
        # m.Buses[1] is the first bus (Pyomo sets are 1-indexed); it serves
        # as the angle reference.
        return m.Angle[m.Buses[1], t] == 0.0
    model.FixFirstAngle = Constraint(model.TimePeriods, rule=fix_first_angle_rule)
def _build_generators(model):
    """Add commitment, dispatch, and headroom variables for each generator."""
    # indicator variables for each generator, at each time period.
    model.UnitOn = Var(model.Generators, model.TimePeriods, within=Binary, initialize=1)
    # amount of power produced by each generator, at each time period.
    def power_bounds_rule(m, g, t):
        return (m.MinimumPowerOutput[g], m.MaximumPowerOutput[g])
    model.PowerGenerated = Var(model.Generators, model.TimePeriods, within=NonNegativeReals, bounds=power_bounds_rule)
    # maximum power output for each generator, at each time period.
    model.MaximumPowerAvailable = Var(model.Generators, model.TimePeriods, within=NonNegativeReals)
def _build_cost(model):
    """Add per-generator, per-period cost component variables."""
    ###################
    # cost components #
    ###################
    # production cost associated with each generator, for each time period.
    model.ProductionCost = Var(model.Generators, model.TimePeriods, within=NonNegativeReals)
    # startup and shutdown costs for each generator, each time period.
    model.StartupCost = Var(model.Generators, model.TimePeriods, within=NonNegativeReals)
    model.ShutdownCost = Var(model.Generators, model.TimePeriods, within=NonNegativeReals)
    # (implicit) binary denoting whether starting up a generator will cost HotStartCost or ColdStartCost
    model.HotStart = Var(model.Generators, model.TimePeriods, bounds=(0, 1))
def _build_generation_load(model):
    """Add load/generation mismatch (shedding) variables and their
    sign-consistency tolerance constraints."""
    #####################################################
    # load "shedding" can be both positive and negative #
    #####################################################
    model.LoadGenerateMismatch = Var(model.Buses, model.TimePeriods, within=Reals, initialize=0)
    # Positive/negative parts of the per-bus mismatch.
    model.posLoadGenerateMismatch = Var(model.Buses, model.TimePeriods, within=NonNegativeReals, initialize=0)
    model.negLoadGenerateMismatch = Var(model.Buses, model.TimePeriods, within=NonNegativeReals, initialize=0)
    # System-wide mismatch and its positive/negative parts.
    model.GlobalLoadGenerateMismatch = Var(model.TimePeriods, within=Reals, initialize=0)
    model.posGlobalLoadGenerateMismatch = Var(model.TimePeriods, within=NonNegativeReals, initialize=0)
    model.negGlobalLoadGenerateMismatch = Var(model.TimePeriods, within=NonNegativeReals, initialize=0)
    # the following constraints are necessary, at least in the case of CPLEX 12.4, to prevent
    # the appearance of load generation mismatch component values in the range of *negative* e-5.
    # what these small negative values do is to cause the optimal objective to be a very large negative,
    # due to obviously large penalty values for under or over-generation. JPW would call this a heuristic
    # at this point, but it does seem to work broadly. we tried a single global constraint, across all
    # buses, but that failed to correct the problem, and caused the solve times to explode.
    def pos_load_generate_mismatch_tolerance_rule(m, b):
        return sum((m.posLoadGenerateMismatch[b, t] for t in m.TimePeriods)) >= 0.0
    model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule)
    def neg_load_generate_mismatch_tolerance_rule(m, b):
        return sum((m.negLoadGenerateMismatch[b, t] for t in m.TimePeriods)) >= 0.0
    model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule)
def _build_stage_costs(model):
    # Intentional no-op placeholder: per-stage cost components are not
    # built here.
    pass
def _build_miscellaneous(model):
##############################
# Storage decision variables #
##############################
# binary variables for storage (input/output are semicontinuous)
model.OutputStorage = Var(model.Storage, model.TimePeriods, within=Binary)
model.InputStorage = Var(model.Storage, model.TimePeriods, within=Binary)
# amount of output power of each storage unit, at each time period
def power_output_storage_bounds_rule(m, s, t):
return (0, m.MaximumPowerOutputStorage[s])
model.PowerOutputStorage = Var(model.Storage, model.TimePeriods, within=NonNegativeReals, bounds=power_output_storage_bounds_rule)
# amount of input power of each storage unit, at each time period
def power_input_storage_bounds_rule(m, s, t):
return (0, m.MaximumPowerInputStorage[s])
model.PowerInputStorage = Var(model.Storage, model.TimePeriods, within=NonNegativeReals, bounds=power_input_storage_bounds_rule)
# state of charge of each storage unit, at each time period
model.SocStorage = Var(model.Storage, model.TimePeriods, within=PercentFraction)
#################################################
# per-stage cost variables - necessary for PySP #
#################################################
#
# Constraints
#
# Power balance at each node (S)
def power_balance(m, b, t):
# bus b, time t (S)
return sum((1 - m.GeneratorForcedOutage[g,t]) * m.GeneratorBusContributionFactor[g, b] * m.PowerGenerated[g, t] for g in m.GeneratorsAtBus[b]) \
+ sum(m.PowerOutputStorage[s, t] for s in m.StorageAtBus[b])\
- sum(m.PowerInputStorage[s, t] for s in m.StorageAtBus[b])\
+ sum(m.NondispatchablePowerUsed[g, t] for g in m.NondispatchableGeneratorsAtBus[b]) \
+ sum(m.LinePower[l,t] for l in m.LinesTo[b]) \
- sum(m.LinePower[l,t] for l in m.LinesFrom[b]) \
+ m.LoadGenerateMismatch[b,t] \
== m.Demand[b, t]
model.PowerBalance = Constraint(model.Buses, model.TimePeriods, rule=power_balance)
# give meaning to the positive and negative parts of the mismatch
def posneg_rule(m, b, t):
return m.posLoadGenerateMismatch[b, t] - m.negLoadGenerateMismatch[b, t] == m.LoadGenerateMismatch[b, t]
model.Defineposneg_Mismatch = Constraint(model.Buses, model.TimePeriods, rule = posneg_rule)
def global_posneg_rule(m, t):
return m.posGlobalLoadGenerateMismatch[t] - m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t]
model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods, rule = global_posneg_rule)
# ensure there is sufficient maximal power output available to meet both the
# demand and the spinning reserve requirements in each time period.
# encodes Constraint 3 in Carrion and Arroyo.
model.LoggingCheck26 = BuildCheck(rule=debugger("Enfore reserve requirements rule"))
def enforce_reserve_requirements_rule(m, t):
return sum(m.MaximumPowerAvailable[g, t] for g in m.Generators) \
+ sum(m.NondispatchablePowerUsed[n,t] for n in m.NondispatchableGenerators) \
+ sum(m.PowerOutputStorage[s,t] for s in m.Storage) \
== \
m.TotalDemand[t] + m.ReserveRequirement[t] + m.GlobalLoadGenerateMismatch[t]
model.EnforceReserveRequirements = Constraint(model.TimePeriods, rule=enforce_reserve_requirements_rule)
# CASM: zonal reserve requirement - ensure there is enough "regulation" reserve
# in each reserve zone and each time period - This is not an accurate representation or reg up reserves.
# It will be refined after verification with Alstom. It's just to see if the zonal reserve requirement
# works.
model.LoggingCheck27 = BuildCheck(rule=debugger("Create regulating reserve constraints"))
model.RegulatingReserveUpAvailable = Var(model.Generators, model.TimePeriods, initialize=0.0, within=NonNegativeReals)
def calculate_regulating_reserve_up_available_per_generator(m, g, t):
return m.RegulatingReserveUpAvailable[g, t] == m.MaximumPowerAvailable[g,t] - m.PowerGenerated[g,t]
model.CalculateRegulatingReserveUpPerGenerator = Constraint(model.Generators, model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator)
def enforce_zonal_reserve_requirement_rule(m, rz, t):
return sum(m.RegulatingReserveUpAvailable[g,t] for g in m.GeneratorsInReserveZone[rz]) >= m.ZonalReserveRequirement[rz, t]
model.EnforceZonalReserveRequirements = Constraint(model.ReserveZones, model.TimePeriods, rule=enforce_zonal_reserve_requirement_rule)
############################################
# generation limit and ramping constraints #
############################################
# enforce the generator power output limits on a per-period basis.
# the maximum power available at any given time period is dynamic,
# bounded from above by the maximum generator output.
# the following three constraints encode Constraints 16 and 17 defined in Carrion and Arroyo.
# NOTE: The expression below is what we really want - however, due to a pyomo design feature, we have to split it into two constraints:
# m.MinimumPowerOutput[g] * m.UnitOn[g, t] <= m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g, t] <= m.MaximumPowerOutput[g] * m.UnitOn[g, t]
model.LoggingCheck28 = BuildCheck(rule=debugger("Generator power output constraints"))
def enforce_generator_output_limits_rule_part_a(m, g, t):
return m.MinimumPowerOutput[g] * m.UnitOn[g, t] <= m.PowerGenerated[g,t]
model.EnforceGeneratorOutputLimitsPartA = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a)
def enforce_generator_output_limits_rule_part_b(m, g, t):
return m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g, t]
model.EnforceGeneratorOutputLimitsPartB = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b)
def enforce_generator_output_limits_rule_part_c(m, g, t):
return m.MaximumPowerAvailable[g,t] <= m.MaximumPowerOutput[g] * m.UnitOn[g, t]
model.EnforceGeneratorOutputLimitsPartC = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c)
# note: as of 9 Feb 2012 wind is done using Var bounds
# impose upper bounds on the maximum power available for each generator in each time period,
# based on standard and start-up ramp limits.
# the following constraint encodes Constraint 18 defined in Carrion and Arroyo.
def enforce_max_available_ramp_up_rates_rule(m, g, t):
# 4 cases, split by (t-1, t) unit status (RHS is defined as the delta from m.PowerGenerated[g, t-1])
# (0, 0) - unit staying off: RHS = maximum generator output (degenerate upper bound due to unit being off)
# (0, 1) - unit switching on: RHS = startup ramp limit
# (1, 0) - unit switching off: RHS = standard ramp limit minus startup ramp limit plus maximum power output (degenerate upper bound due to unit off)
# (1, 1) - unit staying on: RHS = standard ramp limit plus power generated in previous time period
if t == 0:
return m.MaximumPowerAvailable[g, t] <= m.PowerGeneratedT0[g] + \
m.ScaledNominalRampUpLimit[g] * m.UnitOnT0[g] + \
m.ScaledStartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) + \
m.MaximumPowerOutput[g] * (1 - m.UnitOn[g, t])
else:
return m.MaximumPowerAvailable[g, t] <= m.PowerGenerated[g, t-1] + \
m.ScaledNominalRampUpLimit[g] * m.UnitOn[g, t-1] + \
m.ScaledStartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) + \
m.MaximumPowerOutput[g] * (1 - m.UnitOn[g, t])
    model.EnforceMaxAvailableRampUpRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule)
    # the following constraint encodes Constraint 19 defined in Carrion and Arroyo.
    def enforce_max_available_ramp_down_rates_rule(m, g, t):
        """Limit MaximumPowerAvailable at t-1 by what period t can absorb.

        While the unit stays on the bound degenerates to MaximumPowerOutput;
        when the unit switches off between t-1 and t, the bound tightens to
        the shutdown ramp limit.
        """
        # 4 cases, split by (t, t+1) unit status
        # (0, 0) - unit staying off: RHS = 0 (degenerate upper bound)
        # (0, 1) - unit switching on: RHS = maximum generator output minus shutdown ramp limit (degenerate upper bound) - this is the strangest case.
        # (1, 0) - unit switching off: RHS = shutdown ramp limit
        # (1, 1) - unit staying on: RHS = maximum generator output (degenerate upper bound)
        #NOTE: As expressed in Carrion-Arroyo and subsequently here, this constraint does NOT consider ramp down from initial conditions to t=1!
        #if t == value(m.NumTimePeriods):
        #    return Constraint.Skip
        #else:
        #    return m.MaximumPowerAvailable[g, t] <= \
        #            m.MaximumPowerOutput[g] * m.UnitOn[g, t+1] + \
        #            m.ScaledShutdownRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t+1])
        #This version fixes the problem with ignoring initial conditions mentioned in the above note
        if t == 0:
            # Not 100% sure of this one since there is no MaximumPowerAvailableT0
            # (PowerGeneratedT0 stands in for the pre-horizon available power).
            return m.PowerGeneratedT0[g] <= \
                   m.MaximumPowerOutput[g] * m.UnitOn[g,t] + \
                   m.ScaledShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g,t])
        else:
            return m.MaximumPowerAvailable[g, t-1] <= \
                   m.MaximumPowerOutput[g] * m.UnitOn[g, t] + \
                   m.ScaledShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t])
    model.EnforceMaxAvailableRampDownRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule)
# the following constraint encodes Constraint 20 defined in Carrion and Arroyo.
def enforce_ramp_down_limits_rule(m, g, t):
# 4 cases, split by (t-1, t) unit status:
# (0, 0) - unit staying off: RHS = maximum generator output (degenerate upper bound)
# (0, 1) - unit switching on: RHS = standard ramp-down limit minus shutdown ramp limit plus maximum generator output - this is the strangest case.
#NOTE: This may never be physically true, but if a generator has ScaledShutdownRampLimit >> MaximumPowerOutput, this constraint causes problems
# (1, 0) - unit switching off: RHS = shutdown ramp limit
# (1, 1) - unit staying on: RHS = standard ramp-down limit
if t == 0:
return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] <= \
m.ScaledNominalRampDownLimit[g] * m.UnitOn[g, t] + \
m.ScaledShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) + \
m.MaximumPowerOutput[g] * (1 - m.UnitOnT0[g])
else:
return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] <= \
m.ScaledNominalRampDownLimit[g] * m.UnitOn[g, t] + \
m.ScaledShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) + \
m.MaximumPowerOutput[g] * (1 - m.UnitOn[g, t-1])
model.EnforceScaledNominalRampDownLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_down_limits_rule)
    #######################################
    # energy storage bounding constraints #
    #######################################
    # NOTE: The expressions below are what we really want - however, due to a pyomo design feature, we have to split it into two constraints:
    # m.MinimumPowerInputStorage[g] * m.InputStorage[g, t] <= m.StoragePowerInput[g,t] <= m.MaximumPowerInputStorage[g] * m.InputStorage[g, t]
    # m.MinimumPowerOutputStorage[g] * m.OutputStorage[g, t] <= m.StoragePowerOutput[g,t] <= m.MaximumPowerOutputStorage[g] * m.OutputStorage[g, t]
    model.LoggingCheck29 = BuildCheck(rule=debugger("Storage energy constraints"))
    # Lower half of the input bound (InputStorage acts as the charging indicator).
    def enforce_storage_input_limits_rule_part_a(m, s, t):
        return m.MinimumPowerInputStorage[s] * m.InputStorage[s, t] <= m.PowerInputStorage[s,t]
    model.EnforceStorageInputLimitsPartA = Constraint(model.Storage, model.TimePeriods, rule=enforce_storage_input_limits_rule_part_a)
    # Upper half of the input bound: input power is forced to zero unless charging.
    def enforce_storage_input_limits_rule_part_b(m, s, t):
        return m.PowerInputStorage[s,t] <= m.MaximumPowerInputStorage[s] * m.InputStorage[s, t]
    model.EnforceStorageInputLimitsPartB = Constraint(model.Storage, model.TimePeriods, rule=enforce_storage_input_limits_rule_part_b)
    # Lower half of the output bound, analogous to part A above.
    def enforce_storage_output_limits_rule_part_a(m, s, t):
        return m.MinimumPowerOutputStorage[s] * m.OutputStorage[s, t] <= m.PowerOutputStorage[s,t]
    model.EnforceStorageOutputLimitsPartA = Constraint(model.Storage, model.TimePeriods, rule=enforce_storage_output_limits_rule_part_a)
    # Upper half of the output bound, analogous to part B above.
    def enforce_storage_output_limits_rule_part_b(m, s, t):
        return m.PowerOutputStorage[s,t] <= m.MaximumPowerOutputStorage[s] * m.OutputStorage[s, t]
    model.EnforceStorageOutputLimitsPartB = Constraint(model.Storage, model.TimePeriods, rule=enforce_storage_output_limits_rule_part_b)
    def enforce_input_output_exclusivity_rule(m, s, t):
        # NOTE(review): this bounds the sum of the *power* variables by 1;
        # exclusivity was presumably meant for the binary indicators
        # (m.InputStorage[s, t] + m.OutputStorage[s, t] <= 1) -- confirm
        # before re-enabling the constraint below.
        return m.PowerOutputStorage[s,t] + m.PowerInputStorage[s,t] <= 1
    #model.EnforceInputOutputExclusivity = Constraint(model.Storage, model.TimePeriods, rule=enforce_input_output_exclusivity_rule)
#####################################
# energy storage ramping contraints #
#####################################
def enforce_ramp_up_rates_power_output_storage_rule(m, s, t):
if t == 0:
return m.PowerOutputStorage[s, t] <= m.StoragePowerOutputOnT0[s] + m.ScaledNominalRampUpLimitStorageOutput[s]
else:
return m.PowerOutputStorage[s, t] <= m.PowerOutputStorage[s, t-1] + m.ScaledNominalRampUpLimitStorageOutput[s]
model.EnforceStorageOutputRampUpRates = Constraint(model.Storage, model.TimePeriods, rule=enforce_ramp_up_rates_power_output_storage_rule)
def enforce_ramp_down_rates_power_output_storage_rule(m, s, t):
if t == 0:
return m.PowerOutputStorage[s, t] >= m.StoragePowerOutputOnT0[s] - m.ScaledNominalRampDownLimitStorageOutput[s]
else:
return m.PowerOutputStorage[s, t] >= m.PowerOutputStorage[s, t-1] - m.ScaledNominalRampDownLimitStorageOutput[s]
model.EnforceStorageOutputRampDownRates = Constraint(model.Storage, model.TimePeriods, rule=enforce_ramp_down_rates_power_output_storage_rule)
def enforce_ramp_up_rates_power_input_storage_rule(m, s, t):
if t == 0:
return m.PowerInputStorage[s, t] <= m.StoragePowerInputOnT0[s] + m.ScaledNominalRampUpLimitStorageInput[s]
else:
return m.PowerInputStorage[s, t] <= m.PowerInputStorage[s, t-1] + m.ScaledNominalRampUpLimitStorageInput[s]
model.EnforceStorageInputRampUpRates = Constraint(model.Storage, model.TimePeriods, rule=enforce_ramp_up_rates_power_input_storage_rule)
def enforce_ramp_down_rates_power_input_storage_rule(m, s, t):
if t == 0:
return m.PowerInputStorage[s, t] >= m.StoragePowerInputOnT0[s] - m.ScaledNominalRampDownLimitStorageInput[s]
else:
return m.PowerInputStorage[s, t] >= m.PowerInputStorage[s, t-1] - m.ScaledNominalRampDownLimitStorageInput[s]
model.EnforceStorageInputRampDownRates = Constraint(model.Storage, model.TimePeriods, rule=enforce_ramp_down_rates_power_input_storage_rule)
##########################################
# storage energy conservation constraint #
##########################################
def energy_conservation_rule(m, s, t):
# storage s, time t
if t == 0:
return m.SocStorage[s, t] == m.StorageSocOnT0[s] + \
(- m.PowerOutputStorage[s, t] + m.PowerInputStorage[s,t]*m.EfficiencyEnergyStorage[s])/m.MaximumEnergyStorage[s]
else:
return m.SocStorage[s, t] == m.SocStorage[s, t-1] + \
(- m.PowerOutputStorage[s, t] + m.PowerInputStorage[s,t]*m.EfficiencyEnergyStorage[s])/m.MaximumEnergyStorage[s]
model.EnergyConservation = Constraint(model.Storage, model.TimePeriods, rule=energy_conservation_rule)
##################################
# storage end-point constraints #
##################################
def storage_end_point_soc_rule(m, s):
# storage s, last time period
return m.SocStorage[s, value(m.NumTimePeriods)] == m.EndPointSocStorage[s]
#model.EnforceEndPointSocStorage = Constraint(model.Storage, rule=storage_end_point_soc_rule)
    #############################################
    # constraints for computing cost components #
    #############################################
    # compute the per-generator, per-time period production costs. this is a "simple" piecewise linear construct.
    # the first argument to piecewise is the index set. the second and third arguments are respectively the input and output variables.
    if config['linearized_cost_curve']:
        model.ComputeProductionCosts = Piecewise(model.Generators * model.TimePeriods, model.ProductionCost, model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints, f_rule=production_cost_function, pw_constr_type='LB')
    else:
        # Quadratic fallback: ProductionCost >= A0 + A1*P + A2*P^2.
        # NOTE(review): the cost coefficients are frozen with value() at model
        # construction time, so later Param updates are not reflected -- confirm
        # this is intended.
        def compute_production_cost_rule(m, g, t):
            return m.ProductionCost[g, t] >= value(m.ProductionCostA0[g]) + value(m.ProductionCostA1[g]) * m.PowerGenerated[g, t] + value(m.ProductionCostA2[g]) * m.PowerGenerated[g, t] * m.PowerGenerated[g, t]
        model.ComputeProductionCosts = Constraint(model.Generators, model.TimePeriods, rule=compute_production_cost_rule)
    # compute the total production costs, across all generators and time periods.
    """def compute_total_production_cost_rule(m):
        return m.TotalProductionCost == sum(m.ProductionCost[g, t] for g in m.Generators for t in m.TimePeriods)
    model.ComputeTotalProductionCost = Constraint(rule=compute_total_production_cost_rule)"""
    ####################################### KICK THAT ONE OUT
    model.LoggingCheck30 = BuildCheck(rule=debugger("Start up and shutdown costs"))
    # compute startup costs for each generator, for each time period
    def compute_hot_start_rule(m, g, t):
        """Allow HotStart[g, t] = 1 only if the unit was on recently enough.

        A startup is "hot" when the unit ran within the preceding
        ColdStartHours[g] periods (counting the initial on-state for early
        periods), otherwise it must be a cold start.
        """
        if t <= value(m.ColdStartHours[g]):
            # Here t - ColdStartHours <= 0, so this compares the shortfall
            # against the initial on-time UnitOnT0State.
            # NOTE(review): a <= test with a non-positive left-hand side is
            # almost always true -- verify this matches the intended
            # hot-start window definition.
            if t - value(m.ColdStartHours[g]) <= value(m.UnitOnT0State[g]):
                # Guaranteed hot: fix the binary instead of emitting a constraint.
                m.HotStart[g, t] = 1
                m.HotStart[g, t].fixed = True
                return Constraint.Skip
            else:
                return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in range(1, t) )
        else:
            # NOTE(review): m.ColdStartHours[g] is used inside range() without
            # value(), unlike the branch above -- range() needs an int, so
            # this relies on the Param being index-coercible; confirm.
            return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in range(t - m.ColdStartHours[g], t) )
    model.ComputeHotStart = Constraint(model.Generators, model.TimePeriods, rule=compute_hot_start_rule)
    def compute_startup_costs_rule_minusM(m, g, t):
        """Lower-bound StartupCost in big-M form: cold-start cost minus the
        hot-start discount, relaxed to zero unless the unit starts at t."""
        if t == 0:
            return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \
                - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOnT0[g]))
        else:
            return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \
                - m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOn[g, t-1]))
    model.ComputeStartupCostsMinusM = Constraint(model.Generators, model.TimePeriods, rule=compute_startup_costs_rule_minusM)
# compute the per-generator, per-time period shutdown costs.
def compute_shutdown_costs_rule(m, g, t):
if t == 0:
return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOnT0[g] - m.UnitOn[g, t])
else:
return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t])
model.ComputeShutdownCosts = Constraint(model.Generators, model.TimePeriods, rule=compute_shutdown_costs_rule)
"""# compute the total startup and shutdown costs, across all generators and time periods.
def compute_total_fixed_cost_rule(m):
return m.TotalFixedCost == sum(m.StartupCost[g, t] + m.ShutdownCost[g, t] for g in m.Generators for t in m.TimePeriods)
model.ComputeTotalFixedCost = Constraint(rule=compute_total_fixed_cost_rule)"""
########################################## KICK THAT OUT ?
#################################
# InterfaceConstraint
#################################
model.LoggingCheck31 = BuildCheck(rule=debugger("Creating interface power constraints"))
def interface_rule(m, i, t):
return sum(m.LinePower[l, t]*m.LinePowerDirectionCoefficient[i, l] for l in m.LinesInInterface[i]) <= m.InterfaceLimit[i]
model.InterfacePowerConstraint = Constraint(model.Interfaces, model.TimePeriods, rule=interface_rule)
    #######################
    # up-time constraints #
    #######################
    model.LoggingCheck32 = BuildCheck(rule=debugger("Enforce minimum up and down time constraints"))
    # constraint due to initial conditions.
    def enforce_up_time_constraints_initial(m, g):
        """Force the unit to stay on for the periods still owed from T0."""
        if value(m.InitialTimePeriodsOnLine[g]) == 0:
            return Constraint.Skip
        # The sum of "off" indicators over the owed periods must be zero.
        return sum((1 - m.UnitOn[g, t]) for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOnLine[g])) == 0.0
    model.EnforceUpTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_up_time_constraints_initial)
    # constraint for each time period after that not involving the initial condition.
    @simple_constraint_rule
    def enforce_up_time_constraints_subsequent(m, g, t):
        """Minimum up-time for startups not covered by the initial condition."""
        if t <= value(m.InitialTimePeriodsOnLine[g]):
            # handled by the EnforceUpTimeConstraintInitial constraint.
            return Constraint.Skip
        elif t <= (value(m.NumTimePeriods - m.ScaledMinimumUpTime[g]) + 1):
            # the right-hand side terms below are only positive if the unit was off in the previous time period but on in this one =>
            # the value is the minimum number of subsequent consecutive time periods that the unit is required to be on.
            if t == 0:
                return sum(m.UnitOn[g, n] for n in m.TimePeriods if n >= t and n <= (t + value(m.ScaledMinimumUpTime[g]) - 1)) >= \
                    m.ScaledMinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOnT0[g])
            else:
                return sum(m.UnitOn[g, n] for n in m.TimePeriods if n >= t and n <= (t + value(m.ScaledMinimumUpTime[g]) - 1)) >= \
                    m.ScaledMinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1])
        else:
            # handle the final (ScaledMinimumUpTime[g] - 1) time periods - if a unit is started up in
            # this interval, it must remain on-line until the end of the time span.
            if t == 0: # can happen when small time horizons are specified
                return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOnT0[g])) for n in m.TimePeriods if n >= t) >= 0.0
            else:
                return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) for n in m.TimePeriods if n >= t) >= 0.0
    model.EnforceUpTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_up_time_constraints_subsequent)
    #########################
    # down-time constraints #
    #########################
    # constraint due to initial conditions.
    def enforce_down_time_constraints_initial(m, g):
        """Force the unit to stay off for the periods still owed from T0."""
        if value(m.InitialTimePeriodsOffLine[g]) == 0:
            return Constraint.Skip
        # The sum of "on" indicators over the owed periods must be zero.
        return sum(m.UnitOn[g, t] for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOffLine[g])) == 0.0
    model.EnforceDownTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_down_time_constraints_initial)
    # constraint for each time period after that not involving the initial condition.
    @simple_constraint_rule
    def enforce_down_time_constraints_subsequent(m, g, t):
        """Minimum down-time for shutdowns not covered by the initial condition."""
        if t <= value(m.InitialTimePeriodsOffLine[g]):
            # handled by the EnforceDownTimeConstraintInitial constraint.
            return Constraint.Skip
        elif t <= (value(m.NumTimePeriods - m.ScaledMinimumDownTime[g]) + 1):
            # the right-hand side terms below are only positive if the unit was off in the previous time period but on in this one =>
            # the value is the minimum number of subsequent consecutive time periods that the unit is required to be on.
            # NOTE(review): the comment above appears copy-pasted from the
            # up-time rule; here the RHS is positive when the unit shuts down
            # (on -> off) at t, and it forces subsequent periods OFF.
            if t == 0:
                return sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods if n >= t and n <= (t + value(m.ScaledMinimumDownTime[g]) - 1)) >= \
                    m.ScaledMinimumDownTime[g] * (m.UnitOnT0[g] - m.UnitOn[g, t])
            else:
                return sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods if n >= t and n <= (t + value(m.ScaledMinimumDownTime[g]) - 1)) >= \
                    m.ScaledMinimumDownTime[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t])
        else:
            # handle the final (ScaledMinimumDownTime[g] - 1) time periods - if a unit is shut down in
            # this interval, it must remain off-line until the end of the time span.
            if t == 0: # can happen when small time horizons are specified
                return sum(((1 - m.UnitOn[g, n]) - (m.UnitOnT0[g] - m.UnitOn[g, t])) for n in m.TimePeriods if n >= t) >= 0.0
            else:
                return sum(((1 - m.UnitOn[g, n]) - (m.UnitOn[g, t-1] - m.UnitOn[g, t])) for n in m.TimePeriods if n >= t) >= 0.0
    model.EnforceDownTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_down_time_constraints_subsequent)
    #
    # Cost computations
    #
    def commitment_in_stage_st_cost_rule(m, st):
        """Stage commitment cost: startup + shutdown costs plus the minimum
        (no-load) production cost for every on-line generator period."""
        return m.CommitmentStageCost[st] == (sum(m.StartupCost[g,t] + m.ShutdownCost[g,t] for g in m.Generators for t in m.CommitmentTimeInStage[st]) + sum(sum(m.UnitOn[g,t] for t in m.CommitmentTimeInStage[st]) * m.MinimumProductionCost[g] * m.TimePeriodLength for g in m.Generators))
    model.Compute_commitment_in_stage_st_cost = Constraint(model.StageSet, rule = commitment_in_stage_st_cost_rule)
    ### NEW COMMITMENT COST RULE
    #
    def generation_in_stage_st_cost_rule(m, st):
        """Stage generation cost: production costs plus the penalty on both
        per-bus and global load/generation mismatch slacks."""
        return m.GenerationStageCost[st] == sum(m.ProductionCost[g, t] for g in m.Generators for t in m.GenerationTimeInStage[st]) + m.LoadMismatchPenalty * \
        (sum(m.posLoadGenerateMismatch[b, t] + m.negLoadGenerateMismatch[b, t] for b in m.Buses for t in m.GenerationTimeInStage[st]) + \
        sum(m.posGlobalLoadGenerateMismatch[t] + m.negGlobalLoadGenerateMismatch[t] for t in m.GenerationTimeInStage[st]))
    model.Compute_generation_in_stage_st_cost = Constraint(model.StageSet, rule = generation_in_stage_st_cost_rule)
    ### NEW GENERATION COST RULE
    def StageCost_rule(m, st):
        # A stage's total cost is its generation plus commitment components.
        return m.StageCost[st] == m.GenerationStageCost[st] + m.CommitmentStageCost[st]
    model.Compute_Stage_Cost = Constraint(model.StageSet, rule = StageCost_rule)
    #
    # Objectives
    #
    model.LoggingCheck33 = BuildCheck(rule=debugger("Create objective function"))
    def total_cost_objective_rule(m):
        # Minimize the sum of per-stage costs.
        return sum(m.StageCost[st] for st in m.StageSet)
    model.TotalCostObjective = Objective(rule=total_cost_objective_rule, sense=minimize)
    # Create a 'dual' suffix component on the instance
    # so the solver plugin will know which suffixes to collect
    # Export and import floating point data
    # model.dual = Suffix(direction=Suffix.IMPORT_EXPORT)
    return model
| kdheepak/psst | psst/model/builder.py | Python | mit | 33,803 |
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The shelved mode extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions as exts
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
# Policy authorizers for the shelve/shelveOffload/unshelve server actions
# (checked against the "compute" policy rules before each action runs).
auth_shelve = exts.extension_authorizer('compute', 'shelve')
auth_shelve_offload = exts.extension_authorizer('compute', 'shelveOffload')
auth_unshelve = exts.extension_authorizer('compute', 'unshelve')
class ShelveController(wsgi.Controller):
    """Controller exposing the shelve/shelveOffload/unshelve server actions."""

    def __init__(self, *args, **kwargs):
        super(ShelveController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    def _get_instance(self, context, instance_id):
        """Load the instance or translate a missing one into HTTP 404."""
        try:
            return self.compute_api.get(context, instance_id,
                                        want_objects=True)
        except exception.InstanceNotFound:
            msg = _("Server not found")
            # Use explanation= so the message appears in the fault body;
            # passing it positionally fills webob's internal `detail` instead.
            raise exc.HTTPNotFound(explanation=msg)

    def _do_action(self, api_call, context, instance, action_name):
        """Run a compute-API state change, mapping errors to HTTP 409.

        All three shelve actions share the same error handling: a locked
        instance or an invalid task/vm state becomes HTTPConflict, and
        success is an empty 202 response.
        """
        try:
            api_call(context, instance)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  action_name)
        return webob.Response(status_int=202)

    @wsgi.action('shelve')
    def _shelve(self, req, id, body):
        """Move an instance into shelved mode."""
        context = req.environ["nova.context"]
        auth_shelve(context)
        instance = self._get_instance(context, id)
        return self._do_action(self.compute_api.shelve, context, instance,
                               'shelve')

    @wsgi.action('shelveOffload')
    def _shelve_offload(self, req, id, body):
        """Force removal of a shelved instance from the compute node."""
        context = req.environ["nova.context"]
        auth_shelve_offload(context)
        instance = self._get_instance(context, id)
        return self._do_action(self.compute_api.shelve_offload, context,
                               instance, 'shelveOffload')

    @wsgi.action('unshelve')
    def _unshelve(self, req, id, body):
        """Restore an instance from shelved mode."""
        context = req.environ["nova.context"]
        auth_unshelve(context)
        instance = self._get_instance(context, id)
        return self._do_action(self.compute_api.unshelve, context, instance,
                               'unshelve')
class Shelve(exts.ExtensionDescriptor):
    """Instance shelve mode."""

    name = "Shelve"
    alias = "os-shelve"
    namespace = "http://docs.openstack.org/compute/ext/shelve/api/v1.1"
    updated = "2013-04-06T00:00:00+00:00"

    def get_controller_extensions(self):
        """Attach the shelve actions to the 'servers' resource."""
        return [exts.ControllerExtension(self, 'servers', ShelveController())]
| OpenAcademy-OpenStack/nova-scheduler | nova/api/openstack/compute/contrib/shelve.py | Python | apache-2.0 | 4,154 |
#
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
Fetch strategies are used to download source code into a staging area
in order to build it. They need to define the following methods:
* fetch()
This should attempt to download/check out source from somewhere.
* check()
Apply a checksum to the downloaded source code, e.g. for an archive.
May not do anything if the fetch method was safe to begin with.
* expand()
Expand (e.g., an archive) downloaded file to source.
* reset()
Restore original state of downloaded code. Used by clean commands.
This may just remove the expanded source and re-expand an archive,
or it may run something like git reset --hard.
* archive()
Archive a source directory, e.g. for creating a mirror.
"""
import os
import sys
import re
import shutil
import copy
from functools import wraps
from six import string_types, with_metaclass
import llnl.util.tty as tty
from llnl.util.filesystem import working_dir, mkdirp
import spack.config
import spack.error
import spack.util.crypto as crypto
import spack.util.pattern as pattern
from spack.util.executable import which
from spack.util.string import comma_or
from spack.version import Version, ver
from spack.util.compression import decompressor_for, extension
#: List of all fetch strategies, created by FetchStrategy metaclass.
all_strategies = []
def _needs_stage(fun):
"""Many methods on fetch strategies require a stage to be set
using set_stage(). This decorator adds a check for self.stage."""
@wraps(fun)
def wrapper(self, *args, **kwargs):
if not self.stage:
raise NoStageError(fun)
return fun(self, *args, **kwargs)
return wrapper
class FSMeta(type):
    """Metaclass that records every enabled fetch strategy in all_strategies."""
    def __init__(cls, name, bases, namespace):
        super(FSMeta, cls).__init__(name, bases, namespace)
        # Abstract bases declare enabled = False and stay out of the registry.
        if cls.enabled:
            all_strategies.append(cls)
class FetchStrategy(with_metaclass(FSMeta, object)):
    """Superclass of all fetch strategies."""

    enabled = False  # Non-abstract subclasses should be enabled.
    required_attributes = None  # Attributes required in version() args.

    def __init__(self):
        # The stage is assigned late (by Stage via set_stage) so that
        # strategies can be constructed at package-construction time,
        # before anything is actually fetched.
        self.stage = None

    def set_stage(self, stage):
        """Called by Stage before any fetching methods run on the stage."""
        self.stage = stage

    # -- methods subclasses are expected to implement --------------------
    def fetch(self):
        """Fetch source code archive or repo.

        Returns:
            bool: True on success, False on failure.
        """

    def check(self):
        """Checksum the archive fetched by this FetchStrategy."""

    def expand(self):
        """Expand the downloaded archive."""

    def reset(self):
        """Revert to freshly downloaded state.

        For archive files, this may just re-expand the archive.
        """

    def archive(self, destination):
        """Create an archive of the downloaded data for a mirror.

        For downloaded files, this should preserve the checksum of the
        original file. For repositories, it should just create an
        expandable tarball out of the downloaded repository.
        """

    @property
    def cachable(self):
        """Whether fetcher is capable of caching the resource it retrieves.

        This generally is determined by whether the resource is
        identifiably associated with a specific package version.

        Returns:
            bool: True if can cache, False otherwise.
        """

    def source_id(self):
        """A unique ID for the source.

        The returned value is added to the content which determines the full
        hash for a package using `str()`.
        """
        raise NotImplementedError

    def __str__(self):  # Should be human readable URL.
        return "FetchStrategy.__str___"

    # This method is used to match fetch strategies to version()
    # arguments in packages.
    @classmethod
    def matches(cls, args):
        for attribute in cls.required_attributes:
            if attribute in args:
                return True
        return False
@pattern.composite(interface=FetchStrategy)
class FetchStrategyComposite(object):
    """GoF composite over FetchStrategy objects.

    Delegates matches/set_stage to the FetchStrategy implementations and
    reports a combined source id for all components.
    """

    matches = FetchStrategy.matches
    set_stage = FetchStrategy.set_stage

    def source_id(self):
        """Tuple of component ids, or None when any component lacks one."""
        ids = tuple(component.source_id() for component in self)
        if all(ids):
            return ids
class URLFetchStrategy(FetchStrategy):
    """FetchStrategy that pulls source code from a URL for an archive,
    checks the archive against a checksum, and decompresses the archive.
    """
    enabled = True
    required_attributes = ['url']
    def __init__(self, url=None, digest=None, **kwargs):
        super(URLFetchStrategy, self).__init__()
        # If URL or digest are provided in the kwargs, then prefer
        # those values.
        self.url = kwargs.get('url', None)
        if not self.url:
            self.url = url
        # Accept whichever known hash kind (from crypto.hashes) was supplied.
        self.digest = next((kwargs[h] for h in crypto.hashes if h in kwargs),
                           None)
        if not self.digest:
            self.digest = digest
        self.expand_archive = kwargs.get('expand', True)
        self.extra_curl_options = kwargs.get('curl_options', [])
        self._curl = None  # curl executable, resolved lazily by the property
        self.extension = kwargs.get('extension', None)
        if not self.url:
            raise ValueError("URLFetchStrategy requires a url for fetching.")
    @property
    def curl(self):
        # Locate the curl executable once and cache it.
        if not self._curl:
            self._curl = which('curl', required=True)
        return self._curl
    def source_id(self):
        # The checksum uniquely identifies the fetched content.
        return self.digest
    @_needs_stage
    def fetch(self):
        """Download self.url into the stage using curl.

        Resumes partial downloads (.part file), follows redirects, warns on
        HTML responses, and raises FailedDownloadError on curl failure or a
        missing archive afterwards.
        """
        if self.archive_file:
            tty.msg("Already downloaded %s" % self.archive_file)
            return
        save_file = None
        partial_file = None
        if self.stage.save_filename:
            save_file = self.stage.save_filename
            partial_file = self.stage.save_filename + '.part'
        tty.msg("Fetching %s" % self.url)
        if partial_file:
            save_args = ['-C',
                         '-',  # continue partial downloads
                         '-o',
                         partial_file]  # use a .part file
        else:
            save_args = ['-O']
        curl_args = save_args + [
            '-f',  # fail on >400 errors
            '-D',
            '-',  # print out HTML headers
            '-L',  # resolve 3xx redirects
            self.url,
        ]
        if not spack.config.get('config:verify_ssl'):
            curl_args.append('-k')
        if sys.stdout.isatty():
            curl_args.append('-#')  # status bar when using a tty
        else:
            curl_args.append('-sS')  # just errors when not.
        curl_args += self.extra_curl_options
        # Run curl but grab the mime type from the http headers
        curl = self.curl
        with working_dir(self.stage.path):
            headers = curl(*curl_args, output=str, fail_on_error=False)
        if curl.returncode != 0:
            # clean up archive on failure.
            if self.archive_file:
                os.remove(self.archive_file)
            if partial_file and os.path.exists(partial_file):
                os.remove(partial_file)
            if curl.returncode == 22:
                # This is a 404. Curl will print the error.
                raise FailedDownloadError(
                    self.url, "URL %s was not found!" % self.url)
            elif curl.returncode == 60:
                # This is a certificate error. Suggest spack -k
                raise FailedDownloadError(
                    self.url,
                    "Curl was unable to fetch due to invalid certificate. "
                    "This is either an attack, or your cluster's SSL "
                    "configuration is bad. If you believe your SSL "
                    "configuration is bad, you can try running spack -k, "
                    "which will not check SSL certificates."
                    "Use this at your own risk.")
            else:
                # This is some other curl error. Curl will print the
                # error, but print a spack message too
                raise FailedDownloadError(
                    self.url,
                    "Curl failed with error %d" % curl.returncode)
        # Check if we somehow got an HTML file rather than the archive we
        # asked for. We only look at the last content type, to handle
        # redirects properly.
        content_types = re.findall(r'Content-Type:[^\r\n]+', headers,
                                   flags=re.IGNORECASE)
        if content_types and 'text/html' in content_types[-1]:
            tty.warn("The contents of ",
                     (self.archive_file if self.archive_file is not None
                      else "the archive"),
                     " look like HTML.",
                     "The checksum will likely be bad. If it is, you can use",
                     "'spack clean <package>' to remove the bad archive, then",
                     "fix your internet gateway issue and install again.")
        if save_file:
            # Promote the completed .part file to its final name.
            os.rename(partial_file, save_file)
        if not self.archive_file:
            raise FailedDownloadError(self.url)
    @property
    def archive_file(self):
        """Path to the source archive within this stage directory."""
        return self.stage.archive_file
    @property
    def cachable(self):
        # Only checksummed downloads are safe to reuse from a cache.
        return bool(self.digest)
    @_needs_stage
    def expand(self):
        """Decompress the fetched archive inside the stage directory."""
        if not self.expand_archive:
            tty.msg("Skipping expand step for %s" % self.archive_file)
            return
        tty.msg("Staging archive: %s" % self.archive_file)
        if not self.archive_file:
            raise NoArchiveFileError(
                "Couldn't find archive file",
                "Failed on expand() for URL %s" % self.url)
        if not self.extension:
            self.extension = extension(self.archive_file)
        decompress = decompressor_for(self.archive_file, self.extension)
        # Expand all tarballs in their own directory to contain
        # exploding tarballs.
        tarball_container = os.path.join(self.stage.path,
                                         "spack-expanded-archive")
        mkdirp(tarball_container)
        with working_dir(tarball_container):
            decompress(self.archive_file)
        # Check for an exploding tarball, i.e. one that doesn't expand
        # to a single directory. If the tarball *didn't* explode,
        # move contents up & remove the container directory.
        #
        # NOTE: The tar program on Mac OS X will encode HFS metadata
        # in hidden files, which can end up *alongside* a single
        # top-level directory. We ignore hidden files to accommodate
        # these "semi-exploding" tarballs.
        files = os.listdir(tarball_container)
        non_hidden = [f for f in files if not f.startswith('.')]
        if len(non_hidden) == 1:
            expanded_dir = os.path.join(tarball_container, non_hidden[0])
            if os.path.isdir(expanded_dir):
                for f in files:
                    shutil.move(os.path.join(tarball_container, f),
                                os.path.join(self.stage.path, f))
                os.rmdir(tarball_container)
        if not files:
            os.rmdir(tarball_container)
    def archive(self, destination):
        """Just moves this archive to the destination."""
        if not self.archive_file:
            raise NoArchiveFileError("Cannot call archive() before fetching.")
        shutil.copyfile(self.archive_file, destination)
    @_needs_stage
    def check(self):
        """Check the downloaded archive against a checksum digest.
        No-op if this stage checks code out of a repository."""
        if not self.digest:
            raise NoDigestError(
                "Attempt to check URLFetchStrategy with no digest.")
        checker = crypto.Checker(self.digest)
        if not checker.check(self.archive_file):
            raise ChecksumError(
                "%s checksum failed for %s" %
                (checker.hash_name, self.archive_file),
                "Expected %s but got %s" % (self.digest, checker.sum))
    @_needs_stage
    def reset(self):
        """
        Removes the source path if it exists, then re-expands the archive.
        """
        if not self.archive_file:
            raise NoArchiveFileError(
                "Tried to reset URLFetchStrategy before fetching",
                "Failed on reset() for URL %s" % self.url)
        # Remove everything but the archive from the stage
        for filename in os.listdir(self.stage.path):
            abspath = os.path.join(self.stage.path, filename)
            if abspath != self.archive_file:
                shutil.rmtree(abspath, ignore_errors=True)
        # Expand the archive again
        self.expand()
    def __repr__(self):
        url = self.url if self.url else "no url"
        return "%s<%s>" % (self.__class__.__name__, url)
    def __str__(self):
        if self.url:
            return self.url
        else:
            return "[no url]"
class CacheURLFetchStrategy(URLFetchStrategy):
    """Fetch from a local cache path; the cached resource may be out of date."""

    def __init__(self, *args, **kwargs):
        super(CacheURLFetchStrategy, self).__init__(*args, **kwargs)

    @_needs_stage
    def fetch(self):
        """Symlink the cached archive into the stage and verify its checksum."""
        path = re.sub('^file://', '', self.url)
        # The cache entry must already exist on disk.
        if not os.path.isfile(path):
            raise NoCacheError('No cache of %s' % path)
        link_name = self.stage.save_filename
        # Drop any stale symlink left by a previous attempt.
        if os.path.exists(link_name):
            os.remove(link_name)
        # Point the stage at the cached archive without copying it.
        os.symlink(path, link_name)
        if self.digest:
            try:
                self.check()
            except ChecksumError:
                # Remove the link on a bad checksum so subsequent fetchers
                # do not assume the download already succeeded.
                os.remove(self.archive_file)
                raise
        # Notify the user how we fetched.
        tty.msg('Using cached archive: %s' % path)
class VCSFetchStrategy(FetchStrategy):
    """Common base for fetch strategies backed by a version control system.

    Subclasses pass their VCS name plus the revision-selector keyword
    names they accept (e.g. 'tag', 'branch', 'commit') to ``__init__``.
    """

    def __init__(self, name, *rev_types, **kwargs):
        super(VCSFetchStrategy, self).__init__()
        self.name = name
        # Set a URL based on the type of fetch strategy.
        self.url = kwargs.get(name, None)
        if not self.url:
            raise ValueError(
                "%s requires %s argument." % (self.__class__, name))
        # Ensure that there's only one of the rev_types
        if sum(k in kwargs for k in rev_types) > 1:
            raise ValueError(
                "Supply only one of %s to fetch with %s" % (
                    comma_or(rev_types), name
                ))
        # Set attributes for each rev type.
        for rt in rev_types:
            setattr(self, rt, kwargs.get(rt, None))

    @_needs_stage
    def check(self):
        # VCS checkouts have no archive to checksum.
        tty.msg("No checksum needed when fetching with %s" % self.name)

    @_needs_stage
    def expand(self):
        # A checkout is already a directory tree; nothing to unpack.
        tty.debug("Source fetched with %s is already expanded." % self.name)

    @_needs_stage
    def archive(self, destination, **kwargs):
        """Create a .tar.gz of the checkout at ``destination``.

        Accepts ``exclude`` (str or list of str) with tar exclude
        patterns, typically the VCS metadata directory.
        """
        assert (extension(destination) == 'tar.gz')
        assert (self.stage.source_path.startswith(self.stage.path))
        tar = which('tar', required=True)
        patterns = kwargs.get('exclude', None)
        if patterns is not None:
            if isinstance(patterns, string_types):
                patterns = [patterns]
            for p in patterns:
                tar.add_default_arg('--exclude=%s' % p)
        with working_dir(self.stage.path):
            tar('-czf', destination, os.path.basename(self.stage.source_path))

    def __str__(self):
        return "VCS: %s" % self.url

    def __repr__(self):
        # NOTE(review): uses self.__class__ (full class repr) rather than
        # __name__ like URLFetchStrategy.__repr__ -- confirm intentional.
        return "%s<%s>" % (self.__class__, self.url)
class GoFetchStrategy(VCSFetchStrategy):
    """Fetch strategy that employs the `go get` infrastructure.

    Use like this in a package:
        version('name',
                go='github.com/monochromegane/the_platinum_searcher/...')

    Go get does not natively support versions, they can be faked with git.
    """
    enabled = True
    required_attributes = ('go', )

    def __init__(self, **kwargs):
        # Discards the keywords in kwargs that may conflict with the next
        # call to __init__
        forwarded_args = copy.copy(kwargs)
        forwarded_args.pop('name', None)
        super(GoFetchStrategy, self).__init__('go', **forwarded_args)
        self._go = None

    @property
    def go_version(self):
        """Version of the local go tool, parsed from ``go version``."""
        # Output looks like 'go version go1.X.Y os/arch'; token index 2 is
        # the version word -- TODO(review): confirm across platforms.
        vstring = self.go('version', output=str).split(' ')[2]
        return Version(vstring)

    @property
    def go(self):
        """Lazily locate the go executable (cached after first lookup)."""
        if not self._go:
            self._go = which('go', required=True)
        return self._go

    @_needs_stage
    def fetch(self):
        """Run ``go get -d`` with GOPATH pointed into the stage."""
        tty.msg("Trying to get go resource:", self.url)
        with working_dir(self.stage.path):
            try:
                os.mkdir('go')
            except OSError:
                pass  # most likely 'go' already exists (other OSErrors are ignored too)
            env = dict(os.environ)
            env['GOPATH'] = os.path.join(os.getcwd(), 'go')
            self.go('get', '-v', '-d', self.url, env=env)

    def archive(self, destination):
        """Archive the checkout, excluding git metadata."""
        super(GoFetchStrategy, self).archive(destination, exclude='.git')

    @_needs_stage
    def reset(self):
        """Run ``go clean`` in the source directory."""
        with working_dir(self.stage.source_path):
            self.go('clean')

    def __str__(self):
        return "[go] %s" % self.url
class GitFetchStrategy(VCSFetchStrategy):
    """
    Fetch strategy that gets source code from a git repository.

    Use like this in a package:
        version('name', git='https://github.com/project/repo.git')

    Optionally, you can provide a branch, or commit to check out, e.g.:
        version('1.1', git='https://github.com/project/repo.git', tag='v1.1')

    You can use these three optional attributes in addition to ``git``:
        * ``branch``: Particular branch to build from (default is master)
        * ``tag``: Particular tag to check out
        * ``commit``: Particular commit hash in the repo
    """
    enabled = True
    required_attributes = ('git', )

    def __init__(self, **kwargs):
        # Discards the keywords in kwargs that may conflict with the next call
        # to __init__
        forwarded_args = copy.copy(kwargs)
        forwarded_args.pop('name', None)
        super(GitFetchStrategy, self).__init__(
            'git', 'tag', 'branch', 'commit', **forwarded_args)
        self._git = None
        self.submodules = kwargs.get('submodules', False)

    @property
    def git_version(self):
        """Version of the local git executable, as a Version object."""
        # NOTE(review): lstrip() strips a *character set*, not a prefix.
        # It happens to work because version strings are numeric, but it
        # is fragile -- consider splitting on whitespace instead.
        vstring = self.git('--version', output=str).lstrip('git version ')
        return Version(vstring)

    @property
    def git(self):
        """Lazily locate git; honors config:verify_ssl for all calls."""
        if not self._git:
            self._git = which('git', required=True)
            # If the user asked for insecure fetching, make that work
            # with git as well.
            if not spack.config.get('config:verify_ssl'):
                self._git.add_default_env('GIT_SSL_NO_VERIFY', 'true')
        return self._git

    @property
    def cachable(self):
        # Only immutable refs (commit or tag) are safe to cache.
        return bool(self.commit or self.tag)

    def source_id(self):
        # Identifier of what will be fetched; None for moving branches.
        return self.commit or self.tag

    def get_source_id(self):
        """Resolve a branch to its current commit via ``git ls-remote``."""
        if not self.branch:
            return
        output = self.git('ls-remote', self.url, self.branch, output=str)
        if output:
            return output.split()[0]

    def fetch(self):
        """Clone the repository into the stage.

        Three paths: a full clone + checkout for a pinned commit; an
        optimized (shallow, single-branch where git is new enough) clone
        for branches/tags; and a post-clone tag checkout for old gits
        that cannot do ``clone --branch <tag>``.
        """
        if self.stage.source_path:
            tty.msg("Already fetched %s" % self.stage.source_path)
            return
        # ``args`` is first a human-readable suffix for the log message...
        args = ''
        if self.commit:
            args = 'at commit %s' % self.commit
        elif self.tag:
            args = 'at tag %s' % self.tag
        elif self.branch:
            args = 'on branch %s' % self.branch
        tty.msg("Trying to clone git repository: %s %s" % (self.url, args))
        git = self.git
        if self.commit:
            # Need to do a regular clone and check out everything if
            # they asked for a particular commit.
            with working_dir(self.stage.path):
                if spack.config.get('config:debug'):
                    git('clone', self.url)
                else:
                    git('clone', '--quiet', self.url)
            with working_dir(self.stage.source_path):
                if spack.config.get('config:debug'):
                    git('checkout', self.commit)
                else:
                    git('checkout', '--quiet', self.commit)
        else:
            # Can be more efficient if not checking out a specific commit.
            # ...and here ``args`` is reused as the actual argv list.
            args = ['clone']
            if not spack.config.get('config:debug'):
                args.append('--quiet')
            # If we want a particular branch ask for it.
            if self.branch:
                args.extend(['--branch', self.branch])
            elif self.tag and self.git_version >= ver('1.8.5.2'):
                args.extend(['--branch', self.tag])
            # Try to be efficient if we're using a new enough git.
            # This checks out only one branch's history
            if self.git_version > ver('1.7.10'):
                args.append('--single-branch')
            with working_dir(self.stage.path):
                cloned = False
                # Yet more efficiency, only download a 1-commit deep tree
                if self.git_version >= ver('1.7.1'):
                    try:
                        git(*(args + ['--depth', '1', self.url]))
                        cloned = True
                    except spack.error.SpackError:
                        # This will fail with the dumb HTTP transport
                        # continue and try without depth, cleanup first
                        pass
                if not cloned:
                    args.append(self.url)
                    git(*args)
                with working_dir(self.stage.source_path):
                    # For tags, be conservative and check them out AFTER
                    # cloning. Later git versions can do this with clone
                    # --branch, but older ones fail.
                    if self.tag and self.git_version < ver('1.8.5.2'):
                        # pull --tags returns a "special" error code of 1 in
                        # older versions that we have to ignore.
                        # see: https://github.com/git/git/commit/19d122b
                        if spack.config.get('config:debug'):
                            git('pull', '--tags', ignore_errors=1)
                            git('checkout', self.tag)
                        else:
                            git('pull', '--quiet', '--tags', ignore_errors=1)
                            git('checkout', '--quiet', self.tag)
        with working_dir(self.stage.source_path):
            # Init submodules if the user asked for them.
            if self.submodules:
                if spack.config.get('config:debug'):
                    git('submodule', 'update', '--init', '--recursive')
                else:
                    git('submodule', '--quiet', 'update', '--init',
                        '--recursive')

    def archive(self, destination):
        """Archive the checkout, excluding git metadata."""
        super(GitFetchStrategy, self).archive(destination, exclude='.git')

    @_needs_stage
    def reset(self):
        """Discard local edits and untracked files in the checkout."""
        with working_dir(self.stage.source_path):
            if spack.config.get('config:debug'):
                self.git('checkout', '.')
                self.git('clean', '-f')
            else:
                self.git('checkout', '--quiet', '.')
                self.git('clean', '--quiet', '-f')

    def __str__(self):
        return "[git] %s" % self.url
class SvnFetchStrategy(VCSFetchStrategy):
    """Fetch strategy that gets source code from a subversion repository.

    Use like this in a package:
        version('name', svn='http://www.example.com/svn/trunk')

    Optionally, you can provide a revision for the URL:
        version('name', svn='http://www.example.com/svn/trunk',
                revision='1641')
    """
    enabled = True
    required_attributes = ['svn']

    def __init__(self, **kwargs):
        # Discards the keywords in kwargs that may conflict with the next call
        # to __init__
        forwarded_args = copy.copy(kwargs)
        forwarded_args.pop('name', None)
        super(SvnFetchStrategy, self).__init__(
            'svn', 'revision', **forwarded_args)
        self._svn = None
        # Normalize: revisions may be given as ints in package files.
        if self.revision is not None:
            self.revision = str(self.revision)

    @property
    def svn(self):
        """Lazily locate the svn executable (cached after first lookup)."""
        if not self._svn:
            self._svn = which('svn', required=True)
        return self._svn

    @property
    def cachable(self):
        # Only pinned revisions are safe to cache.
        return bool(self.revision)

    def source_id(self):
        return self.revision

    def get_source_id(self):
        """Return the repository head revision from ``svn info``."""
        output = self.svn('info', self.url, output=str)
        if not output:
            return None
        lines = output.split('\n')
        for line in lines:
            if line.startswith('Revision:'):
                return line.split()[-1]

    @_needs_stage
    def fetch(self):
        """Check the repository out into the stage (at self.revision if set)."""
        if self.stage.source_path:
            tty.msg("Already fetched %s" % self.stage.source_path)
            return
        tty.msg("Trying to check out svn repository: %s" % self.url)
        args = ['checkout', '--force', '--quiet']
        if self.revision:
            args += ['-r', self.revision]
        args.append(self.url)
        with working_dir(self.stage.path):
            self.svn(*args)

    def _remove_untracked_files(self):
        """Removes untracked files in an svn repository."""
        with working_dir(self.stage.source_path):
            status = self.svn('status', '--no-ignore', output=str)
            # (A second, redundant ``svn status --no-ignore`` invocation
            # used to run here purely for its console output; removed.)
            for line in status.split('\n'):
                # 'I' = ignored, '?' = untracked; skip everything else.
                if not re.match('^[I?]', line):
                    continue
                path = line[8:].strip()
                if os.path.isfile(path):
                    os.unlink(path)
                elif os.path.isdir(path):
                    shutil.rmtree(path, ignore_errors=True)

    def archive(self, destination):
        """Archive the checkout, excluding svn metadata."""
        super(SvnFetchStrategy, self).archive(destination, exclude='.svn')

    @_needs_stage
    def reset(self):
        """Restore a pristine checkout: drop untracked files, revert edits."""
        self._remove_untracked_files()
        with working_dir(self.stage.source_path):
            self.svn('revert', '.', '-R')

    def __str__(self):
        return "[svn] %s" % self.url
class HgFetchStrategy(VCSFetchStrategy):
    """
    Fetch strategy that gets source code from a Mercurial repository.

    Use like this in a package:
        version('name', hg='https://jay.grs.rwth-aachen.de/hg/lwm2')

    Optionally, you can provide a branch, or revision to check out, e.g.:
        version('torus',
                hg='https://jay.grs.rwth-aachen.de/hg/lwm2', branch='torus')

    You can use the optional 'revision' attribute to check out a
    branch, tag, or particular revision in hg. To prevent
    non-reproducible builds, using a moving target like a branch is
    discouraged.
        * ``revision``: Particular revision, branch, or tag.
    """
    enabled = True
    required_attributes = ['hg']

    def __init__(self, **kwargs):
        # Discards the keywords in kwargs that may conflict with the next call
        # to __init__
        forwarded_args = copy.copy(kwargs)
        forwarded_args.pop('name', None)
        super(HgFetchStrategy, self).__init__(
            'hg', 'revision', **forwarded_args)
        self._hg = None

    @property
    def hg(self):
        """:returns: The hg executable
        :rtype: Executable
        """
        if not self._hg:
            self._hg = which('hg', required=True)
            # When building PythonPackages, Spack automatically sets
            # PYTHONPATH. This can interfere with hg, which is a Python
            # script. Unset PYTHONPATH while running hg.
            self._hg.add_default_env('PYTHONPATH', '')
        return self._hg

    @property
    def cachable(self):
        # Only pinned revisions are safe to cache.
        return bool(self.revision)

    def source_id(self):
        return self.revision

    def get_source_id(self):
        """Return the remote identifier from ``hg id``, if any."""
        output = self.hg('id', self.url, output=str)
        if output:
            return output.strip()

    @_needs_stage
    def fetch(self):
        """Clone the repository into the stage (at self.revision if set)."""
        if self.stage.source_path:
            tty.msg("Already fetched %s" % self.stage.source_path)
            return
        # First use of ``args``: human-readable suffix for the log line.
        args = []
        if self.revision:
            args.append('at revision %s' % self.revision)
        tty.msg("Trying to clone Mercurial repository:", self.url, *args)
        # Second use: the actual hg argument list.
        args = ['clone']
        if not spack.config.get('config:verify_ssl'):
            args.append('--insecure')
        args.append(self.url)
        if self.revision:
            args.extend(['-r', self.revision])
        with working_dir(self.stage.path):
            self.hg(*args)

    def archive(self, destination):
        """Archive the checkout, excluding hg metadata."""
        super(HgFetchStrategy, self).archive(destination, exclude='.hg')

    @_needs_stage
    def reset(self):
        """Get a clean checkout by re-cloning locally and swapping it in."""
        with working_dir(self.stage.path):
            source_path = self.stage.source_path
            scrubbed = "scrubbed-source-tmp"
            args = ['clone']
            if self.revision:
                args += ['-r', self.revision]
            args += [source_path, scrubbed]
            self.hg(*args)
            shutil.rmtree(source_path, ignore_errors=True)
            shutil.move(scrubbed, source_path)

    def __str__(self):
        return "[hg] %s" % self.url
def from_url(url):
    """Return a fetch strategy appropriate for a plain URL.

    Currently this is always a curl-backed URLFetchStrategy.
    TODO: dispatch to more specific strategies for other URL types.
    """
    strategy = URLFetchStrategy(url)
    return strategy
def from_kwargs(**kwargs):
    """Construct an appropriate FetchStrategy from the given keyword arguments.

    Args:
        **kwargs: dictionary of keyword arguments, e.g. from a
            ``version()`` directive in a package.

    Returns:
        fetch_strategy: The first registered strategy whose attribute
            names match the args (e.g., ``git``, ``hg``, etc.)

    Raises:
        FetchError: If no strategy matches the args.
    """
    matching = (s for s in all_strategies if s.matches(kwargs))
    strategy = next(matching, None)
    if strategy is not None:
        return strategy(**kwargs)
    # No known strategy can be instantiated from these arguments.
    message = "Cannot instantiate any FetchStrategy"
    long_message = message + " from the given arguments : {arguments}".format(
        arguments=kwargs)
    raise FetchError(message, long_message)
def args_are_for(args, fetcher):
    """Return True if ``args`` are suitable for the given fetch strategy.

    Delegates to ``fetcher.matches``. Previously the result was computed
    but never returned, so this function always returned None (falsy even
    on a match).
    """
    return fetcher.matches(args)
def for_package_version(pkg, version):
    """Determine a fetch strategy based on the arguments supplied to
    version() in the package description.

    Raises:
        InvalidArgsError: if no URL can be extrapolated and no strategy
            matches either the version args or package-level attributes.
    """
    # If it's not a known version, extrapolate one.
    if version not in pkg.versions:
        url = pkg.url_for_version(version)
        if not url:
            raise InvalidArgsError(pkg, version)
        return URLFetchStrategy(url)
    # Grab a dict of args out of the package version dict
    args = pkg.versions[version]
    # Test all strategies against per-version arguments.
    for fetcher in all_strategies:
        if fetcher.matches(args):
            return fetcher(**args)
    # If nothing matched for a *specific* version, test all strategies
    # against attributes defined at the package level (e.g. pkg.url),
    # with the version args layered on top.
    for fetcher in all_strategies:
        attrs = dict((attr, getattr(pkg, attr, None))
                     for attr in fetcher.required_attributes)
        if 'url' in attrs:
            attrs['url'] = pkg.url_for_version(version)
        attrs.update(args)
        if fetcher.matches(attrs):
            return fetcher(**attrs)
    raise InvalidArgsError(pkg, version)
def from_list_url(pkg):
    """If a package provides a URL which lists URLs for resources by
    version, this can create a fetcher for a URL discovered for
    the specified package's version.

    Returns None (implicitly) when no suitable URL can be determined.
    """
    if pkg.list_url:
        try:
            versions = pkg.fetch_remote_versions()
            try:
                url_from_list = versions[pkg.version]
                digest = None
                if pkg.version in pkg.versions:
                    digest = pkg.versions[pkg.version].get('md5', None)
                return URLFetchStrategy(url=url_from_list, digest=digest)
            except KeyError:
                tty.msg("Can not find version %s in url_list" %
                        pkg.version)
        except Exception:
            # Was ``except BaseException``, which also swallowed
            # KeyboardInterrupt and SystemExit, making spack hard to
            # interrupt during slow remote listings.
            tty.msg("Could not determine url from list_url.")
class FsCache(object):
    """Filesystem-backed cache of fetched archives, rooted at one directory."""

    def __init__(self, root):
        """Remember the absolute path of the cache root."""
        self.root = os.path.abspath(root)

    def store(self, fetcher, relativeDst):
        """Archive ``fetcher``'s payload under ``relativeDst`` in the cache.

        Fetchers that are not cachable, or that already read from this
        cache, are silently skipped.
        """
        skip = (not fetcher.cachable or
                isinstance(fetcher, CacheURLFetchStrategy))
        if skip:
            return
        destination = os.path.join(self.root, relativeDst)
        mkdirp(os.path.dirname(destination))
        fetcher.archive(destination)

    def fetcher(self, targetPath, digest, **kwargs):
        """Return a CacheURLFetchStrategy reading ``targetPath`` from here."""
        cached = os.path.join(self.root, targetPath)
        return CacheURLFetchStrategy(cached, digest, **kwargs)

    def destroy(self):
        """Delete the whole cache tree (best effort)."""
        shutil.rmtree(self.root, ignore_errors=True)
class FetchError(spack.error.SpackError):
    """Superclass for fetcher errors."""
class NoCacheError(FetchError):
    """Raised when there is no cached archive for a package."""
class FailedDownloadError(FetchError):
    """Raised when a download fails."""
    def __init__(self, url, msg=""):
        super(FailedDownloadError, self).__init__(
            "Failed to fetch file from URL: %s" % url, msg)
        # Keep the offending URL so callers can report it.
        self.url = url
class NoArchiveFileError(FetchError):
    """Raised when an archive file is expected but none exists."""
class NoDigestError(FetchError):
    """Raised after attempt to checksum when URL has no digest."""
class InvalidArgsError(FetchError):
    """Raised when no fetch strategy matches a package/version's arguments."""
    def __init__(self, pkg, version):
        msg = ("Could not construct a fetch strategy for package %s at "
               "version %s")
        msg %= (pkg.name, version)
        super(InvalidArgsError, self).__init__(msg)
class ChecksumError(FetchError):
    """Raised when archive fails to checksum."""
class NoStageError(FetchError):
    """Raised when fetch operations are called before set_stage()."""
    def __init__(self, method):
        super(NoStageError, self).__init__(
            "Must call FetchStrategy.set_stage() before calling %s" %
            method.__name__)
| tmerrick1/spack | lib/spack/spack/fetch_strategy.py | Python | lgpl-2.1 | 36,243 |
# -*- coding: utf-8 -*-
#
# The internetarchive module is a Python/CLI interface to Archive.org.
#
# Copyright (C) 2012-2021 Internet Archive
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Copy files in archive.org items.
usage:
ia copy <src-identifier>/<src-file> <dest-identifier>/<dest-file> [options]...
ia copy --help
options:
-h, --help
-m, --metadata=<key:value>... Metadata to add to your new item, if you are moving
the file to a new item.
--replace-metadata Only use metadata specified as argument,
do not copy any from the source item.
-H, --header=<key:value>... S3 HTTP headers to send with your request.
--ignore-file-metadata Do not copy file metadata.
-n, --no-derive Do not derive uploaded files.
--no-backup Turn off archive.org backups. Clobbered files
will not be saved to history/files/$key.~N~
[default: True].
"""
import sys
from urllib.parse import quote
from docopt import docopt, printable_usage
from schema import Schema, Use, Or, And, SchemaError
import internetarchive as ia
from internetarchive.cli.argparser import get_args_dict
from internetarchive.utils import get_s3_xml_text, merge_dictionaries
def assert_src_file_exists(src_location):
    """Schema validator hook: verify the source item and file exist.

    Relies on module globals: reads SRC_ITEM (set in main() before
    validation runs) and publishes the located file as SRC_FILE for use
    after validation. The bare asserts are intentional -- when this
    raises, schema's And() reports the error message configured at the
    call site in main().
    """
    assert SRC_ITEM.exists
    global SRC_FILE
    src_filename = src_location.split('/', 1)[-1]
    SRC_FILE = SRC_ITEM.get_file(src_filename)
    assert SRC_FILE.exists
    return True
def main(argv, session, cmd='copy'):
    """Copy a file between archive.org items via an S3 PUT with
    ``x-amz-copy-source``.

    ``cmd`` is 'copy' when invoked directly; other callers (e.g. 'ia
    move') pass their own name so usage/error text reads correctly.
    On success with cmd != 'copy', returns (response, SRC_FILE).
    """
    args = docopt(__doc__, argv=argv)
    src_path = args['<src-identifier>/<src-file>']
    dest_path = args['<dest-identifier>/<dest-file>']
    # If src == dest, file gets deleted!
    try:
        assert src_path != dest_path
    except AssertionError:
        print('error: The source and destination files cannot be the same!',
              file=sys.stderr)
        sys.exit(1)
    # Module-level global: read by assert_src_file_exists() during
    # schema validation below.
    global SRC_ITEM
    SRC_ITEM = session.get_item(src_path.split('/')[0])
    # Validate args.
    s = Schema({
        str: Use(bool),
        '<src-identifier>/<src-file>': And(str, And(And(str, lambda x: '/' in x,
            error='Destination not formatted correctly. See usage example.'),
            assert_src_file_exists, error=(
                f'https://{session.host}/download/{src_path} does not exist. '
                'Please check the identifier and filepath and retry.'))),
        '<dest-identifier>/<dest-file>': And(str, lambda x: '/' in x,
            error='Destination not formatted correctly. See usage example.'),
        '--metadata': Or(None, And(Use(get_args_dict), dict),
            error='--metadata must be formatted as --metadata="key:value"'),
        '--replace-metadata': Use(bool),
        '--header': Or(None, And(Use(get_args_dict), dict),
            error='--header must be formatted as --header="key:value"'),
        '--ignore-file-metadata': Use(bool),
    })
    try:
        args = s.validate(args)
    except SchemaError as exc:
        # This module is sometimes called by other modules.
        # Replace references to 'ia copy' in __doc__ to 'ia {cmd}' for clarity.
        usage = printable_usage(__doc__.replace('ia copy', f'ia {cmd}'))
        print(f'{exc}\n{usage}', file=sys.stderr)
        sys.exit(1)
    args['--header']['x-amz-copy-source'] = f'/{quote(src_path)}'
    # Copy the old metadata verbatim if no additional metadata is supplied,
    # else combine the old and the new metadata in a sensible manner.
    if args['--metadata'] or args['--replace-metadata']:
        args['--header']['x-amz-metadata-directive'] = 'REPLACE'
    else:
        args['--header']['x-amz-metadata-directive'] = 'COPY'
    # New metadata takes precedence over old metadata.
    if not args['--replace-metadata']:
        args['--metadata'] = merge_dictionaries(SRC_ITEM.metadata,
                                                args['--metadata'])
    # File metadata is copied by default but can be dropped.
    if args['--ignore-file-metadata']:
        file_metadata = None
    else:
        file_metadata = SRC_FILE.metadata
    # Add keep-old-version by default.
    if not args['--header'].get('x-archive-keep-old-version') and not args['--no-backup']:
        args['--header']['x-archive-keep-old-version'] = '1'
    url = f'{session.protocol}//s3.us.archive.org/{quote(dest_path)}'
    queue_derive = True if args['--no-derive'] is False else False
    req = ia.iarequest.S3Request(url=url,
                                 method='PUT',
                                 metadata=args['--metadata'],
                                 file_metadata=file_metadata,
                                 headers=args['--header'],
                                 queue_derive=queue_derive,
                                 access_key=session.access_key,
                                 secret_key=session.secret_key)
    p = req.prepare()
    r = session.send(p)
    if r.status_code != 200:
        # Prefer the S3 XML error text; fall back to the raw body.
        try:
            msg = get_s3_xml_text(r.text)
        except Exception as e:
            msg = r.text
        print(f'error: failed to {cmd} "{src_path}" to "{dest_path}" - {msg}', file=sys.stderr)
        sys.exit(1)
    elif cmd == 'copy':
        print(f'success: copied "{src_path}" to "{dest_path}".', file=sys.stderr)
    else:
        return (r, SRC_FILE)
| jjjake/internetarchive | internetarchive/cli/ia_copy.py | Python | agpl-3.0 | 6,076 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add an ``indicator`` field to IndicatorRecord and make
    (indicator, info_hash, info_source, info_date) unique together."""

    dependencies = [
        ('pivoteer', '0001_initial'),
    ]
    operations = [
        # Nullable/blank so existing rows remain valid without a data
        # migration. Must run before the unique_together change below.
        migrations.AddField(
            model_name='indicatorrecord',
            name='indicator',
            field=models.CharField(null=True, max_length=253, blank=True),
        ),
        migrations.AlterUniqueTogether(
            name='indicatorrecord',
            unique_together=set([('indicator', 'info_hash', 'info_source', 'info_date')]),
        ),
    ]
| gdit-cnd/RAPID | pivoteer/migrations/0002_addfield_indicator.py | Python | mit | 604 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
dslw -- Decent SpatiaLite Wrapper
Copyright (c) 2017 Garin Wally
MIT License; see LICENSE
Credits:
This project stands on the shoulders of giants. Notably:
* Another Python SQLite Wrapper (apsw; by Roger Binns)
* SQLite (by D. Richard Hipp et. al.)
* SpatiaLite (by Alessandro Furieri)
"""
import apsw
from _core import *
import utils
import config
#import dbspatial # A new fork of dslw
# =============================================================================
# GLOBALS
__version__ = u"0.1-alpha"
# PEP 249 globals -- just to say that we thought about it, just for a sec.
# (The module deliberately does not claim DB-API 2.0 compliance.)
apilevel = u"incompliant"
# Set version vars: open a throwaway in-memory SpatialDB purely to ask the
# underlying libraries (SQLite, SpatiaLite, FreeXL, PROJ.4, GEOS, lwgeom,
# libxml2) what versions they are, then let the context manager close it.
with SpatialDB(u":memory:", verbose=False) as conn:
    _c = conn.cursor()
    sqlite_version = _c.execute(
        u"SELECT sqlite_version();").fetchone()[0]
    spatialite_version = _c.execute(
        u"SELECT spatialite_version();").fetchone()[0]
    freexl_version = _c.execute(
        u"SELECT freexl_version();").fetchone()[0]
    proj4_version = _c.execute(
        u"SELECT proj4_version();").fetchone()[0]
    geos_version = _c.execute(
        u"SELECT geos_version();").fetchone()[0]
    lwgeom_version = _c.execute(
        u"SELECT lwgeom_version();").fetchone()[0]
    libxml2_version = _c.execute(
        u"SELECT libxml2_version();").fetchone()[0]
    _c.close()
| WindfallLabs/dslw | dslw/__init__.py | Python | mit | 1,366 |
from django.conf.urls import url
from . import views
# Namespace used when reversing, e.g. {% url 'gallery:images' slug %}.
app_name = 'gallery'
urlpatterns = [
    # Gallery index page.
    url(r'^$', views.GalleryView.as_view(), name='gallery'),
    # Images of one gallery, selected by slug (word chars, '_' or '-').
    url(r'^(?P<slug>[\w_-]+)/$', views.GalleryImageView.as_view(), name='images')
]
| VictorArnaud/sitepet | gallery/urls.py | Python | mpl-2.0 | 237 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""API for enabling v2 control flow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["enable_control_flow_v2"])
def enable_control_flow_v2():  # pylint: disable=invalid-name
  """Use control flow v2.

  control flow v2 (cfv2) is an improved version of control flow in TensorFlow
  with support for higher order derivatives. Enabling cfv2 will change the
  graph/function representation of control flow, e.g., `tf.while_loop` and
  `tf.cond` will generate functional `While` and `If` ops instead of low-level
  `Switch`, `Merge` etc. ops. Note: Importing and running graphs exported
  with old control flow will still be supported.

  Calling tf.enable_control_flow_v2() lets you opt-in to this TensorFlow 2.0
  feature.

  Note: v2 control flow is always enabled inside of tf.function. Calling this
  function is not required.
  """
  # Process-wide switch consulted when control flow ops are built.
  control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
@tf_export(v1=["disable_control_flow_v2"])
def disable_control_flow_v2():  # pylint: disable=invalid-name
  """Opts out of control flow v2.

  Note: v2 control flow is always enabled inside of tf.function. Calling this
  function has no effect in that case.

  If your code needs tf.disable_control_flow_v2() to be called to work
  properly please file a bug.
  """
  # Counterpart of enable_control_flow_v2(); flips the same global switch.
  control_flow_util.ENABLE_CONTROL_FLOW_V2 = False
@tf_export(v1=["control_flow_v2_enabled"])
def control_flow_v2_enabled():  # pylint: disable=invalid-name
  """Returns `True` if v2 control flow is enabled.

  Note: v2 control flow is always enabled inside of tf.function.
  """
  graph = ops.get_default_graph()
  return control_flow_util.EnableControlFlowV2(graph)
@tf_export(v1=["experimental.output_all_intermediates"])
def output_all_intermediates(state):  # pylint: disable=invalid-name
  """Whether to output all intermediates from functional control flow ops.

  The "default" behavior to is to output all intermediates when using v2 control
  flow inside Keras models in graph mode (possibly inside Estimators). This is
  needed to support taking gradients of v2 control flow. In graph mode, Keras
  can sometimes freeze the forward graph before the gradient computation which
  does not work for v2 control flow since it requires updating the forward ops
  to output the needed intermediates. We work around this by proactively
  outputting the needed intermediates when building the forward pass itself.
  Ideally any such extra tensors should be pruned out at runtime. However, if
  for any reason this doesn't work for you or if you have an inference-only
  model you can turn this behavior off using
  `tf.compat.v1.experimental.output_all_intermediates(False)`.

  If with the default behavior you are still seeing errors of the form
  "Connecting to invalid output X of source node Y which has Z outputs" try
  setting `tf.compat.v1.experimental.output_all_intermediates(True)` and
  please file an issue at https://github.com/tensorflow/tensorflow/issues.

  Args:
    state: True, False or None. None restores the default behavior.
  """
  # True/False force the behavior; None defers to the internal heuristic.
  control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = state  # pylint: disable=protected-access
| xzturn/tensorflow | tensorflow/python/ops/control_flow_v2_toggles.py | Python | apache-2.0 | 4,090 |
import os
import re
linkRegex = r"(?!_|\*).\[([^\[]+)\]\(([^\)]+)\)"
linkBoldRegex = r"\[([^\[]+)\]\(([^\)]+)\)\*\*"
fixBugs = r"line-numbers=off, "
for file in os.listdir("manuscript"):
if file.endswith(".txt"):
with open("manuscript/" + file) as f:
fileData = ""
fileContent = f.readlines()
for line in fileContent:
line = re.sub(fixBugs, r'', line.rstrip())
fileData += line + "\n"
text_file = open("manuscript/" + file, "w")
text_file.write(fileData)
text_file.close()
print "Book updated!" | UlisesGascon/javascript-inspirate | scripts/regex_fixer.py | Python | gpl-3.0 | 613 |
from colorama import init
from colorama import Fore
init()
def pytest_report_teststatus(report):
    """Colorize pytest's progress letters for the 'call' phase."""
    if report.when != 'call':
        return None
    # Expected-failure results get their own category and yellow marker.
    if hasattr(report, 'wasxfail'):
        if report.skipped:
            return "xfailed", Fore.YELLOW + "x" + Fore.RESET, "xfail"
        if report.passed:
            return ("xpassed", Fore.YELLOW + "p" + Fore.RESET,
                    ("XPASS", {'yellow': True}))
    # Ordinary outcomes: green dot, yellow skip, red failure.
    if report.passed:
        letter = Fore.GREEN + "." + Fore.RESET
    elif report.skipped:
        letter = Fore.YELLOW + "s" + Fore.RESET
    elif report.failed:
        letter = Fore.RED + "F" + Fore.RESET
    return report.outcome, letter, report.outcome.upper()
| svenstaro/pytest-colordots | pytest_colordots.py | Python | mit | 697 |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from hcat import hcat
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from resource_management.core.logger import Logger
from resource_management.core.exceptions import ClientComponentHasNoStatus
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.script.script import Script
class HCatClient(Script):
  """Ambari client component that installs and configures HCatalog."""

  def install(self, env):
    """Install packages, then lay down configuration."""
    import params
    # NOTE(review): ``params`` looks unused here, but importing it runs
    # the params module -- confirm it has no needed side effects before
    # removing.
    self.install_packages(env)
    self.configure(env)

  def configure(self, env):
    """Push params into the environment and write the HCat configs."""
    import params
    env.set_params(params)
    hcat()

  def status(self, env):
    # Client components have no daemon whose status could be reported.
    raise ClientComponentHasNoStatus()
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class HCatClientWindows(HCatClient):
  # Windows needs no behavior beyond the base client.
  pass
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class HCatClientDefault(HCatClient):
  """Linux/default implementation adding stack-select handling."""

  def get_component_name(self):
    # HCat client doesn't have a first-class entry in <stack-selector-tool>. Since clients always
    # update after daemons, this ensures that the hcat directories are correct on hosts
    # which do not include the WebHCat daemon
    return "hive-webhcat"

  def pre_upgrade_restart(self, env, upgrade_type=None):
    """
    Execute <stack-selector-tool> before reconfiguring this client to the new stack version.
    :param env:
    :param upgrade_type:
    :return:
    """
    Logger.info("Executing Hive HCat Client Stack Upgrade pre-restart")
    import params
    env.set_params(params)
    # this function should not execute if the stack version does not support rolling upgrade
    if not (params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version)):
      return
    # HCat client doesn't have a first-class entry in <stack-selector-tool>. Since clients always
    # update after daemons, this ensures that the hcat directories are correct on hosts
    # which do not include the WebHCat daemon
    stack_select.select("hive-webhcat", params.version)
if __name__ == "__main__":
  # execute() comes from the resource_management Script base class; it
  # dispatches to the command method Ambari requested (install, status, ...).
  HCatClient().execute()
| radicalbit/ambari | contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hcat_client.py | Python | apache-2.0 | 2,941 |
import os
import io
import sys
import ssl
import urllib
from PIL import Image
import http.server
import moovoo
SIZE=(1200, 800)
class moovoo_server(http.server.HTTPServer):
  """HTTP server that owns a shared moovoo rendering context, model and view.

  Handlers reach these via ``self.server.ctxt`` / ``.model`` / ``.view``.
  NOTE: ``s`` is used throughout this file instead of the conventional ``self``.
  """
  def __init__(s, addr, handler):
    http.server.HTTPServer.__init__(s, addr, handler)
    s.ctxt = moovoo.Context()
    # Model is loaded once at startup; the path is relative to the CWD —
    # assumes the server is launched from the examples directory (TODO confirm).
    s.modelBytes = open("../molecules/2tgt.cif", "rb").read()
    s.model = moovoo.Model(s.ctxt, s.modelBytes)
    s.view = moovoo.View(s.ctxt, s.model, SIZE[0], SIZE[1])
class moovoo_handler(http.server.BaseHTTPRequestHandler):
  """Serves an index page at '/' and an endless MJPEG-style render stream
  (multipart/x-mixed-replace of PNG frames) for any other path."""
  def do_GET(s):
    url = urllib.parse.urlparse(s.path)
    # `query` is parsed but currently unused — kept, presumably, for
    # future render parameters.
    query = urllib.parse.parse_qs(url.query)
    if s.path == '/':
      # Index page embedding the stream. NOTE(review): the <img> points at
      # '1.jpg' while the stream below serves PNG frames — confirm intended.
      s.send_response(200)
      s.send_header("Content-type", "text/html")
      s.end_headers()
      s.wfile.write(b"<html><head><title>Title goes here.</title></head>")
      s.wfile.write(b"<body><img src='1.jpg'></body></html>")
    else:
      # Any other path: stream rendered frames forever with caching disabled.
      s.send_response(200)
      s.send_header("Connection", "close")
      s.send_header("Max-Age", "0")
      s.send_header("Expires", "0")
      s.send_header("Cache-Control", "no-cache, private")
      s.send_header("Pragma", "no-cache")
      s.send_header("Content-Type", "multipart/x-mixed-replace; boundary=--BoundaryString");
      s.end_headers()
      while True:
        # Render one RGBA frame and re-encode it as PNG in memory.
        data = s.server.view.render(s.server.ctxt, s.server.model)
        pilimg = Image.frombuffer("RGBA", SIZE, data, "raw", "RGBA", 0, 1)
        stream = io.BytesIO()
        pilimg.save(stream, "PNG")
        b = stream.getbuffer()
        # bytes %-formatting requires Python 3.5+.
        # NOTE(review): no trailing CRLF is written after the image bytes
        # before the next boundary — some clients may require it; confirm.
        s.wfile.write(b"--BoundaryString\r\nContent-type: image/png\r\nContent-Length: %d\r\n\r\n" % len(b))
        s.wfile.write(b)
def main(argv):
  """Run the streaming server on 0.0.0.0:8000, handling one request at a time."""
  bind_addr = ("0.0.0.0", 8000)
  server = moovoo_server(bind_addr, moovoo_handler)
  # To enable TLS, wrap the listening socket:
  # server.socket = ssl.wrap_socket(server.socket, certfile='cert.pem', server_side=True)
  while True:
    server.handle_request()
if __name__ == "__main__":
  # Run until Ctrl-C; exit quietly on keyboard interrupt.
  try:
    main(sys.argv)
  except KeyboardInterrupt as k:
    print("Keyboard interrupt")
| andy-thomason/moovoo | examples/server.py | Python | mit | 1,994 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
demo template
Tested environment:
Mac OS X 10.6.8
"""
import sys
try:
from PySide import QtCore
from PySide import QtGui
except ImportError:
from PyQt4 import QtCore
from PyQt4 import QtGui
class SheetWin(QtGui.QWidget):
    """Sheet-style child window holding a single button that closes it."""

    def __init__(self, parent=None):
        super(SheetWin, self).__init__(parent)
        self.setWindowFlags(QtCore.Qt.Sheet)
        close_btn = QtGui.QPushButton("close", self)
        close_btn.move(10, 10)
        close_btn.clicked.connect(self.close)
class Demo(QtGui.QWidget):
    """Main demo window: a single button that opens a SheetWin sheet."""

    def __init__(self):
        super(Demo, self).__init__()
        self.setGeometry(500, 200, 300, 400)
        button = QtGui.QPushButton("btn", self)
        button.clicked.connect(self.btn_cb)

    def btn_cb(self):
        """Slot: show a sheet attached to this window."""
        SheetWin(self).show()

    def show_and_raise(self):
        """Show the window and bring it to the front."""
        self.show()
        self.raise_()
if __name__ == "__main__":
    # Standard Qt bootstrap: create the app, show the window, enter the loop.
    app = QtGui.QApplication(sys.argv)
    demo = Demo()
    demo.show_and_raise()
    sys.exit(app.exec_())
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-26 09:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: changes Exercise.author's on_delete behavior
    # to PROTECT (deleting a user with exercises now raises ProtectedError).
    dependencies = [
        ('exercises', '0004_exercise_author'),
    ]
    operations = [
        migrations.AlterField(
            model_name='exercise',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
    ]
| FlowFX/unkenmathe.de | src/um/exercises/migrations/0005_auto_20170826_0942.py | Python | agpl-3.0 | 578 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
"""
See the README.rst in this directory for details on running these tests.
@todo allow using a database other than `development.db` - for some reason, a
test db is not created when running using normal settings
@todo when using database settings, a test database is used and this makes it
difficult for cleanup to track the layers created between runs
@todo only test_time seems to work correctly with database backend test settings
"""
from geonode.tests.base import GeoNodeBaseTestSupport
import os.path
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.urlresolvers import reverse
from geonode.layers.models import Layer
from geonode.maps.models import Map
from geonode.documents.models import Document
from geonode.people.models import Profile
from geonode.upload.models import Upload
from geonode.upload.utils import _ALLOW_TIME_STEP
from geonode.geoserver.helpers import ogc_server_settings
from geonode.geoserver.helpers import cascading_delete
from geonode.geoserver.signals import gs_catalog
from geoserver.catalog import Catalog
# from geonode.upload.utils import make_geogig_rest_payload
from gisdata import BAD_DATA
from gisdata import GOOD_DATA
from owslib.wms import WebMapService
from poster.encode import multipart_encode, MultipartParam
from poster.streaminghttp import register_openers
# from urllib2 import HTTPError
from zipfile import ZipFile
import re
import os
import csv
import glob
import time
import json
# import signal
import urllib
import urllib2
import logging
import tempfile
import unittest
# import subprocess
import dj_database_url
# Credentials/endpoints for the live GeoNode and GeoServer instances that
# these integration tests run against.
GEONODE_USER = 'admin'
GEONODE_PASSWD = 'admin'
GEONODE_URL = settings.SITEURL.rstrip('/')
GEOSERVER_URL = ogc_server_settings.LOCATION
GEOSERVER_USER, GEOSERVER_PASSWD = ogc_server_settings.credentials
logging.getLogger('south').setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
# create test user if needed, delete all layers and set password
# NOTE: this runs at import time, so merely importing this module mutates
# the database (creates the admin user or purges its layers).
u, created = Profile.objects.get_or_create(username=GEONODE_USER)
if created:
    u.set_password(GEONODE_PASSWD)
    u.save()
else:
    Layer.objects.filter(owner=u).delete()
def upload_step(step=None):
    """Resolve the uploader URL for *step*; falsy step yields the entry URL."""
    args = [step] if step else []
    return reverse('data_upload', args=args)
def get_wms(version='1.1.1', type_name=None):
    """ Function to return an OWSLib WMS object

    :param version: WMS protocol version to request.
    :param type_name: optional 'workspace:layer' name; when given, the
        workspace-scoped capabilities URL is used.
    """
    # right now owslib does not support auth for get caps
    # requests. Either we should roll our own or fix owslib
    if type_name:
        url = GEOSERVER_URL + \
            '%s/wms?request=getcapabilities' % type_name.replace(':', '/')
    else:
        url = GEOSERVER_URL + \
            '/wms?request=getcapabilities'
    return WebMapService(
        url,
        version=version,
        username=GEOSERVER_USER,
        password=GEOSERVER_PASSWD
    )
class Client(object):
    """client for making http requests

    Thin urllib2/poster wrapper that keeps a cookie jar and a CSRF token so
    it can drive GeoNode's login and multi-step upload forms.
    """
    def __init__(self, url, user, passwd):
        self.url = url
        self.user = user
        self.passwd = passwd
        self.csrf_token = None
        self.opener = self._init_url_opener()
    def _init_url_opener(self):
        # Build a poster-enabled opener (multipart support) with cookies.
        self.cookies = urllib2.HTTPCookieProcessor()
        opener = register_openers()
        opener.add_handler(self.cookies)  # Add cookie handler
        return opener
    def make_request(self, path, data=None,
                     ajax=False, debug=True):
        """GET (no data) or multipart-POST (data dict) to path; returns the response.

        ajax=True adds the query flag and header GeoNode uses to detect XHR.
        On HTTPError the body is logged (unless debug=False) and re-raised.
        """
        url = path if path.startswith("http") else self.url + path
        if ajax:
            url += '&ajax=true' if '?' in url else '?ajax=true'
        request = None
        if data:
            items = []
            # wrap post parameters
            for name, value in data.items():
                # NOTE: `file` is the Python 2 builtin type; Py2-only code.
                if isinstance(value, file):
                    # add file
                    items.append(MultipartParam.from_file(name, value.name))
                else:
                    items.append(MultipartParam(name, value))
            datagen, headers = multipart_encode(items)
            request = urllib2.Request(url, datagen, headers)
        else:
            request = urllib2.Request(url=url)
        if ajax:
            request.add_header('X_REQUESTED_WITH', 'XMLHttpRequest')
        try:
            # return urllib2.urlopen(request)
            return self.opener.open(request)
        except urllib2.HTTPError as ex:
            if not debug:
                raise
            logger.error('error in request to %s' % path)
            logger.error(ex.reason)
            logger.error(ex.read())
            raise
    def get(self, path, debug=True):
        return self.make_request(path, debug=debug)
    def login(self):
        """ Method to login the GeoNode site"""
        self.csrf_token = self.get_csrf_token()
        params = {'csrfmiddlewaretoken': self.csrf_token,
                  'username': self.user,
                  'next': '/',
                  'password': self.passwd}
        self.make_request(
            reverse('account_login'),
            data=params
        )
        # Login rotates the CSRF cookie, so refresh our copy.
        self.csrf_token = self.get_csrf_token()
    def upload_file(self, _file):
        """ function that uploads a file, or a collection of files, to
        the GeoNode"""
        if not self.csrf_token:
            self.login()
        spatial_files = ("dbf_file", "shx_file", "prj_file")
        base, ext = os.path.splitext(_file)
        params = {
            # make public since wms client doesn't do authentication
            'permissions': '{ "users": {"AnonymousUser": ["view_resourcebase"]} , "groups":{}}',
            'csrfmiddlewaretoken': self.csrf_token
        }
        # deal with shapefiles
        if ext.lower() == '.shp':
            for spatial_file in spatial_files:
                ext, _ = spatial_file.split('_')
                file_path = base + '.' + ext
                # sometimes a shapefile is missing an extra file,
                # allow for that
                if os.path.exists(file_path):
                    params[spatial_file] = open(file_path, 'rb')
        base_file = open(_file, 'rb')
        params['base_file'] = base_file
        resp = self.make_request(
            upload_step(),
            data=params,
            ajax=True)
        data = resp.read()
        try:
            return resp, json.loads(data)
        except ValueError:
            # raise ValueError(
            #     'probably not json, status %s' %
            #     resp.getcode(),
            #     data)
            return resp, data
    def get_html(self, path, debug=True):
        """ Method that make a get request and passes the results to bs4
        Takes a path and returns a tuple
        """
        resp = self.get(path, debug)
        return resp, BeautifulSoup(resp.read())
    def get_json(self, path):
        resp = self.get(path)
        return resp, json.loads(resp.read())
    def get_csrf_token(self, last=False):
        """Get a csrf_token from the home page or read from the cookie jar
        based on the last response
        """
        if not last:
            self.get('/')
        csrf = [c for c in self.cookies.cookiejar if c.name == 'csrftoken']
        return csrf[0].value if csrf else None
class UploaderBase(GeoNodeBaseTestSupport):
    """Shared fixture/helpers for the live-server upload integration tests.

    setUp() waits for the running GeoNode instance, builds an HTTP Client
    and a GeoServer catalog handle, and temporarily points the default OGC
    datastore at postgis; tearDown() reverts the settings and purges layers,
    maps and documents.
    """

    settings_overrides = []
    @classmethod
    def setUpClass(cls):
        # Historical server-bootstrap code removed; tests now assume an
        # externally running GeoNode/GeoServer (see setUp).
        pass
    @classmethod
    def tearDownClass(cls):
        # Remove the generated settings module left by older runs, if any.
        if os.path.exists('integration_settings.py'):
            os.unlink('integration_settings.py')
    def setUp(self):
        # await startup: poll the homepage until the live server answers.
        cl = Client(
            GEONODE_URL, GEONODE_USER, GEONODE_PASSWD
        )
        for i in range(10):
            time.sleep(.2)
            try:
                cl.get_html('/', debug=False)
                break
            except BaseException:
                pass
        self.client = Client(
            GEONODE_URL, GEONODE_USER, GEONODE_PASSWD
        )
        self.catalog = Catalog(
            GEOSERVER_URL + 'rest', GEOSERVER_USER, GEOSERVER_PASSWD
        )
        self._tempfiles = []
        # createlayer must use postgis as a datastore
        # set temporary settings to use a postgis datastore
        DB_HOST = settings.DATABASES['default']['HOST']
        DB_PORT = settings.DATABASES['default']['PORT']
        DB_NAME = settings.DATABASES['default']['NAME']
        DB_USER = settings.DATABASES['default']['USER']
        DB_PASSWORD = settings.DATABASES['default']['PASSWORD']
        settings.DATASTORE_URL = 'postgis://{}:{}@{}:{}/{}'.format(
            DB_USER,
            DB_PASSWORD,
            DB_HOST,
            DB_PORT,
            DB_NAME
        )
        postgis_db = dj_database_url.parse(
            settings.DATASTORE_URL, conn_max_age=600)
        settings.DATABASES['datastore'] = postgis_db
        settings.OGC_SERVER['default']['DATASTORE'] = 'datastore'
    def tearDown(self):
        # Remove temp files created by temp_file()/make_csv().
        # (Was `map(os.unlink, ...)`, a no-op under Python 3's lazy map.)
        for temp_path in self._tempfiles:
            os.unlink(temp_path)
        # move to original settings
        settings.OGC_SERVER['default']['DATASTORE'] = ''
        del settings.DATABASES['datastore']
        # Cleanup
        Layer.objects.all().delete()
        Map.objects.all().delete()
        Document.objects.all().delete()
    def check_layer_geonode_page(self, path):
        """ Check that the final layer page render's correctly after
        an layer is uploaded """
        # the final url for uploader process. This does a redirect to
        # the final layer page in geonode
        resp, _ = self.client.get_html(path)
        self.assertTrue(resp.code == 200)
        self.assertTrue('content-type' in resp.headers)
    def check_layer_geoserver_caps(self, type_name):
        """ Check that a layer shows up in GeoServer's get
        capabilities document """
        # using owslib
        wms = get_wms(type_name=type_name)
        ws, layer_name = type_name.split(':')
        self.assertTrue(layer_name in wms.contents,
                        '%s is not in %s' % (layer_name, wms.contents))
    def check_layer_geoserver_rest(self, layer_name):
        """ Check that a layer shows up in GeoServer rest api after
        the uploader is done"""
        # using gsconfig to test the geoserver rest api.
        layer = self.catalog.get_layer(layer_name)
        # (Was `assertIsNotNone(layer is not None)`, which is vacuously
        # true because a bool is never None.)
        self.assertIsNotNone(layer)
    def check_and_pass_through_timestep(self, redirect_to):
        """Assert we are on the srs or time step, then POST through it."""
        time_step = upload_step('time')
        srs_step = upload_step('srs')
        if srs_step in redirect_to:
            resp = self.client.make_request(redirect_to)
        else:
            self.assertTrue(time_step in redirect_to)
            resp = self.client.make_request(redirect_to)
        token = self.client.get_csrf_token(True)
        self.assertEquals(resp.code, 200)
        resp = self.client.make_request(
            redirect_to, {'csrfmiddlewaretoken': token}, ajax=True)
        data = json.loads(resp.read())
        return resp, data
    def complete_raster_upload(self, file_path, resp, data):
        """Raster variant of complete_upload (skips time/srs steps)."""
        return self.complete_upload(file_path, resp, data, is_raster=True)
    def check_save_step(self, resp, data):
        """Verify the initial save step"""
        self.assertEquals(resp.code, 200)
        self.assertTrue(isinstance(data, dict))
        # make that the upload returns a success True key
        self.assertTrue(data['success'], 'expected success but got %s' % data)
        self.assertTrue('redirect_to' in data)
    def complete_upload(self, file_path, resp, data, is_raster=False):
        """Method to check if a layer was correctly uploaded to the
        GeoNode.
        arguments: file path, the django http response
        Checks to see if a layer is configured in Django
        Checks to see if a layer is configured in GeoServer
        checks the Rest API
        checks the get cap document """
        layer_name, ext = os.path.splitext(os.path.basename(file_path))
        if not isinstance(data, basestring):
            self.check_save_step(resp, data)
            layer_page = self.finish_upload(
                data['redirect_to'],
                layer_name,
                is_raster)
            self.check_layer_complete(layer_page, layer_name)
    def finish_upload(
            self,
            current_step,
            layer_name,
            is_raster=False,
            skip_srs=False):
        """Drive the remaining upload steps; return the final layer URL
        (or the last step URL if the final JSON could not be parsed)."""
        if not is_raster and _ALLOW_TIME_STEP:
            resp, data = self.check_and_pass_through_timestep(current_step)
            self.assertEquals(resp.code, 200)
            if not isinstance(data, basestring):
                if data['success']:
                    self.assertTrue(
                        data['success'],
                        'expected success but got %s' %
                        data)
                    self.assertTrue('redirect_to' in data)
                    current_step = data['redirect_to']
                    self.wait_for_progress(data.get('progress'))
        if not is_raster and not skip_srs:
            self.assertTrue(upload_step('srs') in current_step)
            # if all is good, the srs step will redirect to the final page
            resp = self.client.get(current_step)
            content = json.loads(resp.read())
            if not content.get('url') and content.get(
                    'redirect_to',
                    current_step) == upload_step('final'):
                resp = self.client.get(content.get('redirect_to'))
        else:
            self.assertTrue(upload_step('final') in current_step)
            resp = self.client.get(current_step)
        self.assertEquals(resp.code, 200)
        resp_js = resp.read()
        try:
            c = json.loads(resp_js)
            url = c['url']
            url = urllib.unquote(url)
            # and the final page should redirect to the layer page
            # @todo - make the check match completely (endswith at least)
            # currently working around potential 'orphaned' db tables
            self.assertTrue(
                layer_name in url, 'expected %s in URL, got %s' %
                (layer_name, url))
            return url
        except BaseException:
            return current_step
    def check_upload_model(self, original_name):
        """Assert the Upload DB record for original_name is complete."""
        # we can only test this if we're using the same DB as the test instance
        if not settings.OGC_SERVER['default']['DATASTORE']:
            return
        upload = None
        try:
            # AF: TODO Headhakes here... nose is not accessing to the test
            # db!!!
            uploads = Upload.objects.all()
            if uploads:
                upload = Upload.objects.filter(name=str(original_name)).last()
        except Upload.DoesNotExist:
            self.fail('expected to find Upload object for %s' % original_name)
        # AF: TODO Headhakes here... nose is not accessing to the test db!!!
        if upload:
            self.assertTrue(upload.complete)
    def check_layer_complete(self, layer_page, original_name):
        '''check everything to verify the layer is complete'''
        self.check_layer_geonode_page(layer_page)
        # @todo use the original_name
        # currently working around potential 'orphaned' db tables
        # this grabs the name from the url (it might contain a 0)
        type_name = os.path.basename(layer_page)
        layer_name = original_name
        try:
            layer_name = type_name.split(':')[1]
        except BaseException:
            pass
        # work around acl caching on geoserver side of things
        caps_found = False
        for i in range(10):
            time.sleep(.5)
            try:
                self.check_layer_geoserver_caps(type_name)
                caps_found = True
            except BaseException:
                pass
        if caps_found:
            self.check_layer_geoserver_rest(layer_name)
            self.check_upload_model(layer_name)
        else:
            logger.warning(
                "Could not recognize Layer %s on GeoServer WMS" %
                original_name)
    def check_invalid_projection(self, layer_name, resp, data):
        """ Makes sure that we got the correct response from an layer
        that can't be uploaded"""
        # (Was `assertTrue(resp.code, 200)`, which passes for ANY truthy
        # status code — 200 was being used as the failure message.)
        self.assertEquals(resp.code, 200)
        if not isinstance(data, basestring):
            self.assertTrue(data['success'])
            self.assertTrue(upload_step("srs") in data['redirect_to'])
            resp, soup = self.client.get_html(data['redirect_to'])
            # grab an h2 and find the name there as part of a message saying it's
            # bad
            h2 = soup.find_all(['h2'])[0]
            self.assertTrue(str(h2).find(layer_name))
    def upload_folder_of_files(self, folder, final_check, session_ids=None):
        """Upload every .tif/.shp/.zip in folder, running final_check on each;
        optionally collect upload-session ids into session_ids."""
        mains = ('.tif', '.shp', '.zip')
        def is_main(_file):
            # True when the file is one of the primary (uploadable) types.
            _, ext = os.path.splitext(_file)
            return (ext.lower() in mains)
        main_files = filter(is_main, os.listdir(folder))
        for main in main_files:
            # get the abs path to the file
            _file = os.path.join(folder, main)
            base, _ = os.path.splitext(_file)
            resp, data = self.client.upload_file(_file)
            if session_ids is not None:
                if not isinstance(data, basestring) and data.get('url'):
                    session_id = re.search(
                        r'.*id=(\d+)', data.get('url')).group(1)
                    if session_id:
                        session_ids += [session_id]
            if not isinstance(data, basestring):
                self.wait_for_progress(data.get('progress'))
            final_check(base, resp, data)
    def upload_file(self, fname, final_check,
                    check_name=None, session_ids=None):
        """Upload a single file and run final_check(check_name, resp, data)."""
        if not check_name:
            check_name, _ = os.path.splitext(fname)
        resp, data = self.client.upload_file(fname)
        if session_ids is not None:
            if not isinstance(data, basestring):
                if data.get('url'):
                    session_id = re.search(
                        r'.*id=(\d+)', data.get('url')).group(1)
                    if session_id:
                        session_ids += [session_id]
        if not isinstance(data, basestring):
            self.wait_for_progress(data.get('progress'))
        final_check(check_name, resp, data)
    def wait_for_progress(self, progress_url):
        """Poll the progress endpoint (recursively) until it leaves RUNNING."""
        if progress_url:
            resp = self.client.get(progress_url)
            assert resp.getcode() == 200, 'Invalid progress status code'
            raw_data = resp.read()
            json_data = json.loads(raw_data)
            # "COMPLETE" state means done
            if json_data.get('state', '') == 'RUNNING':
                time.sleep(0.1)
                self.wait_for_progress(progress_url)
    def temp_file(self, ext):
        """mkstemp wrapper that registers the file for tearDown cleanup."""
        fd, abspath = tempfile.mkstemp(ext)
        self._tempfiles.append(abspath)
        return fd, abspath
    def make_csv(self, *rows):
        """Write rows to a tracked temp .csv file and return its path."""
        fd, abspath = self.temp_file('.csv')
        fp = os.fdopen(fd, 'wb')
        out = csv.writer(fp)
        for r in rows:
            out.writerow(r)
        fp.close()
        return abspath
class TestUpload(UploaderBase):
    """Upload integration tests against the default (file-based) datastore."""
    settings_overrides = []
    def test_shp_upload(self):
        """ Tests if a vector layer can be upload to a running GeoNode GeoServer"""
        fname = os.path.join(
            GOOD_DATA,
            'vector',
            'san_andres_y_providencia_water.shp')
        self.upload_file(fname, self.complete_upload)
    def test_raster_upload(self):
        """ Tests if a raster layer can be upload to a running GeoNode GeoServer"""
        fname = os.path.join(GOOD_DATA, 'raster', 'relief_san_andres.tif')
        self.upload_file(fname, self.complete_raster_upload)
    def test_zipped_upload(self):
        """Test uploading a zipped shapefile"""
        fd, abspath = self.temp_file('.zip')
        fp = os.fdopen(fd, 'wb')
        zf = ZipFile(fp, 'w')
        fpath = os.path.join(
            GOOD_DATA,
            'vector',
            'san_andres_y_providencia_poi.*')
        for f in glob.glob(fpath):
            zf.write(f, os.path.basename(f))
        zf.close()
        self.upload_file(abspath, self.complete_upload,
                         check_name='san_andres_y_providencia_poi')
    def test_invalid_layer_upload(self):
        """ Tests the layers that are invalid and should not be uploaded"""
        # this issue with this test is that the importer supports
        # shapefiles without an .prj
        invalid_path = os.path.join(BAD_DATA)
        self.upload_folder_of_files(
            invalid_path,
            self.check_invalid_projection)
    def test_coherent_importer_session(self):
        """ Tests that the upload computes correctly next session IDs"""
        session_ids = []
        # First of all lets upload a raster
        fname = os.path.join(GOOD_DATA, 'raster', 'relief_san_andres.tif')
        self.assertTrue(os.path.isfile(fname))
        self.upload_file(
            fname,
            self.complete_raster_upload,
            session_ids=session_ids)
        # Next force an invalid session
        invalid_path = os.path.join(BAD_DATA)
        self.upload_folder_of_files(
            invalid_path,
            self.check_invalid_projection, session_ids=session_ids)
        # Finally try to upload a good file anc check the session IDs
        fname = os.path.join(GOOD_DATA, 'raster', 'relief_san_andres.tif')
        self.upload_file(
            fname,
            self.complete_raster_upload,
            session_ids=session_ids)
        # NOTE(review): len(...) >= 0 is vacuously true; the real check is
        # the monotonicity assertion below.
        self.assertTrue(len(session_ids) >= 0)
        if len(session_ids) > 1:
            self.assertTrue(int(session_ids[0]) < int(session_ids[1]))
    def test_extension_not_implemented(self):
        """Verify a error message is return when an unsupported layer is
        uploaded"""
        # try to upload ourselves
        # a python file is unsupported
        unsupported_path = __file__
        if unsupported_path.endswith('.pyc'):
            unsupported_path = unsupported_path.rstrip('c')
        # with self.assertRaises(HTTPError):
        self.client.upload_file(unsupported_path)
    def test_csv(self):
        '''make sure a csv upload fails gracefully/normally when not activated'''
        csv_file = self.make_csv(
            ['lat', 'lon', 'thing'], ['-100', '-40', 'foo'])
        layer_name, ext = os.path.splitext(os.path.basename(csv_file))
        resp, data = self.client.upload_file(csv_file)
        self.assertEquals(resp.code, 200)
        if not isinstance(data, basestring):
            self.assertTrue('success' in data)
            self.assertTrue(data['success'])
            # NOTE(review): second argument here is the failure MESSAGE, not
            # an expected value — this only checks redirect_to is truthy.
            self.assertTrue(data['redirect_to'], "/upload/csv")
@unittest.skipUnless(ogc_server_settings.datastore_db,
                     'Vector datastore not enabled')
class TestUploadDBDataStore(UploaderBase):
    """Upload tests that require the vector (database) datastore backend."""
    settings_overrides = []
    def test_csv(self):
        """Override the baseclass test and verify a correct CSV upload"""
        csv_file = self.make_csv(
            ['lat', 'lon', 'thing'], ['-100', '-40', 'foo'])
        layer_name, ext = os.path.splitext(os.path.basename(csv_file))
        resp, form_data = self.client.upload_file(csv_file)
        self.assertEquals(resp.code, 200)
        if not isinstance(form_data, basestring):
            self.check_save_step(resp, form_data)
            csv_step = form_data['redirect_to']
            self.assertTrue(upload_step('csv') in csv_step)
            # Tell the csv step which columns hold the coordinates.
            form_data = dict(
                lat='lat',
                lng='lon',
                csrfmiddlewaretoken=self.client.get_csrf_token())
            resp = self.client.make_request(csv_step, form_data)
            content = json.loads(resp.read())
            self.assertEquals(resp.code, 200)
            self.assertTrue(upload_step('srs') in content['redirect_to'])
    def test_time(self):
        """Verify that uploading time based shapefile works properly"""
        cascading_delete(self.catalog, 'boxes_with_date')
        timedir = os.path.join(GOOD_DATA, 'time')
        layer_name = 'boxes_with_date'
        shp = os.path.join(timedir, '%s.shp' % layer_name)
        # get to time step
        resp, data = self.client.upload_file(shp)
        self.assertEquals(resp.code, 200)
        if not isinstance(data, basestring):
            self.wait_for_progress(data.get('progress'))
            self.assertTrue(data['success'])
            # NOTE(review): second argument is the failure message, not an
            # expected value.
            self.assertTrue(data['redirect_to'], upload_step('time'))
            redirect_to = data['redirect_to']
            resp, data = self.client.get_html(upload_step('time'))
            self.assertEquals(resp.code, 200)
            data = dict(csrfmiddlewaretoken=self.client.get_csrf_token(),
                        time_attribute='date',
                        presentation_strategy='LIST',
                        )
            resp = self.client.make_request(redirect_to, data)
            self.assertEquals(resp.code, 200)
            resp_js = json.loads(resp.read())
            if resp_js['success']:
                url = resp_js['redirect_to']
                resp = self.client.make_request(url, data)
                url = json.loads(resp.read())['url']
                self.assertTrue(
                    url.endswith(layer_name),
                    'expected url to end with %s, but got %s' %
                    (layer_name,
                     url))
                self.assertEquals(resp.code, 200)
                url = urllib.unquote(url)
                self.check_layer_complete(url, layer_name)
                wms = get_wms(type_name='geonode:%s' % layer_name)
                layer_info = wms.items()[0][1]
                self.assertEquals(100, len(layer_info.timepositions))
            else:
                self.assertTrue('error_msg' in resp_js)
                self.assertTrue(
                    'Source SRS is not valid' in resp_js['error_msg'])
    def test_configure_time(self):
        """Upload a shapefile with start AND end date attributes and verify
        the time dimension gets configured."""
        layer_name = 'boxes_with_end_date'
        # make sure it's not there (and configured)
        cascading_delete(gs_catalog, layer_name)
        def get_wms_timepositions():
            # Return the layer's WMS time positions, or None if the layer
            # is not (yet) advertised in the capabilities document.
            alternate_name = 'geonode:%s' % layer_name
            if alternate_name in get_wms().contents:
                metadata = get_wms().contents[alternate_name]
                self.assertTrue(metadata is not None)
                return metadata.timepositions
            else:
                return None
        thefile = os.path.join(
            GOOD_DATA, 'time', '%s.shp' % layer_name
        )
        resp, data = self.client.upload_file(thefile)
        # initial state is no positions or info
        self.assertTrue(get_wms_timepositions() is None)
        self.assertEquals(resp.code, 200)
        # enable using interval and single attribute
        if not isinstance(data, basestring):
            self.wait_for_progress(data.get('progress'))
            self.assertTrue(data['success'])
            self.assertTrue(data['redirect_to'], upload_step('time'))
            redirect_to = data['redirect_to']
            resp, data = self.client.get_html(upload_step('time'))
            self.assertEquals(resp.code, 200)
            data = dict(csrfmiddlewaretoken=self.client.get_csrf_token(),
                        time_attribute='date',
                        time_end_attribute='enddate',
                        presentation_strategy='LIST',
                        )
            resp = self.client.make_request(redirect_to, data)
            self.assertEquals(resp.code, 200)
            resp_js = json.loads(resp.read())
            if resp_js['success']:
                url = resp_js['redirect_to']
                resp = self.client.make_request(url, data)
                url = json.loads(resp.read())['url']
                self.assertTrue(
                    url.endswith(layer_name),
                    'expected url to end with %s, but got %s' %
                    (layer_name,
                     url))
                self.assertEquals(resp.code, 200)
                url = urllib.unquote(url)
                self.check_layer_complete(url, layer_name)
                wms = get_wms(type_name='geonode:%s' % layer_name)
                layer_info = wms.items()[0][1]
                self.assertEquals(100, len(layer_info.timepositions))
            else:
                self.assertTrue('error_msg' in resp_js)
                self.assertTrue(
                    'Source SRS is not valid' in resp_js['error_msg'])
# class GeogigTest(GeoNodeBaseTestSupport):
# port = 8000
# def test_payload_creation(self):
# '''Test formation of REST call to geoserver's geogig API'''
# author_name = "test"
# author_email = "testuser@geonode.org"
#
# # Test filebased geogig
# settings.OGC_SERVER['default']['PG_GEOGIG'] = False
# fb_message = {
# "authorName": author_name,
# "authorEmail": author_email,
# "parentDirectory":
# settings.OGC_SERVER['default']['GEOGIG_DATASTORE_DIR']
# }
# fb_payload = make_geogig_rest_payload(author_name, author_email)
# self.assertDictEqual(fb_message, fb_payload)
# self.assertEquals(json.dumps(fb_message, sort_keys=True),
# json.dumps(fb_payload, sort_keys=True))
#
# # Test postgres based geogig
# settings.OGC_SERVER['default']['PG_GEOGIG'] = True
# # Manually override the settings to simulate the REST call for postgres
# settings.DATABASES['test-pg'] = {
# "HOST": "localhost",
# "PORT": "5432",
# "NAME": "repos",
# "SCHEMA": "public",
# "USER": "geogig",
# "PASSWORD": "geogig"
# }
# settings.OGC_SERVER['default']['DATASTORE'] = 'test-pg'
#
# pg_message = {
# "authorName": author_name,
# "authorEmail": author_email,
# "dbHost": settings.DATABASES['test-pg']['HOST'],
# "dbPort": settings.DATABASES['test-pg']['PORT'],
# "dbName": settings.DATABASES['test-pg']['NAME'],
# "dbSchema": settings.DATABASES['test-pg']['SCHEMA'],
# "dbUser": settings.DATABASES['test-pg']['USER'],
# "dbPassword": settings.DATABASES['test-pg']['PASSWORD']
# }
#
# pg_payload = make_geogig_rest_payload(author_name, author_email)
# self.assertDictEqual(pg_message, pg_payload)
# self.assertEquals(json.dumps(pg_message, sort_keys=True),
# json.dumps(pg_payload, sort_keys=True))
| timlinux/geonode | geonode/upload/tests/integration.py | Python | gpl-3.0 | 33,505 |
"""
Class which allows property and dict-like access to a fixed set of instance
attributes. Attributes are locked by __slots__, however accessor methods
may be created/removed on instances, or defined by the subclass. An
INITIALIZED attribute is provided to signal completion of __init__()
for use by accessor methods (i.e. so they know when __init__ may be
setting values).
Subclasses must define a __slots__ class attribute containing the list
of attribute names to reserve. All additional subclass descendents
must explicitly copy __slots__ from the parent in their definition.
Users of subclass instances are expected to get/set/del attributes
only via the standard object or dict-like interface. i.e.
instance.attribute = whatever
or
instance['attribute'] = whatever
Internally, methods are free to call the accessor methods. Only
accessor methods should use the special __dict_*__() and __super_*__() methods.
These are there to allow convenient access to the internal dictionary
values and subclass-defined attributes (such as __slots__).
example:
class A(PropCan):
# Class with *attributes*
__slots__ = ('a', 'b')
# 'a' has defined a set/get/del by definition of method with prefix
# set_a, get_a, del_a
# 'b' doesn't have defined set/get/del then classic set/get/del will be
# called instead.
def __init__(self, a=1, b='b'):
super(A, self).__init__(a, b)
        def set_a(self, value):
            # If isinstance(obj, A), then "obj.a = val" calls this method.
            self.__dict_set__("a", value)
        def get_a(self):
            # If isinstance(obj, A), then "xx = obj.a" calls this method.
            return self.__dict_get__("a")
        def del_a(self):
            # If isinstance(obj, A), then "del obj.a" calls this method.
            self.__dict_del__("a")
class B(PropCan):
# Class without *attributes*
# ***** Even if class doesn't have attributes there should be
# defined __slots__ = []. Because it is preferred by new style of class.
# *****
__slots__ = []
def __init__(self):
super(B, self).__init__()
"""
class PropCanInternal(object):

    """
    Semi-private methods for use only by PropCanBase subclasses (NOT instances)
    """

    # Accessor methods call these helpers when they must bypass the special
    # attribute/key handling layered on by PropCanBase.

    def __dict_get__(self, key):
        """Fetch ``key`` straight from the dict, skipping accessors/__slots__."""
        return dict.__getitem__(self, key)

    def __dict_set__(self, key, value):
        """Store ``key`` straight into the dict, skipping accessors/__slots__."""
        dict.__setitem__(self, key, value)

    def __dict_del__(self, key):
        """Remove ``key`` straight from the dict, skipping accessors/__slots__."""
        return dict.__delitem__(self, key)

    def __super_get__(self, key):
        """Read attribute ``key`` directly via ``object``, skipping accessors."""
        return object.__getattribute__(self, key)

    def __super_set__(self, key, value):
        """Write attribute ``key`` directly via ``object``, skipping accessors."""
        object.__setattr__(self, key, value)

    def __super_del__(self, key):
        """Delete attribute ``key`` directly via ``object``, skipping accessors."""
        object.__delattr__(self, key)
class classproperty(property):

    """
    Property whose getter/setter are classmethods invoked on the owner class.
    """

    def __get__(self, obj, type_):
        # Bind the wrapped classmethod to the owner class, then call it.
        return self.fget.__get__(None, type_)()

    def __set__(self, obj, value):
        # Mirror __get__: route the write through the instance's class.
        return self.fset.__get__(None, type(obj))(value)
class PropCanBase(dict, PropCanInternal):

    """
    Objects with optional accessor methods and dict-like access to fixed set of keys
    """

    # get_*(), set_*(), del_*() accessor methods called from subclass
    # __init__ sometimes need special handling, this is the signal.
    INITIALIZED = False

    # Help debugging by making all slot values available in all subclasses
    # cache the value on first call
    ___all_slots__ = None

    @classproperty
    @classmethod
    def __all_slots__(cls):
        # Lazily gather __slots__ from every class in the MRO and cache
        # the combined tuple on the class.
        if not cls.___all_slots__:
            all_slots = []
            for cls_slots in [getattr(_cls, '__slots__', [])
                              for _cls in cls.__mro__]:
                all_slots += cls_slots
            cls.___all_slots__ = tuple(all_slots)
        return cls.___all_slots__

    def __new__(cls, *args, **dargs):
        # Refuse to instantiate subclasses that forgot to declare __slots__.
        if not hasattr(cls, '__slots__'):
            raise NotImplementedError("Class '%s' must define __slots__ "
                                      "property" % str(cls))
        newone = super(PropCanBase, cls).__new__(cls, *args, **dargs)
        # Invalidate the cached slot list so __all_slots__ recomputes it
        # for this (possibly new) subclass.
        cls.___all_slots__ = tuple()
        return newone

    def __init__(self, *args, **dargs):
        """
        Initialize contents directly or by way of accessors

        :param *args: Initial values for __slots__ keys, same as dict.
        :param **dargs: Initial values for __slots__ keys, same as dict.
        """
        # Params are initialized here, not in super
        super(PropCanBase, self).__init__()
        # No need to re-invent dict argument processing
        values = dict(*args, **dargs)
        # A unique object() can never collide with a caller-supplied value
        # and, unlike the former string literal sentinel, does not depend on
        # CPython string interning for the "is" comparison to be correct.
        sentinel = object()
        for key in self.__all_slots__:
            value = values.get(key, sentinel)
            if value is not sentinel:
                # Call accessor methods if present
                self[key] = value
        # Let accessor methods know initialization is complete
        self.__super_set__('INITIALIZED', True)

    def __getitem__(self, key):
        # Delegate to a get_<key>() accessor when the subclass defines one.
        try:
            accessor = super(PropCanBase,
                             self).__getattribute__('get_%s' % key)
        except AttributeError:
            return super(PropCanBase, self).__getitem__(key)
        return accessor()

    def __setitem__(self, key, value):
        # Delegate to a set_<key>() accessor when the subclass defines one.
        self.__canhaz__(key, KeyError)
        try:
            accessor = super(PropCanBase,
                             self).__getattribute__('set_%s' % key)
        except AttributeError:
            return super(PropCanBase, self).__setitem__(key, value)
        return accessor(value)

    def __delitem__(self, key):
        # Delegate to a del_<key>() accessor when the subclass defines one.
        try:
            accessor = super(PropCanBase,
                             self).__getattribute__('del_%s' % key)
        except AttributeError:
            return super(PropCanBase, self).__delitem__(key)
        return accessor()

    def __get__(self, key):
        try:
            # Attempt to call accessor methods first whenever possible
            self.__canhaz__(key, KeyError)
            return self.__getitem__(key)
        except KeyError:
            # Allow subclasses to define attributes if required
            return super(PropCanBase, self).__getattribute__(key)

    def __set__(self, key, value):
        self.__canhaz__(key)
        try:
            return self.__setitem__(key, value)
        except KeyError as detail:
            # Prevent subclass instances from defining normal attributes
            # ("except X as e" works on Python 2.6+ and 3, unlike "except X, e")
            raise AttributeError(str(detail))

    def __getattr__(self, key):
        try:
            # Attempt to call accessor methods first whenever possible
            self.__canhaz__(key, KeyError)
            return self.__getitem__(key)
        except KeyError:
            # Allow subclasses to define attributes if required
            return super(PropCanBase, self).__getattribute__(key)

    def __setattr__(self, key, value):
        self.__canhaz__(key)
        try:
            return self.__setitem__(key, value)
        except KeyError as detail:
            # Prevent subclass instances from defining normal attributes
            raise AttributeError(str(detail))

    def __delattr__(self, key):
        self.__canhaz__(key)
        try:
            return self.__delitem__(key)
        except KeyError as detail:
            # Prevent subclass instances from deleting normal attributes
            raise AttributeError(str(detail))

    def __canhaz__(self, key, excpt=AttributeError):
        """
        Quickly determine if an accessor or instance attribute name is defined.

        :param key: Name to validate.
        :param excpt: Exception class raised for unknown names.
        """
        slots = self.__all_slots__
        keys = slots + ('get_%s' % key, 'set_%s' % key, 'del_%s' % key)
        if key not in keys:
            raise excpt("Key '%s' not found in super class attributes or in %s"
                        % (str(key), str(keys)))

    def copy(self):
        """
        Copy properties by value, not by reference.
        """
        return self.__class__(dict(self))
class PropCan(PropCanBase):

    """
    Special value handling on retrieval of None values
    """

    def __len__(self):
        # Only slots holding a set, truthy value count toward the length.
        return sum(1 for key in self.__all_slots__ if key in self)

    def __contains__(self, key):
        try:
            value = self.__dict_get__(key)
        except (KeyError, AttributeError):
            return False
        # Avoid inf. recursion if value == self
        if issubclass(type(value), type(self)):
            return True
        return bool(value)

    def __eq__(self, other):
        # special None/False value handling
        return dict(self.items()) == other

    def __ne__(self, other):
        return not self.__eq__(other)

    def keys(self):
        # special None/False value handling
        return [key for key in self.__all_slots__ if key in self]

    def values(self):
        # special None/False value handling
        return [self[key] for key in self.keys()]

    def items(self):
        return tuple((key, self[key]) for key in self.keys())

    has_key = __contains__

    def set_if_none(self, key, value):
        """
        Set the value of key, only if it's not set or None
        """
        if key not in self:
            self[key] = value

    def set_if_value_not_none(self, key, value):
        """
        Set the value of key, only if value is not None

        NOTE: the guard is plain truthiness, so any falsy value
        (0, '', False) is also skipped, not just None.
        """
        if value:
            self[key] = value
| spiceqa/virt-test | virttest/propcan.py | Python | gpl-2.0 | 10,270 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2008 Zikzakmedia S.L. (http://zikzakmedia.com)
# Jordi Esteve <jesteve@zikzakmedia.com>
#
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2011-2013 Agile Business Group sagl
# (<http://www.agilebg.com>)
# Ported to Odoo by Andrea Cometa <info@andreacometa.it>
# Ported to v8 API by Eneko Lacunza <elacunza@binovo.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp import models, fields, api
from openerp.osv import orm
class AccountMoveLine(models.Model):
    """Extend account.move.line with invoice-related helper fields used by
    the due-list views (source document, invoice date, payment terms...)."""
    _inherit = 'account.move.line'
    # Fields related through the move line's linked invoice/partner.
    invoice_origin = fields.Char(related='invoice.origin', string='Source Doc')
    invoice_date = fields.Date(related='invoice.date_invoice',
                               string='Invoice Date')
    partner_ref = fields.Char(related='partner_id.ref', string='Partner Ref')
    payment_term_id = fields.Many2one('account.payment.term',
                                     related='invoice.payment_term',
                                     string='Payment Terms')
    # Stored computed link to the invoice so it can be searched/grouped on.
    stored_invoice_id = fields.Many2one('account.invoice',
                                        compute='_get_invoice',
                                        string='Invoice', store=True)
    @api.depends('move_id', 'invoice.move_id')
    def _get_invoice(self):
        """Compute stored_invoice_id from the invoice sharing this line's move.

        Raises an error if more than one invoice points at the same move.
        """
        for line in self:
            inv_ids = self.env['account.invoice'].search(
                [('move_id', '=', line.move_id.id)])
            if len(inv_ids) > 1:
                raise orm.except_orm(
                    _('Error'),
                    _('Inconsistent data: move %s has more than one invoice')
                    % line.move_id.name)
            # NOTE(review): guards on line.invoice but indexes inv_ids[0];
            # if line.invoice is set while the search returns no record this
            # raises IndexError — confirm they can never disagree.
            if line.invoice:
                line.stored_invoice_id = inv_ids[0]
            else:
                line.stored_invoice_id = False
    # Maturity day stored as text for grouping in the due-list view.
    day = fields.Char(compute='_get_day', string='Day', size=16, store=True)
    @api.depends('date_maturity')
    def _get_day(self):
        """Mirror date_maturity into the char field 'day' (False when unset)."""
        for line in self:
            if line.date_maturity:
                line.day = line.date_maturity
            else:
                line.day = False
    @api.model
    def fields_view_get(self, view_id=None, view_type='form', toolbar=False,
                        submenu=False):
        """Serve the due-list tree view via the plain models.Model
        implementation; all other views go through the normal inheritance
        chain (which may add user-defined fields)."""
        model_data_obj = self.env['ir.model.data']
        ids = model_data_obj.search([
            ('module', '=', 'account_due_list'),
            ('name', '=', 'view_payments_tree')])
        if ids:
            view_payments_tree_id = model_data_obj.get_object_reference(
                'account_due_list', 'view_payments_tree')
        if ids and view_id == view_payments_tree_id[1]:
            # Use due list
            # NOTE(review): super(models.Model, ...) deliberately skips the
            # intermediate classes in the MRO — confirm intended.
            result = super(models.Model, self).fields_view_get(
                view_id, view_type, toolbar=toolbar, submenu=submenu)
        else:
            # Use special views for account.move.line object
            # (for ex. tree view contains user defined fields)
            result = super(AccountMoveLine, self).fields_view_get(
                view_id, view_type, toolbar=toolbar, submenu=submenu)
        return result
| incaser/account-payment | account_due_list/account_move_line.py | Python | agpl-3.0 | 4,024 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A routine for constructing a circuit to exactly implement a unitary generated by
one-body rotations through the optimal Givens rotation network. Construction
of this circuit can be found in Optica Vol. 3, Issue 12, pp. 1460-1465 (2016).
This Givens network improves upon the parallel Givens network for implementing
basis rotations in Phys. Rev. Lett. 120, 110501 (2018).
"""
from typing import cast, Iterable, Sequence, Tuple
import numpy
import cirq
from openfermion.linalg import givens_matrix_elements, givens_rotate
from openfermion.circuits.gates import Ryxxy
class GivensTranspositionError(Exception):
    """Raised when a phase matrix cannot be shifted across a Givens rotation."""
class GivensMatrixError(Exception):
    """Raised when a Givens matrix violates the real-first-column convention."""
def optimal_givens_decomposition(qubits: Sequence[cirq.Qid],
                                 unitary: numpy.ndarray
                                 ) -> Iterable[cirq.Operation]:
    r"""
    Implement a circuit that provides the unitary that is generated by
    single-particle fermion generators

    $$
    U(v) = exp(log(v)_{p,q}(a_{p}^{\dagger}a_{q} - a_{q}^{\dagger}a_{p})
    $$

    This can be used for implementing an exact single-body basis rotation

    Args:
        qubits: Sequence of qubits to apply the operations over. The qubits
                should be ordered in linear physical order.
        unitary: N x N unitary to decompose.  NOTE: modified in place by the
                 Givens eliminations below.

    Yields:
        cirq operations (Z rotations and Ryxxy gates on neighboring qubits)
        implementing the rotation.
    """
    N = unitary.shape[0]
    right_rotations = []
    left_rotations = []
    # Zero out the lower triangle of `unitary` with alternating column
    # (right-multiplied) and row (left-multiplied) Givens rotations.
    for i in range(1, N):
        if i % 2 == 1:
            for j in range(0, i):
                # eliminate U[N - j, i - j] by mixing U[N - j, i - j],
                # U[N - j, i - j - 1] by right multiplication
                # of a givens rotation matrix in column [i - j, i - j + 1]
                gmat = givens_matrix_elements(unitary[N - j - 1, i - j - 1],
                                              unitary[N - j - 1, i - j - 1 + 1],
                                              which='left')
                right_rotations.append((gmat.T, (i - j - 1, i - j)))
                givens_rotate(unitary,
                              gmat.conj(),
                              i - j - 1,
                              i - j,
                              which='col')
        else:
            for j in range(1, i + 1):
                # elimination of U[N + j - i, j] by mixing U[N + j - i, j] and
                # U[N + j - i - 1, j] by left multiplication
                # of a givens rotation that rotates row space
                # [N + j - i - 1, N + j - i
                gmat = givens_matrix_elements(unitary[N + j - i - 1 - 1, j - 1],
                                              unitary[N + j - i - 1, j - 1],
                                              which='right')
                left_rotations.append((gmat, (N + j - i - 2, N + j - i - 1)))
                givens_rotate(unitary,
                              gmat,
                              N + j - i - 2,
                              N + j - i - 1,
                              which='row')

    # Commute the leftover left-rotations through the residual diagonal
    # phase matrix so everything can be emitted as right-rotations + phases.
    new_left_rotations = []
    for (left_gmat, (i, j)) in reversed(left_rotations):
        phase_matrix = numpy.diag([unitary[i, i], unitary[j, j]])
        matrix_to_decompose = left_gmat.conj().T.dot(phase_matrix)
        new_givens_matrix = givens_matrix_elements(matrix_to_decompose[1, 0],
                                                   matrix_to_decompose[1, 1],
                                                   which='left')
        new_phase_matrix = matrix_to_decompose.dot(new_givens_matrix.T)

        # check if T_{m,n}^{-1}D = D T.
        # coverage: ignore
        if not numpy.allclose(new_phase_matrix.dot(new_givens_matrix.conj()),
                              matrix_to_decompose):
            raise GivensTranspositionError("Failed to shift the phase matrix "
                                           "from right to left")
        # coverage: ignore

        unitary[i, i], unitary[j, j] = new_phase_matrix[0, 0], new_phase_matrix[
            1, 1]

        new_left_rotations.append((new_givens_matrix.conj(), (i, j)))

    phases = numpy.diag(unitary)
    rotations = []
    ordered_rotations = []
    # Convert each 2x2 Givens matrix into (i, j, theta, phi) rotation angles,
    # validating the real-first-column convention along the way.
    for (gmat, (i, j)) in list(reversed(new_left_rotations)) + list(
            map(lambda x: (x[0].conj().T, x[1]), reversed(right_rotations))):
        ordered_rotations.append((gmat, (i, j)))

        # if this throws the impossible has happened
        # coverage: ignore
        if not numpy.isclose(gmat[0, 0].imag, 0.0):
            raise GivensMatrixError(
                "Givens matrix does not obey our convention that all elements "
                "in the first column are real")
        if not numpy.isclose(gmat[1, 0].imag, 0.0):
            raise GivensMatrixError(
                "Givens matrix does not obey our convention that all elements "
                "in the first column are real")
        # coverage: ignore

        theta = numpy.arcsin(numpy.real(gmat[1, 0]))
        phi = numpy.angle(gmat[1, 1])
        rotations.append((i, j, theta, phi))

    # Emit the circuit: an optional Z phase followed by a Givens (Ryxxy)
    # rotation per entry, then the residual single-qubit phases.
    for op in reversed(rotations):
        i, j, theta, phi = cast(Tuple[int, int, float, float], op)
        if not numpy.isclose(phi, 0.0):
            yield cirq.Z(qubits[j])**(phi / numpy.pi)
        yield Ryxxy(-theta).on(qubits[i], qubits[j])
    for idx, phase in enumerate(phases):
        yield cirq.Z(qubits[idx])**(numpy.angle(phase) / numpy.pi)
| kevinsung/OpenFermion | src/openfermion/circuits/primitives/optimal_givens_decomposition.py | Python | apache-2.0 | 5,885 |
# Copyright (c) 2007-2009 Pedro Matiello <pmatiello@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Accessibility algorithms.
@sort: accessibility, connected_components, cut_edges, cut_nodes, mutual_accessibility
"""
# Transitive-closure
def accessibility(graph):
    """
    Accessibility matrix (transitive closure).

    @type  graph: graph
    @param graph: Graph.

    @rtype:  dictionary
    @return: Accessibility information for each node (node -> list of
             reachable nodes, including the node itself).
    """
    accessibility = {}        # Accessibility matrix

    # For each node i, mark each node j if there exists a path from i to j.
    for each in graph:
        access = {}
        # Perform DFS to explore all reachable nodes
        _dfs(graph, access, 1, each)
        # list() keeps the return value a real list on both Python 2 and
        # Python 3 (where dict.keys() is a view, not a list).
        accessibility[each] = list(access.keys())
    return accessibility
# Strongly connected components
def mutual_accessibility(graph):
    """
    Mutual-accessibility matrix (strongly connected components).

    @type  graph: graph
    @param graph: Graph.

    @rtype:  dictionary
    @return: Mutual-accessibility information for each node.
    """
    access = graph.accessibility()
    # Two nodes are mutually accessible exactly when each appears in the
    # other's accessibility list.
    return dict((node,
                 [other for other in graph
                  if node in access[other] and other in access[node]])
                for node in graph)
# Connected components
def connected_components(graph):
    """
    Connected components.

    @attention: Identification of connected components is meaningful only
                for non-directed graphs.

    @type  graph: graph
    @param graph: Graph.

    @rtype:  dictionary
    @return: Pairing that associates each node to its connected component.
    """
    visited = {}
    component = 0

    # Each still-unvisited node seeds a new component, which the DFS then
    # floods with the component number.
    for node in graph:
        if node in visited:
            continue
        component += 1
        _dfs(graph, visited, component, node)
    return visited
# Limited DFS implementations used by algorithms here
def _dfs(graph, visited, count, node):
    """
    Depth-first search subfunction adapted for accessibility algorithms.

    @type  graph: graph
    @param graph: Graph.

    @type  visited: dictionary
    @param visited: List of nodes (visited nodes are marked non-zero).

    @type  count: number
    @param count: Counter of connected components.

    @type  node: node
    @param node: Node to be explored by DFS.
    """
    visited[node] = count
    # Recurse into every neighbor not yet claimed by a component.
    for neighbor in graph[node]:
        if neighbor not in visited:
            _dfs(graph, visited, count, neighbor)
# Cut-Edge and Cut-Vertex identification
def cut_edges(graph):
    """
    Return the cut-edges of the given graph.

    @rtype:  list
    @return: List of cut-edges.
    """
    pre = {}            # Pre-ordering; pre[None] doubles as the visit counter
    low = {}            # Lowest pre[] reachable via tree edges + one backedge
    spanning_tree = {}
    reply = []
    pre[None] = 0

    for each in graph:
        # dict.has_key() was removed in Python 3; "in" works on both 2 and 3.
        if each not in pre:
            spanning_tree[each] = None
            _cut_dfs(graph, spanning_tree, pre, low, reply, each)
    return reply
def cut_nodes(graph):
    """
    Return the cut-nodes of the given graph.

    @rtype:  list
    @return: List of cut-nodes.
    """
    pre = {}    # Pre-ordering
    low = {}    # Lowest pre[] reachable from this node going down the spanning tree + one backedge
    reply = {}
    spanning_tree = {}
    pre[None] = 0

    # Create spanning trees, calculate pre[], low[]
    for each in graph:
        # dict.has_key() was removed in Python 3; "in" works on both 2 and 3.
        if each not in pre:
            spanning_tree[each] = None
            _cut_dfs(graph, spanning_tree, pre, low, [], each)

    # Find cuts
    for each in graph:
        # If node is not a root
        if (spanning_tree[each] is not None):
            for other in graph[each]:
                # If there is no back-edge from descendent to a ancestral of each
                if (low[other] >= pre[each] and spanning_tree[other] == each):
                    reply[each] = 1
        # If node is a root
        else:
            children = 0
            for other in graph:
                if (spanning_tree[other] == each):
                    children = children + 1
            # root is cut-vertex iff it has two or more children
            if (children >= 2):
                reply[each] = 1

    # list() keeps the return value a real list on Python 3 as well.
    return list(reply.keys())
def _cut_dfs(graph, spanning_tree, pre, low, reply, node):
    """
    Depth first search adapted for identification of cut-edges and cut-nodes.

    @type  graph: graph
    @param graph: Graph

    @type  spanning_tree: dictionary
    @param spanning_tree: Spanning tree being built for the graph by DFS.

    @type  pre: dictionary
    @param pre: Graph's preordering.

    @type  low: dictionary
    @param low: Associates to each node, the preordering index of the node of lowest preordering
    accessible from the given node.

    @type  reply: list
    @param reply: List of cut-edges.

    @type  node: node
    @param node: Node to be explored by DFS.
    """
    pre[node] = pre[None]
    low[node] = pre[None]
    pre[None] = pre[None] + 1

    for each in graph[node]:
        # dict.has_key() was removed in Python 3; "in" works on both 2 and 3.
        if each not in pre:
            spanning_tree[each] = node
            _cut_dfs(graph, spanning_tree, pre, low, reply, each)
            if (low[node] > low[each]):
                low[node] = low[each]
            # Tree edge whose subtree cannot reach above it => cut-edge.
            if (low[each] == pre[each]):
                reply.append((node, each))
        elif (low[node] > pre[each] and spanning_tree[node] != each):
            low[node] = pre[each]
| sebastienhupin/qxrad | qooxdoo/tool/pylib/graph/algorithms/accessibility.py | Python | lgpl-3.0 | 6,573 |
import sys
import os
import logging
import time
import argparse
import atexit
import signal
def check_directory():
    """Ensure ~/.chatserver exists and make it (plus this script's own
    directory) importable via sys.path."""
    config_dir = os.path.expanduser("~/.chatserver")
    if not os.path.exists(config_dir):
        os.makedirs(config_dir)
    sys.path.append(config_dir)
    sys.path.append(os.path.dirname(os.path.abspath(__file__)))
def graceful_exit(signum, frame):
    """SIGTERM handler: log the shutdown and exit cleanly (status 0).

    Parameters renamed from the original (signal, frame) to avoid shadowing
    the module-level `signal` import; handlers are invoked positionally.
    """
    logging.info("Process sent SIGTERM, shutting down.")
    sys.exit(0)
def daemonize():
    """Detach from the terminal using the classic double-fork + setsid
    sequence, write a pid file under ~/.chatserver, and install the
    SIGTERM handler.  Exits the parent/intermediate processes."""
    # Refuse to start twice: the pid file acts as a lock.
    if os.path.exists(os.path.expanduser("~/.chatserver/pid")):
        sys.stderr.write("[FATAL] The daemon seems to be already running.\nTo kill the server, run 'sh server_app/stop_daemon_server.sh'.\nTo override this behaviour, delete ~/.chatserver/pid\n")
        sys.exit(1)
    # First fork: parent exits so the child is adopted and can setsid().
    try:
        pid = os.fork()
        if pid < 0:
            sys.stderr.write("[FATAL] First fork failed: pid < 0\n")
            sys.exit(1)
        if pid > 0:
            sys.exit(0)
    except OSError, e:
        sys.stderr.write("[FATAL] First fork failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)
    # Leave the original cwd so it does not pin a mounted filesystem.
    try:
        os.chdir("/")
    except:
        sys.stderr.write("Failed to change cwd to root. This application may prevent some partitions from unmounting.\n")
    # Become session leader, detaching from the controlling terminal.
    try:
        os.setsid()
    except:
        sys.stderr.write("[FATAL] setsid() failed. Try running with the --no-daemon option.\n")
        sys.exit(1)
    try:
        os.umask(0)
    except:
        sys.stderr.write("umask(0) failed. Files created by this application may have unexpected properties.\n")
    # Second fork: the session leader exits so the daemon can never
    # reacquire a controlling terminal.
    try:
        pid = os.fork()
        if pid < 0:
            sys.stderr.write("[FATAL] Second fork failed: pid < 0")
            sys.exit(1)
        if pid > 0:
            sys.exit(0)
    except OSError, e:
        sys.stderr.write("[FATAL] Second fork failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)
    # Record our pid and arrange for the pid file to vanish on exit.
    try:
        atexit.register(lambda: os.remove(os.path.expanduser("~/.chatserver/pid")))
        pidfile = open(os.path.expanduser("~/.chatserver/pid"), "w")
        pidfile.write(str(os.getpid()))
        pidfile.close()
    except:
        sys.stderr.write("[FATAL] pid file writing failed. Try running with the --no-daemon option.\n")
    # SIGTERM triggers graceful_exit(), which runs the atexit cleanup.
    try:
        signal.signal(signal.SIGTERM, graceful_exit)
    except Exception, e:
        sys.stderr.write("[FATAL] Failed to register termination signal handler.\nTraceback:\n%s\n" %str(e))
        sys.exit(1)
    sys.stdout.write("[INFO] Now running server on daemon mode.\n")
    sys.stdout.write("[INFO] Kill server by running 'sh server_app/stop_daemon_server.sh'\n")
    sys.stdout.write("[INFO] To view logs for this server, navigate to ~/.chatserver directory and open the latest log. This is the last message that will not be logged to the file.\n")
def setup_logging(args):
    """Configure the root logger: always append to a dated file in
    ~/.chatserver, and echo to stdout unless running as a daemon."""
    formatter = logging.Formatter('%(asctime)s - [%(levelname)s] %(name)s: %(message)s')
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)

    log_path = os.path.expanduser("~/.chatserver/chat-" + time.strftime("%d-%m-%Y.log"))
    file_handler = logging.FileHandler(log_path)
    file_handler.setFormatter(formatter)
    root_logger.addHandler(file_handler)

    if not args.daemon:
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(formatter)
        root_logger.addHandler(console_handler)
def redirect_std_to_logging():
    """
    Redirects stdout and stderr to log file.
    Based on http://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
    """
    class StreamToLogger(object):
        # Minimal file-like object that forwards write() calls to a logger.
        def __init__(self, logger, log_level=logging.INFO):
            self.logger = logger
            self.log_level = log_level
            self.linebuf = ''

        def write(self, buf):
            for line in buf.rstrip().splitlines():
                self.logger.log(self.log_level, line.rstrip())

        def flush(self):
            # File-like API completeness: print(..., flush=True) and many
            # libraries call sys.stdout.flush()/sys.stderr.flush(); without
            # this method those calls would raise AttributeError after the
            # redirection below.
            pass

    sys.stdin.close()
    sys.stdout.close()
    sys.stderr.close()
    stdout_logger = logging.getLogger('STDOUT')
    sys.stdout = StreamToLogger(stdout_logger, logging.INFO)
    stderr_logger = logging.getLogger('STDERR')
    sys.stderr = StreamToLogger(stderr_logger, logging.ERROR)
def setup_db():
    """Create all database tables; log (but do not re-raise) on failure so
    the server can still come up against an existing schema."""
    try:
        db.create_all()
    # "except Exception as e" is valid on Python 2.6+ and 3,
    # unlike the old "except Exception, e" form.
    except Exception as e:
        logging.critical("Database creation failed: %s" %str(e))
def parse_arguments():
    """Build the CLI parser (port, daemon/no-daemon) and parse sys.argv."""
    arg_parser = argparse.ArgumentParser(
        description='Chat server <https://github.com/jos0003/Chat>')
    arg_parser.add_argument(
        '-p', '--port', required=False, default=app.config['PORT'],
        help="Choose port to run chat server on. If not set, Chat server will use the port from config.py file.")
    arg_parser.add_argument(
        '-t', '--no-daemon', dest='daemon', action='store_false',
        help="Disable daemon/service creation. Process will be tied to the terminal and logging will be done to stdout and stderr.")
    arg_parser.add_argument(
        '-d', '--daemon', dest='daemon', action='store_true',
        help="Opposite of --no-daemon; creates a daemon or service. Enabled by default.")
    # Daemon mode is the default when neither flag is given.
    arg_parser.set_defaults(daemon=True)
    return arg_parser.parse_args(sys.argv[1:])
def run_server():
    """Parse the CLI, configure logging (and daemon mode if requested),
    create the database tables and start the SocketIO server."""
    results = parse_arguments()
    try:
        results.port = int(results.port)
    # A bare "except:" would also swallow SystemExit/KeyboardInterrupt;
    # only conversion failures should produce this message.
    except (TypeError, ValueError):
        sys.stderr.write("[FATAL] port must be an integer\n")
        sys.exit(1)
    setup_logging(results)
    if results.daemon:
        daemonize()
        redirect_std_to_logging()
        logging.info("Daemon spawned successfully, pid is %d" % os.getpid())
    setup_db()
    logging.info("Chat server is now starting on 0.0.0.0:%r" % results.port)
    try:
        socketio.run(app, host="0.0.0.0", port=results.port, use_reloader=False)
    except Exception as e:
        logging.critical("SocketIO failed: %s" % str(e))
        sys.exit(1)
# Prepare ~/.chatserver and extend sys.path BEFORE importing the app
# package, since the import below may depend on those paths being present.
check_directory()
from app import app, db, main, socketio
run_server()
| jos0003/Chat | server_app/__main__.py | Python | bsd-3-clause | 5,855 |
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
Functions in this module are imported into the cinder.db namespace. Call these
functions from cinder.db namespace, not the cinder.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/cinder/cinder.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import options as db_options
from cinder.api import common
from cinder.common import constants
from cinder.i18n import _
# Configuration options controlling service registration and the templates
# used to derive volume/snapshot/backup names from their ids.
db_opts = [
    cfg.BoolOpt('enable_new_services',
                default=True,
                help='Services to be added to the available pool on create'),
    cfg.StrOpt('volume_name_template',
               default='volume-%s',
               help='Template string to be used to generate volume names'),
    cfg.StrOpt('snapshot_name_template',
               default='snapshot-%s',
               help='Template string to be used to generate snapshot names'),
    cfg.StrOpt('backup_name_template',
               default='backup-%s',
               help='Template string to be used to generate backup names'), ]

CONF = cfg.CONF
CONF.register_opts(db_opts)
db_options.set_defaults(CONF)

# IMPL lazily resolves to the concrete backend (sqlalchemy by default);
# every public function in this module simply delegates to it.
_BACKEND_MAPPING = {'sqlalchemy': 'cinder.db.sqlalchemy.api'}


IMPL = oslo_db_api.DBAPI.from_config(conf=CONF,
                                     backend_mapping=_BACKEND_MAPPING,
                                     lazy=True)

# The maximum value a signed INT type may have
MAX_INT = constants.DB_MAX_INT
###################
def dispose_engine():
    """Force the engine to establish new connections."""
    # FIXME(jdg): When using sqlite if we do the dispose
    # we seem to lose our DB here. Adding this check
    # means we don't do the dispose, but we keep our sqlite DB
    # This likely isn't the best way to handle this
    if 'sqlite' in IMPL.get_engine().name:
        return
    return IMPL.dispose_engine()
###################
# Service table accessors -- thin pass-throughs to the configured backend.

def service_destroy(context, service_id):
    """Destroy the service or raise if it does not exist."""
    return IMPL.service_destroy(context, service_id)


def service_get(context, service_id=None, backend_match_level=None, **filters):
    """Get a service that matches the criteria.

    A possible filter is is_up=True and it will filter nodes that are down.

    :param service_id: Id of the service.
    :param filters: Filters for the query in the form of key/value.
    :param backend_match_level: 'pool', 'backend', or 'host' for host and
                                cluster filters (as defined in _filter_host
                                method)
    :raise ServiceNotFound: If service doesn't exist.
    """
    return IMPL.service_get(context, service_id, backend_match_level,
                            **filters)


def service_get_all(context, backend_match_level=None, **filters):
    """Get all services that match the criteria.

    A possible filter is is_up=True and it will filter nodes that are down,
    as well as host_or_cluster, that lets you look for services using both
    of these properties.

    :param filters: Filters for the query in the form of key/value arguments.
    :param backend_match_level: 'pool', 'backend', or 'host' for host and
                                cluster filters (as defined in _filter_host
                                method)
    """
    return IMPL.service_get_all(context, backend_match_level, **filters)


def service_create(context, values):
    """Create a service from the values dictionary."""
    return IMPL.service_create(context, values)


def service_update(context, service_id, values):
    """Set the given properties on an service and update it.

    Raises NotFound if service does not exist.
    """
    return IMPL.service_update(context, service_id, values)
###############
def is_backend_frozen(context, host, cluster_name):
    """Check if a storage backend is frozen based on host and cluster_name."""
    return IMPL.is_backend_frozen(context, host, cluster_name)


###############

# Cluster table accessors -- thin pass-throughs to the configured backend.

def cluster_get(context, id=None, is_up=None, get_services=False,
                services_summary=False, read_deleted='no',
                name_match_level=None, **filters):
    """Get a cluster that matches the criteria.

    :param id: Id of the cluster.
    :param is_up: Boolean value to filter based on the cluster's up status.
    :param get_services: If we want to load all services from this cluster.
    :param services_summary: If we want to load num_hosts and
                             num_down_hosts fields.
    :param read_deleted: Filtering based on delete status. Default value is
                         "no".
    :param name_match_level: 'pool', 'backend', or 'host' for name filter (as
                             defined in _filter_host method)
    :param filters: Field based filters in the form of key/value.
    :raise ClusterNotFound: If cluster doesn't exist.
    """
    return IMPL.cluster_get(context, id, is_up, get_services, services_summary,
                            read_deleted, name_match_level, **filters)


def cluster_get_all(context, is_up=None, get_services=False,
                    services_summary=False, read_deleted='no',
                    name_match_level=None, **filters):
    """Get all clusters that match the criteria.

    :param is_up: Boolean value to filter based on the cluster's up status.
    :param get_services: If we want to load all services from this cluster.
    :param services_summary: If we want to load num_hosts and
                             num_down_hosts fields.
    :param read_deleted: Filtering based on delete status. Default value is
                         "no".
    :param name_match_level: 'pool', 'backend', or 'host' for name filter (as
                             defined in _filter_host method)
    :param filters: Field based filters in the form of key/value.
    """
    return IMPL.cluster_get_all(context, is_up, get_services, services_summary,
                                read_deleted, name_match_level, **filters)


def cluster_create(context, values):
    """Create a cluster from the values dictionary."""
    return IMPL.cluster_create(context, values)


def cluster_update(context, id, values):
    """Set the given properties on an cluster and update it.

    Raises ClusterNotFound if cluster does not exist.
    """
    return IMPL.cluster_update(context, id, values)


def cluster_destroy(context, id):
    """Destroy the cluster or raise if it does not exist or has hosts.

    :raise ClusterNotFound: If cluster doesn't exist.
    """
    return IMPL.cluster_destroy(context, id)
###############
def volume_attach(context, values):
    """Attach a volume."""
    return IMPL.volume_attach(context, values)


def volume_attached(context, volume_id, instance_id, host_name, mountpoint,
                    attach_mode='rw'):
    """Ensure that a volume is set as attached."""
    return IMPL.volume_attached(context, volume_id, instance_id, host_name,
                                mountpoint, attach_mode)


def volume_create(context, values):
    """Create a volume from the values dictionary."""
    return IMPL.volume_create(context, values)


def volume_data_get_for_host(context, host, count_only=False):
    """Get (volume_count, gigabytes) for host."""
    return IMPL.volume_data_get_for_host(context,
                                         host,
                                         count_only)


def volume_data_get_for_project(context, project_id):
    """Get (volume_count, gigabytes) for project."""
    return IMPL.volume_data_get_for_project(context, project_id)


def volume_destroy(context, volume_id):
    """Destroy the volume or raise if it does not exist."""
    return IMPL.volume_destroy(context, volume_id)


def volume_detached(context, volume_id, attachment_id):
    """Ensure that a volume is set as detached."""
    return IMPL.volume_detached(context, volume_id, attachment_id)


def volume_get(context, volume_id):
    """Get a volume or raise if it does not exist."""
    return IMPL.volume_get(context, volume_id)


def volume_get_all(context, marker=None, limit=None, sort_keys=None,
                   sort_dirs=None, filters=None, offset=None):
    """Get all volumes."""
    return IMPL.volume_get_all(context, marker, limit, sort_keys=sort_keys,
                               sort_dirs=sort_dirs, filters=filters,
                               offset=offset)


def volume_get_all_by_host(context, host, filters=None):
    """Get all volumes belonging to a host."""
    return IMPL.volume_get_all_by_host(context, host, filters=filters)


def volume_get_all_by_group(context, group_id, filters=None):
    """Get all volumes belonging to a consistency group."""
    return IMPL.volume_get_all_by_group(context, group_id, filters=filters)


def volume_get_all_by_generic_group(context, group_id, filters=None):
    """Get all volumes belonging to a generic volume group."""
    return IMPL.volume_get_all_by_generic_group(context, group_id,
                                                filters=filters)


def volume_get_all_by_project(context, project_id, marker, limit,
                              sort_keys=None, sort_dirs=None, filters=None,
                              offset=None):
    """Get all volumes belonging to a project."""
    return IMPL.volume_get_all_by_project(context, project_id, marker, limit,
                                          sort_keys=sort_keys,
                                          sort_dirs=sort_dirs,
                                          filters=filters,
                                          offset=offset)


def get_volume_summary_all(context):
    """Get all volume summary."""
    return IMPL.get_volume_summary_all(context)


def get_volume_summary_by_project(context, project_id):
    """Get all volume summary belonging to a project."""
    return IMPL.get_volume_summary_by_project(context, project_id)


def volume_update(context, volume_id, values):
    """Set the given properties on a volume and update it.

    Raises NotFound if volume does not exist.
    """
    return IMPL.volume_update(context, volume_id, values)


def volumes_update(context, values_list):
    """Set the given properties on a list of volumes and update them.

    Raises NotFound if a volume does not exist.
    """
    return IMPL.volumes_update(context, values_list)


def volume_include_in_cluster(context, cluster, partial_rename=True,
                              **filters):
    """Include all volumes matching the filters into a cluster.

    When partial_rename is set we will not set the cluster_name with cluster
    parameter value directly, we'll replace provided cluster_name or host
    filter value with cluster instead.

    This is useful when we want to replace just the cluster name but leave
    the backend and pool information as it is.  If we are using cluster_name
    to filter, we'll use that same DB field to replace the cluster value and
    leave the rest as it is.  Likewise if we use the host to filter.

    Returns the number of volumes that have been changed.
    """
    return IMPL.volume_include_in_cluster(context, cluster, partial_rename,
                                          **filters)


def volume_attachment_update(context, attachment_id, values):
    """Set the given properties on an attachment record and update it."""
    return IMPL.volume_attachment_update(context, attachment_id, values)


def volume_attachment_get(context, attachment_id):
    """Get an attachment record or raise if it does not exist."""
    return IMPL.volume_attachment_get(context, attachment_id)


def volume_attachment_get_all_by_volume_id(context, volume_id,
                                           session=None):
    """Get all attachment records for a volume."""
    return IMPL.volume_attachment_get_all_by_volume_id(context,
                                                       volume_id,
                                                       session)


def volume_attachment_get_all_by_host(context, host, filters=None):
    """Get all attachment records belonging to a host."""
    # FIXME(jdg): Not using filters
    return IMPL.volume_attachment_get_all_by_host(context, host)


def volume_attachment_get_all_by_instance_uuid(context,
                                               instance_uuid, filters=None):
    """Get all attachment records belonging to an instance."""
    # FIXME(jdg): Not using filters
    return IMPL.volume_attachment_get_all_by_instance_uuid(context,
                                                           instance_uuid)


def volume_attachment_get_all(context, filters=None, marker=None, limit=None,
                              offset=None, sort_keys=None, sort_dirs=None):
    """Get all attachment records."""
    return IMPL.volume_attachment_get_all(context, filters, marker, limit,
                                          offset, sort_keys, sort_dirs)


def volume_attachment_get_all_by_project(context, project_id, filters=None,
                                         marker=None, limit=None, offset=None,
                                         sort_keys=None, sort_dirs=None):
    """Get all attachment records belonging to a project."""
    return IMPL.volume_attachment_get_all_by_project(context, project_id,
                                                     filters, marker, limit,
                                                     offset, sort_keys,
                                                     sort_dirs)


def attachment_destroy(context, attachment_id):
    """Destroy the attachment or raise if it does not exist."""
    return IMPL.attachment_destroy(context, attachment_id)


def volume_update_status_based_on_attachment(context, volume_id):
    """Update volume status according to attached instance id."""
    return IMPL.volume_update_status_based_on_attachment(context, volume_id)


def volume_has_snapshots_filter():
    """Return a filter that checks if a volume has snapshots."""
    return IMPL.volume_has_snapshots_filter()


def volume_has_undeletable_snapshots_filter():
    """Return a filter that checks if a volume has undeletable snapshots."""
    return IMPL.volume_has_undeletable_snapshots_filter()


def volume_has_snapshots_in_a_cgsnapshot_filter():
    """Return a filter that checks if a volume has snapshots in a cgsnapshot."""
    return IMPL.volume_has_snapshots_in_a_cgsnapshot_filter()


def volume_has_attachments_filter():
    """Return a filter that checks if a volume has attachments."""
    return IMPL.volume_has_attachments_filter()


def volume_qos_allows_retype(new_vol_type):
    """Return a filter to check that qos allows retyping to new_vol_type."""
    return IMPL.volume_qos_allows_retype(new_vol_type)
####################
def snapshot_create(context, values):
    """Create a snapshot from the values dictionary."""
    return IMPL.snapshot_create(context, values)


def snapshot_destroy(context, snapshot_id):
    """Destroy the snapshot or raise if it does not exist."""
    return IMPL.snapshot_destroy(context, snapshot_id)


def snapshot_get(context, snapshot_id):
    """Get a snapshot or raise if it does not exist."""
    return IMPL.snapshot_get(context, snapshot_id)


def snapshot_get_all(context, filters=None, marker=None, limit=None,
                     sort_keys=None, sort_dirs=None, offset=None):
    """Get all snapshots."""
    return IMPL.snapshot_get_all(context, filters, marker, limit, sort_keys,
                                 sort_dirs, offset)


def snapshot_get_all_by_project(context, project_id, filters=None, marker=None,
                                limit=None, sort_keys=None, sort_dirs=None,
                                offset=None):
    """Get all snapshots belonging to a project."""
    return IMPL.snapshot_get_all_by_project(context, project_id, filters,
                                            marker, limit, sort_keys,
                                            sort_dirs, offset)


def snapshot_get_all_by_host(context, host, filters=None):
    """Get all snapshots belonging to a host.

    :param host: Include snapshots only for specified host.
    :param filters: Filters for the query in the form of key/value.
    """
    return IMPL.snapshot_get_all_by_host(context, host, filters)


def snapshot_get_all_for_cgsnapshot(context, project_id):
    """Get all snapshots belonging to a cgsnapshot."""
    # NOTE(review): despite its name, this parameter carries the cgsnapshot
    # id (it is passed straight through); kept as-is so keyword callers
    # don't break -- confirm against IMPL before renaming.
    return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id)


def snapshot_get_all_for_group_snapshot(context, group_snapshot_id):
    """Get all snapshots belonging to a group snapshot."""
    return IMPL.snapshot_get_all_for_group_snapshot(context, group_snapshot_id)


def snapshot_get_all_for_volume(context, volume_id):
    """Get all snapshots for a volume."""
    return IMPL.snapshot_get_all_for_volume(context, volume_id)


def snapshot_update(context, snapshot_id, values):
    """Set the given properties on a snapshot and update it.

    Raises NotFound if snapshot does not exist.
    """
    return IMPL.snapshot_update(context, snapshot_id, values)


def snapshot_data_get_for_project(context, project_id, volume_type_id=None):
    """Get count and gigabytes used for snapshots for specified project."""
    return IMPL.snapshot_data_get_for_project(context,
                                              project_id,
                                              volume_type_id)


def snapshot_get_all_active_by_window(context, begin, end=None,
                                      project_id=None):
    """Get all the snapshots inside the window.

    Specifying a project_id will filter for a certain project.
    """
    return IMPL.snapshot_get_all_active_by_window(context, begin, end,
                                                  project_id)
####################
def snapshot_metadata_get(context, snapshot_id):
    """Get all metadata for a snapshot."""
    return IMPL.snapshot_metadata_get(context, snapshot_id)


def snapshot_metadata_delete(context, snapshot_id, key):
    """Delete the given metadata item."""
    return IMPL.snapshot_metadata_delete(context, snapshot_id, key)


def snapshot_metadata_update(context, snapshot_id, metadata, delete):
    """Update metadata if it exists, otherwise create it."""
    return IMPL.snapshot_metadata_update(context, snapshot_id,
                                         metadata, delete)
####################
def volume_metadata_get(context, volume_id):
    """Get all metadata for a volume."""
    return IMPL.volume_metadata_get(context, volume_id)


def volume_metadata_delete(context, volume_id, key,
                           meta_type=common.METADATA_TYPES.user):
    """Delete the given metadata item."""
    return IMPL.volume_metadata_delete(context, volume_id,
                                       key, meta_type)


def volume_metadata_update(context, volume_id, metadata,
                           delete, meta_type=common.METADATA_TYPES.user):
    """Update metadata if it exists, otherwise create it."""
    return IMPL.volume_metadata_update(context, volume_id, metadata,
                                       delete, meta_type)
##################
def volume_admin_metadata_get(context, volume_id):
    """Get all administration metadata for a volume."""
    return IMPL.volume_admin_metadata_get(context, volume_id)


def volume_admin_metadata_delete(context, volume_id, key):
    """Delete the given metadata item."""
    return IMPL.volume_admin_metadata_delete(context, volume_id, key)


def volume_admin_metadata_update(context, volume_id, metadata, delete,
                                 add=True, update=True):
    """Update metadata if it exists, otherwise create it."""
    return IMPL.volume_admin_metadata_update(context, volume_id, metadata,
                                             delete, add, update)
##################
def volume_type_create(context, values, projects=None):
    """Create a new volume type."""
    return IMPL.volume_type_create(context, values, projects)


def volume_type_update(context, volume_type_id, values):
    """Update an existing volume type."""
    return IMPL.volume_type_update(context, volume_type_id, values)


def volume_type_get_all(context, inactive=False, filters=None, marker=None,
                        limit=None, sort_keys=None, sort_dirs=None,
                        offset=None, list_result=False):
    """Get all volume types.

    :param context: context to query under
    :param inactive: Include inactive volume types to the result set
    :param filters: Filters for the query in the form of key/value.
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param list_result: For compatibility, if list_result = True, return a list
                        instead of dict.
    :is_public: Filter volume types based on visibility:

        * **True**: List public volume types only
        * **False**: List private volume types only
        * **None**: List both public and private volume types

    :returns: list/dict of matching volume types
    """
    return IMPL.volume_type_get_all(context, inactive, filters, marker=marker,
                                    limit=limit, sort_keys=sort_keys,
                                    sort_dirs=sort_dirs, offset=offset,
                                    list_result=list_result)


def volume_type_get(context, id, inactive=False, expected_fields=None):
    """Get volume type by id.

    :param context: context to query under
    :param id: Volume type id to get.
    :param inactive: Consider inactive volume types when searching
    :param expected_fields: Return those additional fields.
                            Supported fields are: projects.
    :returns: volume type
    """
    return IMPL.volume_type_get(context, id, inactive, expected_fields)


def volume_type_get_by_name(context, name):
    """Get volume type by name."""
    return IMPL.volume_type_get_by_name(context, name)


def volume_types_get_by_name_or_id(context, volume_type_list):
    """Get volume types by name or id."""
    return IMPL.volume_types_get_by_name_or_id(context, volume_type_list)


def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
    """Get volume types that are associated with specific qos specs."""
    return IMPL.volume_type_qos_associations_get(context,
                                                 qos_specs_id,
                                                 inactive)


def volume_type_qos_associate(context, type_id, qos_specs_id):
    """Associate a volume type with specific qos specs."""
    return IMPL.volume_type_qos_associate(context, type_id, qos_specs_id)


def volume_type_qos_disassociate(context, qos_specs_id, type_id):
    """Disassociate a volume type from specific qos specs."""
    return IMPL.volume_type_qos_disassociate(context, qos_specs_id, type_id)


def volume_type_qos_disassociate_all(context, qos_specs_id):
    """Disassociate all volume types from specific qos specs."""
    return IMPL.volume_type_qos_disassociate_all(context,
                                                 qos_specs_id)


def volume_type_qos_specs_get(context, type_id):
    """Get all qos specs for given volume type."""
    return IMPL.volume_type_qos_specs_get(context, type_id)


def volume_type_destroy(context, id):
    """Delete a volume type."""
    return IMPL.volume_type_destroy(context, id)


def volume_get_all_active_by_window(context, begin, end=None, project_id=None):
    """Get all the volumes inside the window.

    Specifying a project_id will filter for a certain project.
    """
    return IMPL.volume_get_all_active_by_window(context, begin, end,
                                                project_id)


def volume_type_access_get_all(context, type_id):
    """Get all volume type access of a volume type."""
    return IMPL.volume_type_access_get_all(context, type_id)


def volume_type_access_add(context, type_id, project_id):
    """Add volume type access for project."""
    return IMPL.volume_type_access_add(context, type_id, project_id)


def volume_type_access_remove(context, type_id, project_id):
    """Remove volume type access for project."""
    return IMPL.volume_type_access_remove(context, type_id, project_id)
####################
def group_type_create(context, values, projects=None):
    """Create a new group type."""
    return IMPL.group_type_create(context, values, projects)


def group_type_update(context, group_type_id, values):
    """Update an existing group type."""
    return IMPL.group_type_update(context, group_type_id, values)


def group_type_get_all(context, inactive=False, filters=None, marker=None,
                       limit=None, sort_keys=None, sort_dirs=None,
                       offset=None, list_result=False):
    """Get all group types.

    :param context: context to query under
    :param inactive: Include inactive group types to the result set
    :param filters: Filters for the query in the form of key/value.
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param list_result: For compatibility, if list_result = True, return a list
                        instead of dict.
    :is_public: Filter group types based on visibility:

        * **True**: List public group types only
        * **False**: List private group types only
        * **None**: List both public and private group types

    :returns: list/dict of matching group types
    """
    return IMPL.group_type_get_all(context, inactive, filters, marker=marker,
                                   limit=limit, sort_keys=sort_keys,
                                   sort_dirs=sort_dirs, offset=offset,
                                   list_result=list_result)


def group_type_get(context, id, inactive=False, expected_fields=None):
    """Get group type by id.

    :param context: context to query under
    :param id: Group type id to get.
    :param inactive: Consider inactive group types when searching
    :param expected_fields: Return those additional fields.
                            Supported fields are: projects.
    :returns: group type
    """
    return IMPL.group_type_get(context, id, inactive, expected_fields)


def group_type_get_by_name(context, name):
    """Get group type by name."""
    return IMPL.group_type_get_by_name(context, name)


def group_types_get_by_name_or_id(context, group_type_list):
    """Get group types by name or id."""
    return IMPL.group_types_get_by_name_or_id(context, group_type_list)


def group_type_destroy(context, id):
    """Delete a group type."""
    return IMPL.group_type_destroy(context, id)


def group_type_access_get_all(context, type_id):
    """Get all group type access of a group type."""
    return IMPL.group_type_access_get_all(context, type_id)


def group_type_access_add(context, type_id, project_id):
    """Add group type access for project."""
    return IMPL.group_type_access_add(context, type_id, project_id)


def group_type_access_remove(context, type_id, project_id):
    """Remove group type access for project."""
    return IMPL.group_type_access_remove(context, type_id, project_id)


def volume_type_get_all_by_group(context, group_id):
    """Get all volume types in a group."""
    return IMPL.volume_type_get_all_by_group(context, group_id)
####################
def volume_type_extra_specs_get(context, volume_type_id):
    """Get all extra specs for a volume type."""
    return IMPL.volume_type_extra_specs_get(context, volume_type_id)


def volume_type_extra_specs_delete(context, volume_type_id, key):
    """Delete the given extra specs item."""
    return IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)


def volume_type_extra_specs_update_or_create(context,
                                             volume_type_id,
                                             extra_specs):
    """Create or update volume type extra specs.

    This adds or modifies the key/value pairs specified in the extra specs dict
    argument.
    """
    return IMPL.volume_type_extra_specs_update_or_create(context,
                                                         volume_type_id,
                                                         extra_specs)
###################
def group_type_specs_get(context, group_type_id):
    """Get all group specs for a group type."""
    return IMPL.group_type_specs_get(context, group_type_id)


def group_type_specs_delete(context, group_type_id, key):
    """Delete the given group specs item."""
    return IMPL.group_type_specs_delete(context, group_type_id, key)


def group_type_specs_update_or_create(context,
                                      group_type_id,
                                      group_specs):
    """Create or update group type specs.

    This adds or modifies the key/value pairs specified in the group specs dict
    argument.
    """
    return IMPL.group_type_specs_update_or_create(context,
                                                  group_type_id,
                                                  group_specs)
###################
def volume_type_encryption_get(context, volume_type_id, session=None):
    """Get the encryption spec for a volume type."""
    return IMPL.volume_type_encryption_get(context, volume_type_id, session)


def volume_type_encryption_delete(context, volume_type_id):
    """Delete the encryption spec for a volume type."""
    return IMPL.volume_type_encryption_delete(context, volume_type_id)


def volume_type_encryption_create(context, volume_type_id, encryption_specs):
    """Create an encryption spec for a volume type."""
    return IMPL.volume_type_encryption_create(context, volume_type_id,
                                              encryption_specs)


def volume_type_encryption_update(context, volume_type_id, encryption_specs):
    """Update the encryption spec for a volume type."""
    return IMPL.volume_type_encryption_update(context, volume_type_id,
                                              encryption_specs)


def volume_type_encryption_volume_get(context, volume_type_id, session=None):
    """Get volumes that use the given volume type's encryption."""
    return IMPL.volume_type_encryption_volume_get(context, volume_type_id,
                                                  session)


def volume_encryption_metadata_get(context, volume_id, session=None):
    """Get the encryption metadata for a volume."""
    return IMPL.volume_encryption_metadata_get(context, volume_id, session)
###################
def qos_specs_create(context, values):
    """Create a qos_specs."""
    return IMPL.qos_specs_create(context, values)


def qos_specs_get(context, qos_specs_id):
    """Get all specifications for a given qos_specs."""
    return IMPL.qos_specs_get(context, qos_specs_id)


def qos_specs_get_all(context, filters=None, marker=None, limit=None,
                      offset=None, sort_keys=None, sort_dirs=None):
    """Get all qos_specs."""
    return IMPL.qos_specs_get_all(context, filters=filters, marker=marker,
                                  limit=limit, offset=offset,
                                  sort_keys=sort_keys, sort_dirs=sort_dirs)


def qos_specs_get_by_name(context, name):
    """Get all specifications for a given qos_specs."""
    return IMPL.qos_specs_get_by_name(context, name)


def qos_specs_associations_get(context, qos_specs_id):
    """Get all associated volume types for a given qos_specs."""
    return IMPL.qos_specs_associations_get(context, qos_specs_id)


def qos_specs_associate(context, qos_specs_id, type_id):
    """Associate qos_specs with volume type."""
    return IMPL.qos_specs_associate(context, qos_specs_id, type_id)


def qos_specs_disassociate(context, qos_specs_id, type_id):
    """Disassociate qos_specs from volume type."""
    return IMPL.qos_specs_disassociate(context, qos_specs_id, type_id)


def qos_specs_disassociate_all(context, qos_specs_id):
    """Disassociate qos_specs from all entities."""
    return IMPL.qos_specs_disassociate_all(context, qos_specs_id)


def qos_specs_delete(context, qos_specs_id):
    """Delete the qos_specs."""
    return IMPL.qos_specs_delete(context, qos_specs_id)


def qos_specs_item_delete(context, qos_specs_id, key):
    """Delete specified key in the qos_specs."""
    return IMPL.qos_specs_item_delete(context, qos_specs_id, key)


def qos_specs_update(context, qos_specs_id, specs):
    """Update qos specs.

    This adds or modifies the key/value pairs specified in the
    specs dict argument for a given qos_specs.
    """
    return IMPL.qos_specs_update(context, qos_specs_id, specs)
###################
def volume_glance_metadata_create(context, volume_id, key, value):
    """Update the Glance metadata for the specified volume."""
    return IMPL.volume_glance_metadata_create(context,
                                              volume_id,
                                              key,
                                              value)


def volume_glance_metadata_bulk_create(context, volume_id, metadata):
    """Add Glance metadata for specified volume (multiple pairs)."""
    return IMPL.volume_glance_metadata_bulk_create(context, volume_id,
                                                   metadata)


def volume_glance_metadata_get_all(context):
    """Return the glance metadata for all volumes."""
    return IMPL.volume_glance_metadata_get_all(context)


def volume_glance_metadata_get(context, volume_id):
    """Return the glance metadata for a volume."""
    return IMPL.volume_glance_metadata_get(context, volume_id)


def volume_glance_metadata_list_get(context, volume_id_list):
    """Return the glance metadata for a volume list."""
    return IMPL.volume_glance_metadata_list_get(context, volume_id_list)


def volume_snapshot_glance_metadata_get(context, snapshot_id):
    """Return the Glance metadata for the specified snapshot."""
    return IMPL.volume_snapshot_glance_metadata_get(context, snapshot_id)


def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
    """Update the Glance metadata for a snapshot.

    This will copy all of the key:value pairs from the originating volume,
    to ensure that a volume created from the snapshot will retain the
    original metadata.
    """
    return IMPL.volume_glance_metadata_copy_to_snapshot(context, snapshot_id,
                                                        volume_id)


def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
    """Update the Glance metadata from a volume (created from a snapshot).

    This will copy all of the key:value pairs from the originating snapshot,
    to ensure that the Glance metadata from the original volume is retained.
    """
    return IMPL.volume_glance_metadata_copy_to_volume(context, volume_id,
                                                      snapshot_id)


def volume_glance_metadata_delete_by_volume(context, volume_id):
    """Delete the glance metadata for a volume."""
    return IMPL.volume_glance_metadata_delete_by_volume(context, volume_id)


def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
    """Delete the glance metadata for a snapshot."""
    return IMPL.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)


def volume_glance_metadata_copy_from_volume_to_volume(context,
                                                      src_volume_id,
                                                      volume_id):
    """Update the Glance metadata for a volume.

    Update the Glance metadata for a volume by copying all of the key:value
    pairs from the originating volume.

    This is so that a volume created from the volume (clone) will retain the
    original metadata.
    """
    return IMPL.volume_glance_metadata_copy_from_volume_to_volume(
        context,
        src_volume_id,
        volume_id)
###################
def quota_create(context, project_id, resource, limit, allocated=0):
    """Create a quota for the given project and resource."""
    return IMPL.quota_create(context, project_id, resource, limit,
                             allocated=allocated)


def quota_get(context, project_id, resource):
    """Retrieve a quota or raise if it does not exist."""
    return IMPL.quota_get(context, project_id, resource)


def quota_get_all_by_project(context, project_id):
    """Retrieve all quotas associated with a given project."""
    return IMPL.quota_get_all_by_project(context, project_id)


def quota_allocated_get_all_by_project(context, project_id):
    """Retrieve all allocated quotas associated with a given project."""
    return IMPL.quota_allocated_get_all_by_project(context, project_id)


def quota_allocated_update(context, project_id,
                           resource, allocated):
    """Update allocated quota to subprojects or raise if it does not exist.

    :raises: cinder.exception.ProjectQuotaNotFound
    """
    return IMPL.quota_allocated_update(context, project_id,
                                       resource, allocated)


def quota_update(context, project_id, resource, limit):
    """Update a quota or raise if it does not exist."""
    return IMPL.quota_update(context, project_id, resource, limit)


def quota_update_resource(context, old_res, new_res):
    """Update resource of quotas."""
    return IMPL.quota_update_resource(context, old_res, new_res)


def quota_destroy(context, project_id, resource):
    """Destroy the quota or raise if it does not exist."""
    return IMPL.quota_destroy(context, project_id, resource)
###################
def quota_class_create(context, class_name, resource, limit):
    """Create a quota class for the given name and resource."""
    return IMPL.quota_class_create(context, class_name, resource, limit)


def quota_class_get(context, class_name, resource):
    """Retrieve a quota class or raise if it does not exist."""
    return IMPL.quota_class_get(context, class_name, resource)


def quota_class_get_defaults(context):
    """Retrieve all default quotas."""
    return IMPL.quota_class_get_defaults(context)


def quota_class_get_all_by_name(context, class_name):
    """Retrieve all quotas associated with a given quota class."""
    return IMPL.quota_class_get_all_by_name(context, class_name)


def quota_class_update(context, class_name, resource, limit):
    """Update a quota class or raise if it does not exist."""
    return IMPL.quota_class_update(context, class_name, resource, limit)


def quota_class_update_resource(context, resource, new_resource):
    """Update resource name in quota_class."""
    return IMPL.quota_class_update_resource(context, resource, new_resource)


def quota_class_destroy(context, class_name, resource):
    """Destroy the quota class or raise if it does not exist."""
    return IMPL.quota_class_destroy(context, class_name, resource)


def quota_class_destroy_all_by_name(context, class_name):
    """Destroy all quotas associated with a given quota class."""
    return IMPL.quota_class_destroy_all_by_name(context, class_name)
###################
def quota_usage_get(context, project_id, resource):
    """Retrieve a quota usage or raise if it does not exist."""
    return IMPL.quota_usage_get(context, project_id, resource)


def quota_usage_get_all_by_project(context, project_id):
    """Retrieve all usage associated with a given project."""
    return IMPL.quota_usage_get_all_by_project(context, project_id)
###################
def quota_reserve(context, resources, quotas, deltas, expire,
                  until_refresh, max_age, project_id=None,
                  is_allocated_reserve=False):
    """Check quotas and create appropriate reservations."""
    return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
                              until_refresh, max_age, project_id=project_id,
                              is_allocated_reserve=is_allocated_reserve)


def reservation_commit(context, reservations, project_id=None):
    """Commit quota reservations."""
    return IMPL.reservation_commit(context, reservations,
                                   project_id=project_id)


def reservation_rollback(context, reservations, project_id=None):
    """Roll back quota reservations."""
    return IMPL.reservation_rollback(context, reservations,
                                     project_id=project_id)


def quota_destroy_by_project(context, project_id):
    """Destroy all quotas associated with a given project."""
    return IMPL.quota_destroy_by_project(context, project_id)


def reservation_expire(context):
    """Roll back any expired reservations."""
    return IMPL.reservation_expire(context)


def quota_usage_update_resource(context, old_res, new_res):
    """Update resource field in quota_usages."""
    return IMPL.quota_usage_update_resource(context, old_res, new_res)
###################
def backup_get(context, backup_id, read_deleted=None, project_only=True):
    """Get a backup or raise if it does not exist."""
    return IMPL.backup_get(context, backup_id, read_deleted, project_only)


def backup_get_all(context, filters=None, marker=None, limit=None,
                   offset=None, sort_keys=None, sort_dirs=None):
    """Get all backups."""
    return IMPL.backup_get_all(context, filters=filters, marker=marker,
                               limit=limit, offset=offset, sort_keys=sort_keys,
                               sort_dirs=sort_dirs)


def backup_get_all_by_host(context, host):
    """Get all backups belonging to a host."""
    return IMPL.backup_get_all_by_host(context, host)


def backup_create(context, values):
    """Create a backup from the values dictionary."""
    return IMPL.backup_create(context, values)


def backup_get_all_by_project(context, project_id, filters=None, marker=None,
                              limit=None, offset=None, sort_keys=None,
                              sort_dirs=None):
    """Get all backups belonging to a project."""
    return IMPL.backup_get_all_by_project(context, project_id,
                                          filters=filters, marker=marker,
                                          limit=limit, offset=offset,
                                          sort_keys=sort_keys,
                                          sort_dirs=sort_dirs)


def backup_get_all_by_volume(context, volume_id, filters=None):
    """Get all backups belonging to a volume."""
    return IMPL.backup_get_all_by_volume(context, volume_id,
                                         filters=filters)


def backup_get_all_active_by_window(context, begin, end=None, project_id=None):
    """Get all the backups inside the window.

    Specifying a project_id will filter for a certain project.
    """
    return IMPL.backup_get_all_active_by_window(context, begin, end,
                                                project_id)


def backup_update(context, backup_id, values):
    """Set the given properties on a backup and update it.

    Raises NotFound if backup does not exist.
    """
    return IMPL.backup_update(context, backup_id, values)


def backup_destroy(context, backup_id):
    """Destroy the backup or raise if it does not exist."""
    return IMPL.backup_destroy(context, backup_id)
###################
def transfer_get(context, transfer_id):
    """Get a volume transfer record or raise if it does not exist."""
    return IMPL.transfer_get(context, transfer_id)


def transfer_get_all(context):
    """Get all volume transfer records."""
    return IMPL.transfer_get_all(context)


def transfer_get_all_by_project(context, project_id):
    """Get all volume transfer records for specified project."""
    return IMPL.transfer_get_all_by_project(context, project_id)


def transfer_create(context, values):
    """Create an entry in the transfers table."""
    return IMPL.transfer_create(context, values)


def transfer_destroy(context, transfer_id):
    """Destroy a record in the volume transfer table."""
    return IMPL.transfer_destroy(context, transfer_id)


def transfer_accept(context, transfer_id, user_id, project_id):
    """Accept a volume transfer."""
    return IMPL.transfer_accept(context, transfer_id, user_id, project_id)
###################
def consistencygroup_get(context, consistencygroup_id):
    """Get a consistencygroup or raise if it does not exist."""
    return IMPL.consistencygroup_get(context, consistencygroup_id)


def consistencygroup_get_all(context, filters=None, marker=None, limit=None,
                             offset=None, sort_keys=None, sort_dirs=None):
    """Get all consistencygroups."""
    return IMPL.consistencygroup_get_all(context, filters=filters,
                                         marker=marker, limit=limit,
                                         offset=offset, sort_keys=sort_keys,
                                         sort_dirs=sort_dirs)


def consistencygroup_create(context, values, cg_snap_id=None, cg_id=None):
    """Create a consistencygroup from the values dictionary."""
    return IMPL.consistencygroup_create(context, values, cg_snap_id, cg_id)


def consistencygroup_get_all_by_project(context, project_id, filters=None,
                                        marker=None, limit=None, offset=None,
                                        sort_keys=None, sort_dirs=None):
    """Get all consistencygroups belonging to a project."""
    return IMPL.consistencygroup_get_all_by_project(context, project_id,
                                                    filters=filters,
                                                    marker=marker, limit=limit,
                                                    offset=offset,
                                                    sort_keys=sort_keys,
                                                    sort_dirs=sort_dirs)


def consistencygroup_update(context, consistencygroup_id, values):
    """Set the given properties on a consistencygroup and update it.

    Raises NotFound if consistencygroup does not exist.
    """
    return IMPL.consistencygroup_update(context, consistencygroup_id, values)


def consistencygroup_destroy(context, consistencygroup_id):
    """Destroy the consistencygroup or raise if it does not exist."""
    return IMPL.consistencygroup_destroy(context, consistencygroup_id)


def cg_has_cgsnapshot_filter():
    """Return a filter that checks if a CG has CG Snapshots."""
    return IMPL.cg_has_cgsnapshot_filter()


def cg_has_volumes_filter(attached_or_with_snapshots=False):
    """Return a filter to check if a CG has volumes.

    When attached_or_with_snapshots parameter is given a True value only
    attached volumes or those with snapshots will be considered.
    """
    return IMPL.cg_has_volumes_filter(attached_or_with_snapshots)


def cg_creating_from_src(cg_id=None, cgsnapshot_id=None):
    """Return a filter to check if a CG is being used as creation source.

    Returned filter is meant to be used in the Conditional Update mechanism and
    checks if provided CG ID or CG Snapshot ID is currently being used to
    create another CG.

    This filter will not include CGs that have used the ID but have already
    finished their creation (status is no longer creating).

    Filter uses a subquery that allows it to be used on updates to the
    consistencygroups table.
    """
    return IMPL.cg_creating_from_src(cg_id, cgsnapshot_id)


def consistencygroup_include_in_cluster(context, cluster, partial_rename=True,
                                        **filters):
    """Include all consistency groups matching the filters into a cluster.

    When partial_rename is set we will not set the cluster_name with cluster
    parameter value directly, we'll replace provided cluster_name or host
    filter value with cluster instead.

    This is useful when we want to replace just the cluster name but leave
    the backend and pool information as it is.  If we are using cluster_name
    to filter, we'll use that same DB field to replace the cluster value and
    leave the rest as it is.  Likewise if we use the host to filter.

    Returns the number of consistency groups that have been changed.
    """
    return IMPL.consistencygroup_include_in_cluster(context, cluster,
                                                    partial_rename,
                                                    **filters)
def migrate_add_message_prefix(context, max_count, force=False):
"""Change Message event ids to start with the VOLUME_ prefix.
:param max_count: The maximum number of messages to consider in
this run.
:param force: Ignored in this migration
:returns: number of messages needing migration, number of
messages migrated (both will always be less than
max_count).
"""
return IMPL.migrate_add_message_prefix(context, max_count, force)
###################
# Generic volume groups
def group_get(context, group_id):
"""Get a group or raise if it does not exist."""
return IMPL.group_get(context, group_id)
def group_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Get all groups."""
return IMPL.group_get_all(context, filters=filters,
marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys,
sort_dirs=sort_dirs)
def group_create(context, values, group_snapshot_id=None, group_id=None):
"""Create a group from the values dictionary."""
return IMPL.group_create(context, values, group_snapshot_id, group_id)
def group_get_all_by_project(context, project_id, filters=None,
marker=None, limit=None, offset=None,
sort_keys=None, sort_dirs=None):
"""Get all groups belonging to a project."""
return IMPL.group_get_all_by_project(context, project_id,
filters=filters,
marker=marker, limit=limit,
offset=offset,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
def group_update(context, group_id, values):
"""Set the given properties on a group and update it.
Raises NotFound if group does not exist.
"""
return IMPL.group_update(context, group_id, values)
def group_destroy(context, group_id):
"""Destroy the group or raise if it does not exist."""
return IMPL.group_destroy(context, group_id)
def group_has_group_snapshot_filter():
"""Return a filter that checks if a Group has Group Snapshots."""
return IMPL.group_has_group_snapshot_filter()
def group_has_volumes_filter(attached_or_with_snapshots=False):
"""Return a filter to check if a Group has volumes.
When attached_or_with_snapshots parameter is given a True value only
attached volumes or those with snapshots will be considered.
"""
return IMPL.group_has_volumes_filter(attached_or_with_snapshots)
def group_creating_from_src(group_id=None, group_snapshot_id=None):
"""Return a filter to check if a Group is being used as creation source.
Returned filter is meant to be used in the Conditional Update mechanism and
checks if provided Group ID or Group Snapshot ID is currently being used to
create another Group.
This filter will not include Groups that have used the ID but have already
finished their creation (status is no longer creating).
Filter uses a subquery that allows it to be used on updates to the
groups table.
"""
return IMPL.group_creating_from_src(group_id, group_snapshot_id)
def group_volume_type_mapping_create(context, group_id, volume_type_id):
"""Create a group volume_type mapping entry."""
return IMPL.group_volume_type_mapping_create(context, group_id,
volume_type_id)
def migrate_consistencygroups_to_groups(context, max_count, force=False):
    """Migrate consistency groups to generic volume groups.

    Delegates to the backend implementation.

    :param max_count: Maximum number of rows to consider in this run.
    :param force: Presumably forces migration of CGs that would otherwise be
        skipped -- semantics defined by the backend implementation; confirm.
    """
    return IMPL.migrate_consistencygroups_to_groups(context, max_count, force)
###################
# Consistency group snapshots
def cgsnapshot_get(context, cgsnapshot_id):
"""Get a cgsnapshot or raise if it does not exist."""
return IMPL.cgsnapshot_get(context, cgsnapshot_id)
def cgsnapshot_get_all(context, filters=None):
"""Get all cgsnapshots."""
return IMPL.cgsnapshot_get_all(context, filters)
def cgsnapshot_create(context, values):
"""Create a cgsnapshot from the values dictionary."""
return IMPL.cgsnapshot_create(context, values)
def cgsnapshot_get_all_by_group(context, group_id, filters=None):
"""Get all cgsnapshots belonging to a consistency group."""
return IMPL.cgsnapshot_get_all_by_group(context, group_id, filters)
def cgsnapshot_get_all_by_project(context, project_id, filters=None):
"""Get all cgsnapshots belonging to a project."""
return IMPL.cgsnapshot_get_all_by_project(context, project_id, filters)
def cgsnapshot_update(context, cgsnapshot_id, values):
"""Set the given properties on a cgsnapshot and update it.
Raises NotFound if cgsnapshot does not exist.
"""
return IMPL.cgsnapshot_update(context, cgsnapshot_id, values)
def cgsnapshot_destroy(context, cgsnapshot_id):
"""Destroy the cgsnapshot or raise if it does not exist."""
return IMPL.cgsnapshot_destroy(context, cgsnapshot_id)
def cgsnapshot_creating_from_src():
"""Get a filter that checks if a CGSnapshot is being created from a CG."""
return IMPL.cgsnapshot_creating_from_src()
###################
# Group snapshots
def group_snapshot_get(context, group_snapshot_id):
"""Get a group snapshot or raise if it does not exist."""
return IMPL.group_snapshot_get(context, group_snapshot_id)
def group_snapshot_get_all(context, filters=None):
"""Get all group snapshots."""
return IMPL.group_snapshot_get_all(context, filters)
def group_snapshot_create(context, values):
"""Create a group snapshot from the values dictionary."""
return IMPL.group_snapshot_create(context, values)
def group_snapshot_get_all_by_group(context, group_id, filters=None):
"""Get all group snapshots belonging to a group."""
return IMPL.group_snapshot_get_all_by_group(context, group_id, filters)
def group_snapshot_get_all_by_project(context, project_id, filters=None):
"""Get all group snapshots belonging to a project."""
return IMPL.group_snapshot_get_all_by_project(context, project_id, filters)
def group_snapshot_update(context, group_snapshot_id, values):
"""Set the given properties on a group snapshot and update it.
Raises NotFound if group snapshot does not exist.
"""
return IMPL.group_snapshot_update(context, group_snapshot_id, values)
def group_snapshot_destroy(context, group_snapshot_id):
"""Destroy the group snapshot or raise if it does not exist."""
return IMPL.group_snapshot_destroy(context, group_snapshot_id)
def group_snapshot_creating_from_src():
"""Get a filter to check if a grp snapshot is being created from a grp."""
return IMPL.group_snapshot_creating_from_src()
###################
# Database maintenance helpers
def purge_deleted_rows(context, age_in_days):
"""Purge deleted rows older than given age from cinder tables
Raises InvalidParameterValue if age_in_days is incorrect.
:returns: number of deleted rows
"""
return IMPL.purge_deleted_rows(context, age_in_days=age_in_days)
def get_booleans_for_table(table_name):
    """Return the boolean columns for *table_name* (delegates to backend)."""
    return IMPL.get_booleans_for_table(table_name)
###################
# Driver initiator data
def driver_initiator_data_insert_by_key(context, initiator,
namespace, key, value):
"""Updates DriverInitiatorData entry.
Sets the value for the specified key within the namespace.
If the entry already exists return False, if it inserted successfully
return True.
"""
return IMPL.driver_initiator_data_insert_by_key(context,
initiator,
namespace,
key,
value)
def driver_initiator_data_get(context, initiator, namespace):
"""Query for an DriverInitiatorData that has the specified key"""
return IMPL.driver_initiator_data_get(context, initiator, namespace)
###################
# Image-volume cache
def image_volume_cache_create(context, host, cluster_name, image_id,
image_updated_at, volume_id, size):
"""Create a new image volume cache entry."""
return IMPL.image_volume_cache_create(context,
host,
cluster_name,
image_id,
image_updated_at,
volume_id,
size)
def image_volume_cache_delete(context, volume_id):
"""Delete an image volume cache entry specified by volume id."""
return IMPL.image_volume_cache_delete(context, volume_id)
def image_volume_cache_get_and_update_last_used(context, image_id, **filters):
"""Query for an image volume cache entry."""
return IMPL.image_volume_cache_get_and_update_last_used(context,
image_id,
**filters)
def image_volume_cache_get_by_volume_id(context, volume_id):
"""Query to see if a volume id is an image-volume contained in the cache"""
return IMPL.image_volume_cache_get_by_volume_id(context, volume_id)
def image_volume_cache_get_all(context, **filters):
"""Query for all image volume cache entry for a host."""
return IMPL.image_volume_cache_get_all(context, **filters)
def image_volume_cache_include_in_cluster(context, cluster,
partial_rename=True, **filters):
"""Include in cluster image volume cache entries matching the filters.
When partial_rename is set we will not set the cluster_name with cluster
parameter value directly, we'll replace provided cluster_name or host
filter value with cluster instead.
This is useful when we want to replace just the cluster name but leave
the backend and pool information as it is. If we are using cluster_name
to filter, we'll use that same DB field to replace the cluster value and
leave the rest as it is. Likewise if we use the host to filter.
Returns the number of volumes that have been changed.
"""
return IMPL.image_volume_cache_include_in_cluster(
context, cluster, partial_rename, **filters)
###################
# User-facing messages
def message_get(context, message_id):
"""Return a message with the specified ID."""
return IMPL.message_get(context, message_id)
def message_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
return IMPL.message_get_all(context, filters=filters, marker=marker,
limit=limit, offset=offset,
sort_keys=sort_keys, sort_dirs=sort_dirs)
def message_create(context, values):
"""Creates a new message with the specified values."""
return IMPL.message_create(context, values)
def message_destroy(context, message_id):
"""Deletes message with the specified ID."""
return IMPL.message_destroy(context, message_id)
###################
# Cleanup workers
def workers_init():
"""Check if DB supports subsecond resolution and set global flag.
MySQL 5.5 doesn't support subsecond resolution in datetime fields, so we
have to take it into account when working with the worker's table.
Once we drop support for MySQL 5.5 we can remove this method.
"""
return IMPL.workers_init()
def worker_create(context, **values):
"""Create a worker entry from optional arguments."""
return IMPL.worker_create(context, **values)
def worker_get(context, **filters):
"""Get a worker or raise exception if it does not exist."""
return IMPL.worker_get(context, **filters)
def worker_get_all(context, until=None, db_filters=None, **filters):
"""Get all workers that match given criteria."""
return IMPL.worker_get_all(context, until=until, db_filters=db_filters,
**filters)
def worker_update(context, id, filters=None, orm_worker=None, **values):
"""Update a worker with given values."""
return IMPL.worker_update(context, id, filters=filters,
orm_worker=orm_worker, **values)
def worker_claim_for_cleanup(context, claimer_id, orm_worker):
"""Soft delete a worker, change the service_id and update the worker."""
return IMPL.worker_claim_for_cleanup(context, claimer_id, orm_worker)
def worker_destroy(context, **filters):
"""Delete a worker (no soft delete)."""
return IMPL.worker_destroy(context, **filters)
###################
# Generic helpers for versioned objects / conditional updates
def resource_exists(context, model, resource_id):
    """Return whether a row of *model* with *resource_id* exists (backend)."""
    return IMPL.resource_exists(context, model, resource_id)
def get_model_for_versioned_object(versioned_object):
    """Return the DB model class matching *versioned_object* (backend)."""
    return IMPL.get_model_for_versioned_object(versioned_object)
def get_by_id(context, model, id, *args, **kwargs):
    """Fetch a row of *model* by primary key *id* (delegates to backend)."""
    return IMPL.get_by_id(context, model, id, *args, **kwargs)
class Condition(object):
    """Class for normal condition values for conditional_update."""

    def __init__(self, value, field=None):
        self.value = value
        # Field is optional and can be passed when getting the filter
        self.field = field

    def get_filter(self, model, field=None):
        """Return a backend filter for this condition on *model*."""
        return IMPL.condition_db_filter(model, self._get_field(field),
                                        self.value)

    def _get_field(self, field=None):
        """Return the field to filter on, preferring the call argument."""
        # We must have a defined field on initialization or when called
        field = field or self.field
        if not field:
            raise ValueError(_('Condition has no field.'))
        return field
###################
# Attachment specs
def attachment_specs_get(context, attachment_id):
"""Get all specs for an attachment."""
return IMPL.attachment_specs_get(context, attachment_id)
def attachment_specs_delete(context, attachment_id, key):
"""Delete the given attachment specs item."""
return IMPL.attachment_specs_delete(context, attachment_id, key)
def attachment_specs_update_or_create(context,
attachment_id,
specs):
"""Create or update attachment specs.
This adds or modifies the key/value pairs specified in the attachment
specs dict argument.
"""
return IMPL.attachment_specs_update_or_create(context,
attachment_id,
specs)
###################
# Conditional-update support classes
class Not(Condition):
    """Class for negated condition values for conditional_update.

    By default NULL values will be treated like Python treats None instead of
    how SQL treats it.

    So for example when values are (1, 2) it will evaluate to True when we
    have value 3 or NULL, instead of only with 3 like SQL does.
    """

    def __init__(self, value, field=None, auto_none=True):
        super(Not, self).__init__(value, field)
        # When True, NULL is matched using Python semantics (see class doc).
        self.auto_none = auto_none

    def get_filter(self, model, field=None):
        # If implementation has a specific method use it
        if hasattr(IMPL, 'condition_not_db_filter'):
            return IMPL.condition_not_db_filter(model, self._get_field(field),
                                                self.value, self.auto_none)
        # Otherwise the non-negated filter must admit the ~ operator for NOT
        return ~super(Not, self).get_filter(model, field)
class Case(object):
    """Class for conditional value selection for conditional_update.

    Holds the pieces of an SQL CASE expression: the WHEN clauses plus the
    optional value and ELSE parts, to be translated by the backend.
    """

    def __init__(self, whens, value=None, else_=None):
        self.whens, self.value, self.else_ = whens, value, else_
def is_orm_value(obj):
"""Check if object is an ORM field."""
return IMPL.is_orm_value(obj)
def conditional_update(context, model, values, expected_values, filters=(),
                       include_deleted='no', project_only=False, order=None):
    """Compare-and-swap conditional update.

    Update will only occur in the DB if conditions are met.

    We have 4 different condition types we can use in expected_values:
     - Equality:  {'status': 'available'}
     - Inequality: {'status': vol_obj.Not('deleting')}
     - In range: {'status': ['available', 'error']}
     - Not in range: {'status': vol_obj.Not(['in-use', 'attaching'])}

    Method accepts additional filters, which are basically anything that can
    be passed to a sqlalchemy query's filter method, for example:

    .. code-block:: python

     [~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)]

    We can select values based on conditions using Case objects in the
    'values' argument. For example:

    .. code-block:: python

     has_snapshot_filter = sql.exists().where(
         models.Snapshot.volume_id == models.Volume.id)
     case_values = db.Case([(has_snapshot_filter, 'has-snapshot')],
                           else_='no-snapshot')
     db.conditional_update(context, models.Volume, {'status': case_values},
                           {'status': 'available'})

    And we can use DB fields for example to store previous status in the
    corresponding field even though we don't know which value is in the db
    from those we allowed:

    .. code-block:: python

     db.conditional_update(context, models.Volume,
                           {'status': 'deleting',
                            'previous_status': models.Volume.status},
                           {'status': ('available', 'error')})

    :param values: Dictionary of key-values to update in the DB.
    :param expected_values: Dictionary of conditions that must be met for the
                            update to be executed.
    :param filters: Iterable with additional filters.
    :param include_deleted: Should the update include deleted items, this is
                            equivalent to read_deleted.
    :param project_only: Should the query be limited to context's project.
    :param order: Specific order of fields in which to update the values.
    :returns: Number of db rows that were updated.
    """
    return IMPL.conditional_update(context, model, values, expected_values,
                                   filters, include_deleted, project_only,
                                   order)
| ge0rgi/cinder | cinder/db/api.py | Python | apache-2.0 | 67,859 |
import lmfit
from calculations.equations import FitParam
def ppmsi_function(params, subsa, rate, subsb, *_):
    """Residual of the ping-pong (substrate inhibition) rate equation.

    Returns (predicted rate - measured rate) for use as an lmfit objective.
    """
    vmax = params['v_max'].value
    kdb = params['kdb'].value
    kma = params['kma'].value
    kmb = params['kmb'].value
    # Denominator carries the Km terms plus the substrate-inhibition factor.
    denominator = kmb * subsa + (kma * subsb) * (1 + (subsb / kdb)) + subsa * subsb
    predicted = vmax * subsb * subsa / denominator
    return predicted - rate
def create_ppmsi(v_max, kdb, kma, kmb, *_):
    """Build a ping-pong (substrate inhibition) rate function.

    Returns a closure eq(subsa, subsb) -> rate over the fitted constants.
    """
    def eq(subsa, subsb):
        denom = kmb * subsa + (kma * subsb) * (1 + (subsb / kdb)) + subsa * subsb
        return v_max * subsb * subsa / denom

    return eq
class PPMSIparams(FitParam):
    """lmfit parameter class for Ping pong mechanism fitting.

    Each constructor argument is an (initial value, min, max) triple used to
    seed the corresponding lmfit parameter.
    """

    def __init__(self, v_max=(1, 0, 100), kdb=(1, 0, 100), kma=(1, 0, 100), kmb=(1, 0, 100)):
        # NOTE(review): 'FitParam.__int__' looks like a typo for '__init__';
        # confirm against FitParam's definition before changing it.
        FitParam.__int__(self)
        lmfit.Parameters.__init__(self)
        self.add('v_max', value=v_max[0], min=v_max[1], max=v_max[2])
        self.add('kdb', value=kdb[0], min=kdb[1], max=kdb[2])
        self.add('kma', value=kma[0], min=kma[1], max=kma[2])
        self.add('kmb', value=kmb[0], min=kmb[1], max=kmb[2])
        # Residual function and equation factory used by the fitting code.
        self.fitf = ppmsi_function
        self.eq = create_ppmsi
        self.name = 'Ping-pong (substrate inhibition)'
        # Order in which fitted parameters are reported / passed to self.eq.
        self.param_order = ['v_max', 'kdb', 'kma', 'kmb']
        # 'r' = rate unit, 'c' = concentration unit -- TODO confirm meaning.
        self.units = ['r', 'c', 'c', 'c']
def ppm_function(params, subsa, rate, subsb, *_):
    """Residual of the plain ping-pong mechanism rate equation.

    Returns (predicted rate - measured rate) for use as an lmfit objective.
    """
    vmax = params['v_max'].value
    kma = params['kma'].value
    kmb = params['kmb'].value
    denom = kmb * subsa + (kma * subsb) + subsa * subsb
    return vmax * subsb * subsa / denom - rate
def create_ppm(v_max, kma, kmb, *_):
    """Build a plain ping-pong mechanism rate function.

    Returns a closure eq(subsa, subsb) -> rate over the fitted constants.
    """
    def eq(subsa, subsb):
        denom = (kmb * subsa) + (kma * subsb) + (subsa * subsb)
        return v_max * subsb * subsa / denom

    return eq
class PPMparams(FitParam):
    """lmfit parameter class for Ping pong mechanism fitting.

    Each constructor argument is an (initial value, min, max) triple used to
    seed the corresponding lmfit parameter.
    """

    def __init__(self, v_max=(1, 0, 100), kma=(1, 0, 100), kmb=(1, 0, 100)):
        # NOTE(review): 'FitParam.__int__' looks like a typo for '__init__';
        # confirm against FitParam's definition before changing it.
        FitParam.__int__(self)
        lmfit.Parameters.__init__(self)
        self.add('v_max', value=v_max[0], min=v_max[1], max=v_max[2])
        self.add('kma', value=kma[0], min=kma[1], max=kma[2])
        self.add('kmb', value=kmb[0], min=kmb[1], max=kmb[2])
        # Residual function and equation factory used by the fitting code.
        self.fitf = ppm_function
        self.eq = create_ppm
        self.name = 'Ping-pong mechanism'
        # Order in which fitted parameters are reported / passed to self.eq.
        self.param_order = ['v_max', 'kma', 'kmb']
        # 'r' = rate unit, 'c' = concentration unit -- TODO confirm meaning.
        self.units = ['r', 'c', 'c']
"""
Definition of urls for knodj_test.
"""
from datetime import datetime
from django.conf.urls import patterns, url
from app.forms import BootstrapAuthenticationForm
# Uncomment the next lines to enable the admin:
# from django.conf.urls import include
# from django.contrib import admin
# admin.autodiscover()
# URL routing table for the site.
# NOTE(review): the patterns()/dotted-string view syntax used below was
# deprecated in Django 1.8 and removed in 1.10, so this module targets an
# older Django release; keep that in mind before upgrading.
urlpatterns = patterns('',
    # Examples:
    url(r'^$', 'app.views.home', name='home'),
    url(r'^contact$', 'app.views.contact', name='contact'),
    url(r'^about', 'app.views.about', name='about'),

    # Login view: renders app/login.html with the Bootstrap-styled auth form
    # and passes the page title and current year to the template.
    url(r'^login/$',
        'django.contrib.auth.views.login',
        {
            'template_name': 'app/login.html',
            'authentication_form': BootstrapAuthenticationForm,
            'extra_context':
            {
                'title':'Log in',
                'year':datetime.now().year,
            }
        },
        name='login'),

    # Logout view: redirect to the site root afterwards.
    url(r'^logout$',
        'django.contrib.auth.views.logout',
        {
            'next_page': '/',
        },
        name='logout'),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
| mkennedy04/knodj | knodj_test/urls.py | Python | mit | 1,256 |
# Copyright (c) 2016-2019 The University of Manchester
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
| project-rig/spalloc | tests/__init__.py | Python | gpl-2.0 | 691 |
import os
import sys
from direct.showbase.ShowBase import ShowBase
import panda3d.core as p3d
import blenderpanda
import inputmapper
from nitrogen import gamestates
# Resolve the application root both for frozen builds (sys.frozen is set by
# freezers such as cx_Freeze/PyInstaller) and for running from source.
if hasattr(sys, 'frozen'):
    APP_ROOT_DIR = os.path.dirname(sys.executable)
else:
    APP_ROOT_DIR = os.path.dirname(__file__)
if not APP_ROOT_DIR:
    # Without a root we cannot locate config files; bail out early.
    print("empty app_root_dir")
    sys.exit()

# prc files to load sorted by load order
CONFIG_ROOT_DIR = os.path.join(APP_ROOT_DIR, 'config')
CONFIG_FILES = [
    os.path.join(CONFIG_ROOT_DIR, 'game.prc'),
    os.path.join(CONFIG_ROOT_DIR, 'user.prc'),
]

# Load each Panda3D .prc config file that exists; load order matters
# (presumably user.prc overrides game.prc -- see Panda3D prc docs).
for config_file in CONFIG_FILES:
    if os.path.exists(config_file):
        print("Loading config file:", config_file)
        config_file = p3d.Filename.from_os_specific(config_file)
        p3d.load_prc_file(config_file)
    else:
        print("Could not find config file", config_file)
class GameApp(ShowBase):
    """Main Panda3D application: input mapping, mouse capture, game states."""

    def __init__(self):
        ShowBase.__init__(self)
        blenderpanda.init(self)
        # Map raw device events to logical game events via config/input.conf.
        self.input_mapper = inputmapper.InputMapper(os.path.join(CONFIG_ROOT_DIR, 'input.conf'))
        self.accept('quit', sys.exit)

        # Take manual control of the camera, then center the pointer and
        # confine it to the window (relative mouse-look style input).
        self.disableMouse()
        winprops = self.win.get_properties()
        self.win.move_pointer(0, winprops.get_x_size() // 2, winprops.get_y_size() // 2)
        winprops = p3d.WindowProperties()
        winprops.set_mouse_mode(p3d.WindowProperties.M_confined)
        self.win.request_properties(winprops)

        # The single active game state; swapped via change_state().
        self.current_state = gamestates.MainState()

        def update_gamestate(task):
            # Advance the active state by the frame's delta time, every frame.
            self.current_state.update(p3d.ClockObject.get_global_clock().get_dt())
            return task.cont
        self.taskMgr.add(update_gamestate, 'GameState')

    def change_state(self, next_state):
        """Tear down the active state and switch to next_state (a class)."""
        self.current_state.cleanup()
        self.current_state = next_state()
def main():
    """Entry point: create the application and enter its main loop."""
    GameApp().run()
if __name__ == '__main__':
main()
| Moguri/prototype-nitrogen | game/main.py | Python | apache-2.0 | 1,902 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.boore_atkinson_2011 import (BooreAtkinson2011,
Atkinson2008prime)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
class BooreAtkinson2011TestCase(BaseGSIMTestCase):
    """Verification tests for the Boore & Atkinson (2011) GMPE."""
    GSIM_CLASS = BooreAtkinson2011

    # Test data created using the code available on the website of
    # D. Boore - http://daveboore.com/ (checked on 2014.01.08)
    # Code name: nga08_gm_tmr.for
    def test_mean_normal(self):
        # Allow up to 1.1% discrepancy against the independently
        # generated reference medians.
        self.check('BA11/BA11_MEDIAN.csv',
                   max_discrep_percentage=1.1)
class Atkinson2008primeTestCase(BaseGSIMTestCase):
    """Verification tests for the Atkinson (2008') adjusted GMPE."""
    GSIM_CLASS = Atkinson2008prime

    # Test data created using GMPE adjustment factor
    def test_mean_normal(self):
        # Allow up to 1.1% discrepancy against the reference means.
        self.check('BA11/A08_BA11_MEAN.csv',
                   max_discrep_percentage=1.1)
| gem/oq-hazardlib | openquake/hazardlib/tests/gsim/boore_atkinson_2011_test.py | Python | agpl-3.0 | 1,622 |
#!/usr/bin/python
# Copyright: (c) 2016, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: sefcontext
short_description: Manages SELinux file context mapping definitions
description:
- Manages SELinux file context mapping definitions.
- Similar to the C(semanage fcontext) command.
version_added: '2.2'
options:
target:
description:
- Target path (expression).
type: str
required: yes
aliases: [ path ]
ftype:
description:
- The file type that should have SELinux contexts applied.
- "The following file type options are available:"
- C(a) for all files,
- C(b) for block devices,
- C(c) for character devices,
- C(d) for directories,
- C(f) for regular files,
- C(l) for symbolic links,
- C(p) for named pipes,
- C(s) for socket files.
type: str
default: a
setype:
description:
- SELinux type for the specified target.
required: yes
seuser:
description:
- SELinux user for the specified target.
type: str
selevel:
description:
- SELinux range for the specified target.
type: str
aliases: [ serange ]
state:
description:
- Whether the SELinux file context must be C(absent) or C(present).
type: str
choices: [ absent, present ]
default: present
reload:
description:
- Reload SELinux policy after commit.
- Note that this does not apply SELinux file contexts to existing files.
type: bool
default: 'yes'
notes:
- The changes are persistent across reboots.
- The M(sefcontext) module does not modify existing files to the new
SELinux context(s), so it is advisable to first create the SELinux
file contexts before creating files, or run C(restorecon) manually
for the existing files that require the new SELinux file contexts.
- Not applying SELinux fcontexts to existing files is a deliberate
decision as it would be unclear what reported changes would entail
to, and there's no guarantee that applying SELinux fcontext does
not pick up other unrelated prior changes.
requirements:
- libselinux-python
- policycoreutils-python
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Allow apache to modify files in /srv/git_repos
sefcontext:
target: '/srv/git_repos(/.*)?'
setype: httpd_git_rw_content_t
state: present
- name: Apply new SELinux file context to filesystem
command: restorecon -irv /srv/git_repos
'''
RETURN = r'''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils._text import to_native
try:
import selinux
HAVE_SELINUX = True
except ImportError:
HAVE_SELINUX = False
try:
import seobject
HAVE_SEOBJECT = True
except ImportError:
HAVE_SEOBJECT = False
# Add missing entries (backward compatible)
if HAVE_SEOBJECT:
seobject.file_types.update(dict(
a=seobject.SEMANAGE_FCONTEXT_ALL,
b=seobject.SEMANAGE_FCONTEXT_BLOCK,
c=seobject.SEMANAGE_FCONTEXT_CHAR,
d=seobject.SEMANAGE_FCONTEXT_DIR,
f=seobject.SEMANAGE_FCONTEXT_REG,
l=seobject.SEMANAGE_FCONTEXT_LINK,
p=seobject.SEMANAGE_FCONTEXT_PIPE,
s=seobject.SEMANAGE_FCONTEXT_SOCK,
))
# Make backward compatible
option_to_file_type_str = dict(
a='all files',
b='block device',
c='character device',
d='directory',
f='regular file',
l='symbolic link',
p='named pipe',
s='socket file',
)
def semanage_fcontext_exists(sefcontext, target, ftype):
    '''Get the SELinux file context mapping definition from policy.

    :param sefcontext: a seobject.fcontextRecords instance
    :param target: target path (expression) of the mapping
    :param ftype: single-letter file type option (see option_to_file_type_str)
    :returns: the (seuser, serole, setype, serange) record, or None if the
              mapping does not exist.
    '''
    # Beware that records comprise of a string representation of the file_type
    record = (target, option_to_file_type_str[ftype])
    # dict.get() expresses lookup-with-default directly, replacing the
    # previous try/except KeyError around a plain subscript.
    return sefcontext.get_all().get(record)
def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''):
    '''Add or modify a SELinux file context mapping definition in the policy.

    Exits the module via module.exit_json() (or fail_json() on error); it
    does not return to the caller.
    '''
    changed = False
    prepared_diff = ''

    try:
        sefcontext = seobject.fcontextRecords(sestore)
        sefcontext.set_reload(do_reload)
        exists = semanage_fcontext_exists(sefcontext, target, ftype)
        if exists:
            # Modify existing entry
            orig_seuser, orig_serole, orig_setype, orig_serange = exists
            # Unspecified user/range keep their current values.
            if seuser is None:
                seuser = orig_seuser
            if serange is None:
                serange = orig_serange
            if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
                if not module.check_mode:
                    sefcontext.modify(target, setype, ftype, serange, seuser)
                changed = True
                if module._diff:
                    prepared_diff += '# Change to semanage file context mappings\n'
                    prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
                    prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
        else:
            # Add missing entry, falling back to defaults for the
            # unspecified user and range.
            if seuser is None:
                seuser = 'system_u'
            if serange is None:
                serange = 's0'
            if not module.check_mode:
                sefcontext.add(target, setype, ftype, serange, seuser)
            changed = True
            if module._diff:
                prepared_diff += '# Addition to semanage file context mappings\n'
                prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
    except Exception:
        e = get_exception()
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))

    if module._diff and prepared_diff:
        result['diff'] = dict(prepared=prepared_diff)

    module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
    '''Delete a SELinux file context mapping definition from the policy.

    Exits the module via module.exit_json() (or fail_json() on error); it
    does not return to the caller.
    '''
    changed = False
    prepared_diff = ''

    try:
        sefcontext = seobject.fcontextRecords(sestore)
        sefcontext.set_reload(do_reload)
        exists = semanage_fcontext_exists(sefcontext, target, ftype)
        if exists:
            # Remove existing entry
            orig_seuser, orig_serole, orig_setype, orig_serange = exists
            if not module.check_mode:
                sefcontext.delete(target, ftype)
            changed = True
            if module._diff:
                prepared_diff += '# Deletion to semanage file context mappings\n'
                # Use the unpacked names (same values as exists[0..3]) for
                # consistency with semanage_fcontext_modify.
                prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
    except Exception:
        e = get_exception()
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))

    if module._diff and prepared_diff:
        result['diff'] = dict(prepared=prepared_diff)

    module.exit_json(changed=changed, **result)
def main():
    # The argument_spec keys, aliases, defaults and choices are the public
    # module contract and are kept exactly as-is.
    module = AnsibleModule(
        argument_spec=dict(
            target=dict(required=True, aliases=['path']),
            ftype=dict(type='str', default='a', choices=option_to_file_type_str.keys()),
            setype=dict(type='str', required=True),
            seuser=dict(type='str'),
            selevel=dict(type='str', aliases=['serange']),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            reload=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )
    # Bail out early when the required SELinux python bindings are missing
    # or SELinux is not active on the managed host.
    if not HAVE_SELINUX:
        module.fail_json(msg="This module requires libselinux-python")
    if not HAVE_SEOBJECT:
        module.fail_json(msg="This module requires policycoreutils-python")
    if not selinux.is_selinux_enabled():
        module.fail_json(msg="SELinux is disabled on this host.")
    params = module.params
    state = params['state']
    result = dict(target=params['target'], ftype=params['ftype'],
                  setype=params['setype'], state=state)
    if state == 'present':
        semanage_fcontext_modify(module, result, params['target'],
                                 params['ftype'], params['setype'],
                                 params['reload'], params['selevel'],
                                 params['seuser'])
    elif state == 'absent':
        semanage_fcontext_delete(module, result, params['target'],
                                 params['ftype'], params['reload'])
    else:
        module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
| slohse/ansible | lib/ansible/modules/system/sefcontext.py | Python | gpl-3.0 | 9,116 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2015 Douglas Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gramps.gen.plug._pluginreg import register, STABLE, DATABASE
from gramps.gen.const import GRAMPS_LOCALE as glocale
# Shorthand for the Gramps translation function.
_ = glocale.translation.gettext
# Register the DB-API database backend with the Gramps plugin manager.
# 'fname'/'databaseclass' point at the implementation module and class;
# 'gramps_target_version' pins the Gramps release series this plugin targets.
register(DATABASE,
id = 'dbapi',
name = _("DB-API"),
name_accell = _("DB-_API Database"),
description = _("DB-API Database"),
version = '1.0.32',
gramps_target_version = "5.1",
status = STABLE,
fname = 'dbapi.py',
databaseclass = 'DBAPI',
authors=['Doug Blank'],
authors_email=["doug.blank@gmail.com"],
)
| ennoborg/gramps | gramps/plugins/db/dbapi/dbapi.gpr.py | Python | gpl-2.0 | 1,381 |
"""distutils.util
Miscellaneous utility functions -- anything that doesn't fit into
one of the other *util.py modules.
"""
__revision__ = "$Id$"
import sys, os, string, re
from distutils.errors import DistutilsPlatformError
from distutils.dep_util import newer
from distutils.spawn import spawn
from distutils import log
from distutils.errors import DistutilsByteCompileError
def get_platform ():
    """Return a string that identifies the current platform.  This is used
    mainly to distinguish platform-specific build directories and
    platform-specific built distributions.  Typically includes the OS name
    and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.

    Examples of returned values:
       linux-i586
       linux-alpha (?)
       solaris-2.6-sun4u
       irix-5.3
       irix64-6.2

    Windows will return one of:
       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
       win-ia64 (64bit Windows on Itanium)
       win32 (all others - specifically, sys.platform is returned)

    For other non-POSIX platforms, currently just returns 'sys.platform'.

    :rtype: str
    """
    if os.name == 'nt':
        # sniff sys.version for architecture.
        # sys.version embeds the build string, e.g. "... [MSC v.1500 64 bit (AMD64)]".
        prefix = " bit ("
        i = string.find(sys.version, prefix)
        if i == -1:
            return sys.platform
        j = string.find(sys.version, ")", i)
        look = sys.version[i+len(prefix):j].lower()
        if look=='amd64':
            return 'win-amd64'
        if look=='itanium':
            return 'win-ia64'
        return sys.platform
    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform
    # Try to distinguish various flavours of Unix
    (osname, host, release, version, machine) = os.uname()
    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = string.lower(osname)
    osname = string.replace(osname, '/', '')
    machine = string.replace(machine, ' ', '_')
    machine = string.replace(machine, '/', '-')
    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5": # SunOS 5 == Solaris 2
            osname = "solaris"
            # Solaris marketing version = SunOS major - 3 (SunOS 5.8 -> Solaris 2.8).
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix": # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        # Keep only the leading dotted-numeric part of the cygwin release.
        rel_re = re.compile (r'[\d.]+')
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        #
        # For our purposes, we'll assume that the system version from
        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
        # to. This makes the compatibility story a bit more sane because the
        # machine is going to compile and link as if it were
        # MACOSX_DEPLOYMENT_TARGET.
        from distutils.sysconfig import get_config_vars
        cfgvars = get_config_vars()
        macver = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
        if not macver:
            macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
        # NOTE: historical 'if 1:' kept from upstream distutils; the block
        # always runs and only exists to preserve indentation of old code.
        if 1:
            # Always calculate the release of the running machine,
            # needed to determine if we can build fat binaries or not.
            macrelease = macver
            # Get the system version. Reading this plist is a documented
            # way to get the system version (see the documentation for
            # the Gestalt Manager)
            try:
                f = open('/System/Library/CoreServices/SystemVersion.plist')
            except IOError:
                # We're on a plain darwin box, fall back to the default
                # behaviour.
                pass
            else:
                try:
                    m = re.search(
                            r'<key>ProductUserVisibleVersion</key>\s*' +
                            r'<string>(.*?)</string>', f.read())
                    if m is not None:
                        macrelease = '.'.join(m.group(1).split('.')[:2])
                    # else: fall back to the default behaviour
                finally:
                    f.close()
            if not macver:
                macver = macrelease
        if macver:
            from distutils.sysconfig import get_config_vars
            release = macver
            osname = "macosx"
            if (macrelease + '.') >= '10.4.' and \
                    '-arch' in get_config_vars().get('CFLAGS', '').strip():
                # The universal build will build fat binaries, but not on
                # systems before 10.4
                #
                # Try to detect 4-way universal builds, those have machine-type
                # 'universal' instead of 'fat'.
                machine = 'fat'
                cflags = get_config_vars().get('CFLAGS')
                archs = re.findall('-arch\s+(\S+)', cflags)
                archs = tuple(sorted(set(archs)))
                if len(archs) == 1:
                    machine = archs[0]
                elif archs == ('i386', 'ppc'):
                    machine = 'fat'
                elif archs == ('i386', 'x86_64'):
                    machine = 'intel'
                elif archs == ('i386', 'ppc', 'x86_64'):
                    machine = 'fat3'
                elif archs == ('ppc64', 'x86_64'):
                    machine = 'fat64'
                elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                    machine = 'universal'
                else:
                    raise ValueError(
                        "Don't know machine value for archs=%r"%(archs,))
            elif machine == 'i386':
                # On OSX the machine type returned by uname is always the
                # 32-bit variant, even if the executable architecture is
                # the 64-bit variant
                if sys.maxint >= 2**32:
                    machine = 'x86_64'
            elif machine in ('PowerPC', 'Power_Macintosh'):
                # Pick a sane name for the PPC architecture.
                machine = 'ppc'
                # See 'i386' case
                if sys.maxint >= 2**32:
                    machine = 'ppc64'
    return "%s-%s-%s" % (osname, release, machine)
# get_platform ()
def convert_path (pathname):
    """Return 'pathname' as a name that will work on the native filesystem,
    i.e. split it on '/' and put it back together again using the current
    directory separator.  Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem.  Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    if os.sep == '/':
        return pathname
    if not pathname:
        return pathname
    # str methods replace the long-deprecated string-module functions and
    # the callable raise form is valid on both Python 2 and 3.
    if pathname[0] == '/':
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname[-1] == '/':
        raise ValueError("path '%s' cannot end with '/'" % pathname)
    paths = pathname.split('/')
    # Drop no-op '.' components; a path of only '.'s collapses to curdir.
    while '.' in paths:
        paths.remove('.')
    if not paths:
        return os.curdir
    return os.path.join(*paths)
# convert_path ()
def change_root (new_root, pathname):
    """Return 'pathname' with 'new_root' prepended.  If 'pathname' is
    relative, this is equivalent to "os.path.join(new_root,pathname)".
    Otherwise, it requires making 'pathname' relative and then joining the
    two, which is tricky on DOS/Windows and Mac OS.

    Raises DistutilsPlatformError on platforms this function knows
    nothing about (os.name other than posix/nt/os2).
    """
    if os.name == 'posix':
        if not os.path.isabs(pathname):
            return os.path.join(new_root, pathname)
        else:
            # Strip the leading '/' so join() does not discard new_root.
            return os.path.join(new_root, pathname[1:])
    elif os.name == 'nt':
        (drive, path) = os.path.splitdrive(pathname)
        if path[0] == '\\':
            path = path[1:]
        return os.path.join(new_root, path)
    elif os.name == 'os2':
        (drive, path) = os.path.splitdrive(pathname)
        if path[0] == os.sep:
            path = path[1:]
        return os.path.join(new_root, path)
    else:
        # Callable raise form: valid on both Python 2 and 3 (the old
        # 'raise Exc, arg' statement form is Python 2 only).
        raise DistutilsPlatformError(
            "nothing known about platform '%s'" % os.name)
# Module-level guard so check_environ() does its work only once per process.
_environ_checked = 0
def check_environ ():
    """Ensure that 'os.environ' has all the environment variables we
    guarantee that users can use in config files, command-line options,
    etc.  Currently this includes:
      HOME - user's home directory (Unix only)
      PLAT - description of the current platform, including hardware
             and OS (see 'get_platform()')

    Idempotent: subsequent calls return immediately.
    """
    global _environ_checked
    if _environ_checked:
        return
    if os.name == 'posix' and 'HOME' not in os.environ:
        import pwd
        # Index 5 of a pwd entry is pw_dir, the user's home directory.
        os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
    if 'PLAT' not in os.environ:
        os.environ['PLAT'] = get_platform()
    _environ_checked = 1
def subst_vars (s, local_vars):
    """Perform shell/Perl-style variable substitution on 'string'.  Every
    occurrence of '$' followed by a name is considered a variable, and
    variable is substituted by the value found in the 'local_vars'
    dictionary, or in 'os.environ' if it's not in 'local_vars'.
    'os.environ' is first checked/augmented to guarantee that it contains
    certain values: see 'check_environ()'.  Raise ValueError for any
    variables not found in either 'local_vars' or 'os.environ'.
    """
    # Make sure HOME/PLAT are available for substitution.
    check_environ()
    def _subst (match, local_vars=local_vars):
        # local_vars takes precedence over the environment.
        var_name = match.group(1)
        if var_name in local_vars:
            return str(local_vars[var_name])
        else:
            return os.environ[var_name]
    try:
        return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
    except KeyError, var:
        # NOTE(review): 'var' is the KeyError instance, so the message ends up
        # with the key repr embedded (extra quotes) -- historical behaviour.
        raise ValueError, "invalid variable '$%s'" % var
# subst_vars ()
def grok_environment_error (exc, prefix="error: "):
    """Build a readable message, prefixed with 'prefix', out of an
    EnvironmentError (IOError or OSError) instance.  Copes with both the
    Python 1.5.1 and 1.5.2 exception layouts, and with exception objects
    that carry no filename (as produced by two-file operations such as
    'rename()' or 'link()').  Returns the message as a string.
    """
    new_style = hasattr(exc, 'filename') and hasattr(exc, 'strerror')
    if not new_style:
        # Old-style (1.5.1) exception: the last element is the message.
        return prefix + str(exc[-1])
    if exc.filename:
        return prefix + "%s: %s" % (exc.filename, exc.strerror)
    # Two-argument posix functions leave 'filename' unset.
    return prefix + "%s" % exc.strerror
# Needed by 'split_quoted()'
# Compiled lazily on first use; None until _init_regex() runs.
_wordchars_re = _squote_re = _dquote_re = None
def _init_regex():
    """Compile the three regexes used by split_quoted()."""
    global _wordchars_re, _squote_re, _dquote_re
    # Run of characters that are not backslash, quote, or whitespace.
    _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
    # A complete single-quoted string, honouring backslash escapes.
    _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
    # A complete double-quoted string, honouring backslash escapes.
    _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
def split_quoted (s):
    """Split a string up according to Unix shell-like rules for quotes and
    backslashes.  In short: words are delimited by spaces, as long as those
    spaces are not escaped by a backslash, or inside a quoted string.
    Single and double quotes are equivalent, and the quote characters can
    be backslash-escaped.  The backslash is stripped from any two-character
    escape sequence, leaving only the escaped character.  The quote
    characters are stripped from any quoted string.  Returns a list of
    words.
    """
    # This is a nice algorithm for splitting up a single string, since it
    # doesn't require character-by-character examination.  It was a little
    # bit of a brain-bender to get it working right, though...
    # The loop repeatedly edits 's' in place (removing backslashes and
    # quote characters) while 'pos' tracks how far it has scanned.
    if _wordchars_re is None: _init_regex()
    s = string.strip(s)
    words = []
    pos = 0
    while s:
        # Consume the longest run of plain word characters from 'pos'.
        m = _wordchars_re.match(s, pos)
        end = m.end()
        if end == len(s):
            words.append(s[:end])
            break
        if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
            words.append(s[:end])       # we definitely have a word delimiter
            s = string.lstrip(s[end:])
            pos = 0
        elif s[end] == '\\':            # preserve whatever is being escaped;
                                        # will become part of the current word
            s = s[:end] + s[end+1:]
            pos = end+1
        else:
            if s[end] == "'":           # slurp singly-quoted string
                m = _squote_re.match(s, end)
            elif s[end] == '"':         # slurp doubly-quoted string
                m = _dquote_re.match(s, end)
            else:
                raise RuntimeError, \
                      "this can't happen (bad char '%c')" % s[end]
            if m is None:
                raise ValueError, \
                      "bad string (mismatched %s quotes?)" % s[end]
            # Drop the surrounding quote characters from the matched span.
            (beg, end) = m.span()
            s = s[:beg] + s[beg+1:end-1] + s[end:]
            pos = m.end() - 2
        if pos >= len(s):
            words.append(s)
            break
    return words
# split_quoted ()
def execute (func, args, msg=None, verbose=0, dry_run=0):
    """Run 'func(*args)', an action that affects the outside world (eg. by
    writing to the filesystem).  Such actions are special because they are
    disabled by the 'dry_run' flag.  This helper logs 'msg' (synthesizing
    one from the function name and argument tuple when none is supplied)
    and then performs the call unless 'dry_run' is true.
    """
    if msg is None:
        msg = "%s%r" % (func.__name__, args)
        # repr of a 1-tuple ends in ',)'; trim it to a plain ')' for display.
        if msg.endswith(',)'):
            msg = msg[:-2] + ')'
    log.info(msg)
    if dry_run:
        return
    func(*args)
def strtobool (val):
    """Convert a string representation of truth to true (1) or false (0).

    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
    are 'n', 'no', 'f', 'false', 'off', and '0'.  Raises ValueError if
    'val' is anything else.
    """
    # str.lower() replaces the long-deprecated string.lower() and the
    # callable raise form is valid on both Python 2 and 3.
    val = val.lower()
    if val in ('y', 'yes', 't', 'true', 'on', '1'):
        return 1
    elif val in ('n', 'no', 'f', 'false', 'off', '0'):
        return 0
    else:
        raise ValueError("invalid truth value %r" % (val,))
def byte_compile (py_files,
                  optimize=0, force=0,
                  prefix=None, base_dir=None,
                  verbose=1, dry_run=0,
                  direct=None):
    """Byte-compile a collection of Python source files to either .pyc
    or .pyo files in the same directory.  'py_files' is a list of files
    to compile; any files that don't end in ".py" are silently skipped.
    'optimize' must be one of the following:
      0 - don't optimize (generate .pyc)
      1 - normal optimization (like "python -O")
      2 - extra optimization (like "python -OO")
    If 'force' is true, all files are recompiled regardless of
    timestamps.

    The source filename encoded in each bytecode file defaults to the
    filenames listed in 'py_files'; you can modify these with 'prefix' and
    'basedir'.  'prefix' is a string that will be stripped off of each
    source filename, and 'base_dir' is a directory name that will be
    prepended (after 'prefix' is stripped).  You can supply either or both
    (or neither) of 'prefix' and 'base_dir', as you wish.

    If 'dry_run' is true, doesn't actually do anything that would
    affect the filesystem.

    Byte-compilation is either done directly in this interpreter process
    with the standard py_compile module, or indirectly by writing a
    temporary script and executing it.  Normally, you should let
    'byte_compile()' figure out to use direct compilation or not (see
    the source for details).  The 'direct' flag is used by the script
    generated in indirect mode; unless you know what you're doing, leave
    it set to None.

    Raises DistutilsByteCompileError when bytecode writing is disabled.
    """
    # nothing is done if sys.dont_write_bytecode is True
    if sys.dont_write_bytecode:
        raise DistutilsByteCompileError('byte-compiling is disabled.')
    # First, if the caller didn't force us into direct or indirect mode,
    # figure out which mode we should be in.  We take a conservative
    # approach: choose direct mode *only* if the current interpreter is
    # in debug mode and optimize is 0.  If we're not in debug mode (-O
    # or -OO), we don't know which level of optimization this
    # interpreter is running with, so we can't do direct
    # byte-compilation and be certain that it's the right thing.  Thus,
    # always compile indirectly if the current interpreter is in either
    # optimize mode, or if either optimization level was requested by
    # the caller.
    if direct is None:
        direct = (__debug__ and optimize == 0)
    # "Indirect" byte-compilation: write a temporary script and then
    # run it with the appropriate flags.
    if not direct:
        # mkstemp is preferred (no race); mktemp is the insecure fallback
        # for ancient interpreters that lack mkstemp.
        try:
            from tempfile import mkstemp
            (script_fd, script_name) = mkstemp(".py")
        except ImportError:
            from tempfile import mktemp
            (script_fd, script_name) = None, mktemp(".py")
        log.info("writing byte-compilation script '%s'", script_name)
        if not dry_run:
            if script_fd is not None:
                script = os.fdopen(script_fd, "w")
            else:
                script = open(script_name, "w")
            script.write("""\
from distutils.util import byte_compile
files = [
""")
            # XXX would be nice to write absolute filenames, just for
            # safety's sake (script should be more robust in the face of
            # chdir'ing before running it).  But this requires abspath'ing
            # 'prefix' as well, and that breaks the hack in build_lib's
            # 'byte_compile()' method that carefully tacks on a trailing
            # slash (os.sep really) to make sure the prefix here is "just
            # right".  This whole prefix business is rather delicate -- the
            # problem is that it's really a directory, but I'm treating it
            # as a dumb string, so trailing slashes and so forth matter.
            #py_files = map(os.path.abspath, py_files)
            #if prefix:
            #    prefix = os.path.abspath(prefix)
            script.write(string.join(map(repr, py_files), ",\n") + "]\n")
            script.write("""
byte_compile(files, optimize=%r, force=%r,
             prefix=%r, base_dir=%r,
             verbose=%r, dry_run=0,
             direct=1)
""" % (optimize, force, prefix, base_dir, verbose))
            script.close()
        # Re-run this same function in a child interpreter started with
        # the flags matching the requested optimization level.
        cmd = [sys.executable, script_name]
        if optimize == 1:
            cmd.insert(1, "-O")
        elif optimize == 2:
            cmd.insert(1, "-OO")
        spawn(cmd, dry_run=dry_run)
        execute(os.remove, (script_name,), "removing %s" % script_name,
                dry_run=dry_run)
    # "Direct" byte-compilation: use the py_compile module to compile
    # right here, right now.  Note that the script generated in indirect
    # mode simply calls 'byte_compile()' in direct mode, a weird sort of
    # cross-process recursion.  Hey, it works!
    else:
        from py_compile import compile
        for file in py_files:
            if file[-3:] != ".py":
                # This lets us be lazy and not filter filenames in
                # the "install_lib" command.
                continue
            # Terminology from the py_compile module:
            #   cfile - byte-compiled file
            #   dfile - purported source filename (same as 'file' by default)
            # .pyc in debug interpreters, .pyo under -O/-OO (Python 2 layout).
            cfile = file + (__debug__ and "c" or "o")
            dfile = file
            if prefix:
                if file[:len(prefix)] != prefix:
                    raise ValueError, \
                          ("invalid prefix: filename %r doesn't start with %r"
                           % (file, prefix))
                dfile = dfile[len(prefix):]
            if base_dir:
                dfile = os.path.join(base_dir, dfile)
            cfile_base = os.path.basename(cfile)
            if direct:
                if force or newer(file, cfile):
                    log.info("byte-compiling %s to %s", file, cfile_base)
                    if not dry_run:
                        compile(file, cfile, dfile)
                else:
                    log.debug("skipping byte-compilation of %s to %s",
                              file, cfile_base)
# byte_compile ()
def rfc822_escape (header):
    """Return a version of the string escaped for inclusion in an
    RFC-822 header, by ensuring there are 8 spaces space after each newline.
    """
    # str.split/str.join replace the long-deprecated string-module
    # functions; behaviour is unchanged.
    lines = header.split('\n')
    return ('\n' + 8 * ' ').join(lines)
| microdee/IronHydra | src/IronHydra/Lib/distutils/util.py | Python | mit | 21,667 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes Issue.cover_video optional and
    # alters Issue.created_at's default.
    dependencies = [
        ('cerci_issue', '0003_auto_20151003_1529'),
    ]
    operations = [
        migrations.AlterField(
            model_name='issue',
            name='cover_video',
            field=models.FileField(upload_to=b'videos/issue/', null=True, verbose_name='Cover Video', blank=True),
        ),
        migrations.AlterField(
            model_name='issue',
            name='created_at',
            # NOTE(review): the default is the timestamp frozen at the moment
            # this migration was generated (2015-10-11 10:32 UTC), not "now" --
            # typical of makemigrations output when a default was prompted for.
            field=models.DateTimeField(default=datetime.datetime(2015, 10, 11, 10, 32, 37, 292927, tzinfo=utc), verbose_name='Olu\u015fturulma Tarihi'),
        ),
    ]
| cercisanat/cercisanat.com | cerci_issue/migrations/0004_auto_20151011_1332.py | Python | gpl-3.0 | 786 |
from constants import *
def consonant_coda(word):
    # 1 when the word's final character is in CONSONANTS_REGEX, else 0.
    # NOTE(review): CONSONANTS_REGEX is star-imported from `constants`;
    # despite the name it is used as a membership container here, not a
    # compiled regex -- confirm against constants.py.
    return coda(CONSONANTS_REGEX, word)
def obstruent_coda(word):
    # 1 when the word's final character is in OBSTRUENT_REGEX, else 0.
    # NOTE(review): OBSTRUENT_REGEX comes from `constants` via star-import;
    # used as a membership container -- confirm against constants.py.
    return coda(OBSTRUENT_REGEX, word)
def coda(regex, word):
    """Return 1 if the final character of `word` is in `regex`, else 0.

    `regex` is any membership container of characters (e.g. a string).
    Guards against an empty `word`, which previously raised IndexError
    on `word[-1]`; an empty word has no coda, so 0 is returned.
    """
    return 1 if word and word[-1] in regex else 0
| russmatney/unicode-classification-engine | features/coda.py | Python | mit | 228 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Union
from typing import Type
from typing import Any
import datetime
from libcloud import __version__
from libcloud.common.base import Connection
from libcloud.common.base import ConnectionUserAndKey, BaseDriver
from libcloud.dns.types import RecordType
# Public API of this module.
__all__ = [
    'Zone',
    'Record',
    'DNSDriver'
]
class Zone(object):
    """
    Represents a single DNS zone hosted at a provider.
    """
    def __init__(self, id, domain, type, ttl, driver, extra=None):
        # type: (str, str, str, int, DNSDriver, dict) -> None
        """
        :param id: Zone id.
        :type id: ``str``

        :param domain: The name of the domain.
        :type domain: ``str``

        :param type: Zone type (master, slave).
        :type type: ``str``

        :param ttl: Default TTL for records in this zone (in seconds).
        :type ttl: ``int``

        :param driver: DNSDriver instance.
        :type driver: :class:`DNSDriver`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        # Normalize: a falsy id becomes None, anything else its string form.
        self.id = None if not id else str(id)
        self.domain = domain
        self.type = type
        # Normalize: a falsy ttl (0 / None) is stored as None.
        self.ttl = ttl if ttl else None
        self.driver = driver
        self.extra = extra if extra else {}

    def list_records(self):
        # type: () -> List[Record]
        """Fetch all records belonging to this zone via the driver."""
        return self.driver.list_records(zone=self)

    def create_record(self, name, type, data, extra=None):
        # type: (str, RecordType, str, Optional[dict]) -> Record
        """Create a new record inside this zone via the driver."""
        return self.driver.create_record(name=name, zone=self, type=type,
                                         data=data, extra=extra)

    def update(self, domain=None, type=None, ttl=None, extra=None):
        # type: (Optional[str], Optional[str], Optional[int], Optional[dict]) -> Zone
        """Update attributes of this zone via the driver."""
        return self.driver.update_zone(zone=self, domain=domain, type=type,
                                       ttl=ttl, extra=extra)

    def delete(self):
        # type: () -> bool
        """Delete this zone (including its records) via the driver."""
        return self.driver.delete_zone(zone=self)

    def export_to_bind_format(self):
        # type: () -> str
        """Render this zone as BIND zone-file text."""
        return self.driver.export_zone_to_bind_format(zone=self)

    def export_to_bind_zone_file(self, file_path):
        # type: (str) -> None
        """Render this zone as BIND zone-file text and write it to disk."""
        self.driver.export_zone_to_bind_zone_file(zone=self,
                                                  file_path=file_path)

    def __repr__(self):
        # type: () -> str
        return ('<Zone: domain=%s, ttl=%s, provider=%s ...>' %
                (self.domain, self.ttl, self.driver.name))
class Record(object):
    """
    A single resource record belonging to a :class:`Zone`.
    """
    def __init__(self, id, name, type, data, zone, driver, ttl=None,
                 extra=None):
        # type: (str, str, RecordType, str, Zone, DNSDriver, int, dict) -> None
        """
        :param id: Record id
        :type id: ``str``

        :param name: Hostname or FQDN.
        :type name: ``str``

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param zone: Zone instance.
        :type zone: :class:`Zone`

        :param driver: DNSDriver instance.
        :type driver: :class:`DNSDriver`

        :param ttl: Record TTL.
        :type ttl: ``int``

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        # Normalize: a falsy id becomes None, anything else its string form.
        self.id = None if not id else str(id)
        self.name = name
        self.type = type
        self.data = data
        self.zone = zone
        self.driver = driver
        self.ttl = ttl
        self.extra = extra if extra else {}

    def update(self, name=None, type=None, data=None, extra=None):
        # type: (Optional[str], Optional[RecordType], Optional[str], Optional[dict]) -> Record
        """Update attributes of this record via the driver."""
        return self.driver.update_record(record=self, name=name, type=type,
                                         data=data, extra=extra)

    def delete(self):
        # type: () -> bool
        """Delete this record via the driver."""
        return self.driver.delete_record(record=self)

    def _get_numeric_id(self):
        # type: () -> Union[int, str]
        """
        Return the record id as an ``int`` when it consists of digits only,
        the empty string when the id is missing, and the raw string
        otherwise.  Used as a sort key when exporting a zone to BIND format.
        """
        if self.id is None:
            return ''
        if self.id.isdigit():
            return int(self.id)
        return self.id

    def __repr__(self):
        # type: () -> str
        zone_label = self.zone.domain if self.zone.domain else self.zone.id
        return ('<Record: zone=%s, name=%s, type=%s, data=%s, provider=%s, '
                'ttl=%s ...>' %
                (zone_label, self.name, self.type, self.data,
                 self.driver.name, self.ttl))
class DNSDriver(BaseDriver):
    """
    A base DNSDriver class to derive from

    This class is always subclassed by a specific driver.
    """
    # Connection class used to talk to the provider's API.
    connectionCls = ConnectionUserAndKey # type: Type[Connection]
    # Human-readable provider name; set by each concrete driver.
    name = None # type: str
    # Provider website URL; set by each concrete driver.
    website = None # type: str
    # Map libcloud record type enum to provider record type name
    RECORD_TYPE_MAP = {} # type: Dict[RecordType, str]
def __init__(self,
key, # type: str
secret=None, # type: Optional[str]
secure=True, # type: bool
host=None, # type: Optional[str]
port=None, # type: Optional[int]
**kwargs # type: Optional[Any]
):
# type: (...) -> None
"""
:param key: API key or username to used (required)
:type key: ``str``
:param secret: Secret password to be used (required)
:type secret: ``str``
:param secure: Whether to use HTTPS or HTTP. Note: Some providers
only support HTTPS, and it is on by default.
:type secure: ``bool``
:param host: Override hostname used for connections.
:type host: ``str``
:param port: Override port used for connections.
:type port: ``int``
:return: ``None``
"""
super(DNSDriver, self).__init__(key=key, secret=secret, secure=secure,
host=host, port=port, **kwargs)
def list_record_types(self):
# type: () -> List[RecordType]
"""
Return a list of RecordType objects supported by the provider.
:return: ``list`` of :class:`RecordType`
"""
return list(self.RECORD_TYPE_MAP.keys())
def iterate_zones(self):
# type: () -> Iterator[Zone]
"""
Return a generator to iterate over available zones.
:rtype: ``generator`` of :class:`Zone`
"""
raise NotImplementedError(
'iterate_zones not implemented for this driver')
def list_zones(self):
# type: () -> List[Zone]
"""
Return a list of zones.
:return: ``list`` of :class:`Zone`
"""
return list(self.iterate_zones())
def iterate_records(self, zone):
# type: (Zone) -> Iterator[Record]
"""
Return a generator to iterate over records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:rtype: ``generator`` of :class:`Record`
"""
raise NotImplementedError(
'iterate_records not implemented for this driver')
def list_records(self, zone):
# type: (Zone) -> List[Record]
"""
Return a list of records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:return: ``list`` of :class:`Record`
"""
return list(self.iterate_records(zone))
def get_zone(self, zone_id):
# type: (str) -> Zone
"""
Return a Zone instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:rtype: :class:`Zone`
"""
raise NotImplementedError(
'get_zone not implemented for this driver')
def get_record(self, zone_id, record_id):
# type: (str, str) -> Record
"""
Return a Record instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:param record_id: ID of the required record
:type record_id: ``str``
:rtype: :class:`Record`
"""
raise NotImplementedError(
'get_record not implemented for this driver')
def create_zone(self, domain, type='master', ttl=None, extra=None):
# type: (str, str, Optional[int], Optional[dict]) -> Zone
"""
Create a new zone.
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (master / slave).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Zone`
"""
raise NotImplementedError(
'create_zone not implemented for this driver')
def update_zone(self,
zone, # type: Zone
domain, # type: Optional[str]
type='master', # type: Optional[str]
ttl=None, # type: Optional[int]
extra=None # type: Optional[dict]
):
# type: (...) -> Zone
"""
Update an existing zone.
:param zone: Zone to update.
:type zone: :class:`Zone`
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (master / slave).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Zone`
"""
raise NotImplementedError(
'update_zone not implemented for this driver')
def create_record(self, name, zone, type, data, extra=None):
# type: (str, Zone, RecordType, str, Optional[dict]) -> Record
"""
Create a new record.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created.
:type zone: :class:`Zone`
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Record`
"""
raise NotImplementedError(
'create_record not implemented for this driver')
def update_record(self,
record, # type: Record
name, # type: Optional[str]
type, # type: Optional[RecordType]
data, # type: Optional[str]
extra=None # type: Optional[dict]
):
"""
Update an existing record.
:param record: Record to update.
:type record: :class:`Record`
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: :class:`Record`
"""
raise NotImplementedError(
'update_record not implemented for this driver')
def delete_zone(self, zone):
# type: (Zone) -> bool
"""
Delete a zone.
Note: This will delete all the records belonging to this zone.
:param zone: Zone to delete.
:type zone: :class:`Zone`
:rtype: ``bool``
"""
raise NotImplementedError(
'delete_zone not implemented for this driver')
def delete_record(self, record):
# type: (Record) -> bool
"""
Delete a record.
:param record: Record to delete.
:type record: :class:`Record`
:rtype: ``bool``
"""
raise NotImplementedError(
'delete_record not implemented for this driver')
def export_zone_to_bind_format(self, zone):
# type: (Zone) -> str
"""
Export Zone object to the BIND compatible format.
:param zone: Zone to export.
:type zone: :class:`Zone`
:return: Zone data in BIND compatible format.
:rtype: ``str``
"""
if zone.type != 'master':
raise ValueError('You can only generate BIND out for master zones')
lines = []
# For consistent output, records are sorted based on the id
records = zone.list_records()
records = sorted(records, key=Record._get_numeric_id)
date = datetime.datetime.now().strftime('%Y-%m-%d %H:%m:%S')
values = {'version': __version__, 'date': date}
lines.append('; Generated by Libcloud v%(version)s on %(date)s' %
values)
lines.append('$ORIGIN %(domain)s.' % {'domain': zone.domain})
lines.append('$TTL %(domain_ttl)s\n' % {'domain_ttl': zone.ttl})
for record in records:
line = self._get_bind_record_line(record=record)
lines.append(line)
output = '\n'.join(lines)
return output
def export_zone_to_bind_zone_file(self, zone, file_path):
# type: (Zone, str) -> None
"""
Export Zone object to the BIND compatible format and write result to a
file.
:param zone: Zone to export.
:type zone: :class:`Zone`
:param file_path: File path where the output will be saved.
:type file_path: ``str``
"""
result = self.export_zone_to_bind_format(zone=zone)
with open(file_path, 'w') as fp:
fp.write(result)
def _get_bind_record_line(self, record):
# type: (Record) -> str
"""
Generate BIND record line for the provided record.
:param record: Record to generate the line for.
:type record: :class:`Record`
:return: Bind compatible record line.
:rtype: ``str``
"""
parts = [] # type: List[Any]
if record.name:
name = '%(name)s.%(domain)s' % {'name': record.name,
'domain': record.zone.domain}
else:
name = record.zone.domain
name += '.'
ttl = record.extra['ttl'] if 'ttl' in record.extra else record.zone.ttl
ttl = str(ttl)
data = record.data
if record.type in [RecordType.CNAME, RecordType.DNAME, RecordType.MX,
RecordType.PTR, RecordType.SRV]:
# Make sure trailing dot is present
if data[len(data) - 1] != '.':
data += '.'
if record.type in [RecordType.TXT, RecordType.SPF] and ' ' in data:
# Escape the quotes
data = data.replace('"', '\\"')
# Quote the string
data = '"%s"' % (data)
if record.type in [RecordType.MX, RecordType.SRV]:
priority = str(record.extra['priority'])
parts = [name, ttl, 'IN', str(record.type), priority, data]
else:
parts = [name, ttl, 'IN', str(record.type), data]
line = '\t'.join(parts)
return line
def _string_to_record_type(self, string):
# type: (str) -> RecordType
"""
Return a string representation of a DNS record type to a
libcloud RecordType ENUM.
:rtype: ``str``
"""
string = string.upper()
record_type = getattr(RecordType, string)
return record_type
| mistio/libcloud | libcloud/dns/base.py | Python | apache-2.0 | 18,129 |
from bs4 import BeautifulSoup
import asyncio
from aiohttp import ClientSession
import pandas as pd
import time
# NOTE(review): unconditional 20 second pause at import time -- presumably a
# crude rate-limit / startup delay before scraping; confirm it is intentional.
time.sleep(20)
def parse_and_get(data):
    """Parse one suggestions page and return (name, date, comment) rows
    as a DataFrame. Only the third <table> on the page is inspected."""
    soup = BeautifulSoup(data, 'html.parser')
    comment_table = soup.find_all('table')[2]

    def _texts(tag, css_class):
        # Collect the text of every matching element inside the table.
        return [el.text for el in
                comment_table.find_all(tag, attrs={'class': css_class})]

    names = _texts('span', 'comment-name')
    dates = _texts('span', 'comment-date')
    comments = _texts('div', 'comment-text')
    return pd.DataFrame(list(zip(names, dates, comments)))
async def get_and_store(page_no, session) -> pd.DataFrame:
    """Fetch one suggestions page and return it parsed as a DataFrame."""
    url = ('http://trace.bharatiyamobile.com/showallsuggestions.php'
           '?id=tracemobile&page={}'.format(page_no))
    async with session.get(url) as response:
        body = await response.read()
    return parse_and_get(body)
async def run(r):
    """Scrape pages 1..r concurrently and dump everything into one CSV."""
    async with ClientSession() as session:
        tasks = [asyncio.ensure_future(get_and_store(page, session))
                 for page in range(1, r + 1)]
        frames = await asyncio.gather(*tasks)
        # NOTE: output path is hard-coded for this one-off script.
        pd.concat(frames).to_csv('/home/apoorv/Downloads/1.csv')
# Kick off the scrape: fetch pages 1-191 and block until the CSV is written.
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(run(191))
loop.run_until_complete(future)
| apoorv-kumar/PyThugLife | RandomScripts/bharatiyamobile.py | Python | mit | 1,329 |
#
# This file is part of Mapnik (c++ mapping toolkit)
#
# Copyright (C) 2007 Artem Pavlenko, Jean-Francois Doyon
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# $Id$
# SCons build script for the mapnik OSM input plugin.
# Pull the shared construction environment exported by the parent SConstruct.
Import ('env')
# NOTE(review): 'prefix' appears unused below -- confirm it is still needed.
prefix = env['PREFIX']
# Clone so plugin-specific settings do not leak into other builds.
plugin_env = env.Clone()
osm_src = Split(
    """
    osmparser.cpp
    osm.cpp
    osm_datasource.cpp
    osm_featureset.cpp
    dataset_deliverer.cpp
    basiccurl.cpp
    """
    )
# libxml2 for XML parsing, curl for downloads, plus mapnik and ICU.
libraries = [ 'xml2' ]
libraries.append('curl')
libraries.append('mapnik')
libraries.append(env['ICU_LIB_NAME'])
# Build '../osm' as 'osm.input' (no 'lib' prefix, custom '.input' suffix).
input_plugin = plugin_env.SharedLibrary('../osm', source=osm_src, SHLIBPREFIX='', SHLIBSUFFIX='.input', LIBS=libraries, LINKFLAGS=env['CUSTOM_LDFLAGS'])
# if the plugin links to libmapnik ensure it is built first
Depends(input_plugin, env.subst('../../../src/%s' % env['MAPNIK_LIB_NAME']))
if 'uninstall' not in COMMAND_LINE_TARGETS:
    env.Install(env['MAPNIK_INPUT_PLUGINS_DEST'], input_plugin)
    env.Alias('install', env['MAPNIK_INPUT_PLUGINS_DEST'])
| mojodna/debian-mapnik | plugins/input/osm/build.py | Python | lgpl-2.1 | 1,631 |
#!/usr/bin/env python
#
# Copyright (c) 2018 The heketi Authors
#
# This file is licensed to you under your choice of the GNU Lesser
# General Public License, version 3 or any later version (LGPLv3 or
# later), or the GNU General Public License, version 2 (GPLv2), in all
# cases as published by the Free Software Foundation.
#
import argparse
import json
import sys
import yaml
DESC = """
Compare outputs of gluster and/or heketi and/or openshift/k8s.
Prints lists of volumes where sources differ.
"""
EXAMPLE= """
Example:
$ python3 comparison.py
--gluster-info gluster-volume-info.txt
--heketi-json heketi-db.json
--pv-yaml openshift-pv-yaml.yaml
"""
def main():
    """Parse CLI options, load the requested sources and print a
    per-volume comparison (see DESC/EXAMPLE for usage)."""
    parser = argparse.ArgumentParser(description=DESC, epilog=EXAMPLE)
    parser.add_argument(
        '--gluster-info', '-g',
        help='Path to a file containing gluster volume info')
    parser.add_argument(
        '--heketi-json', '-j',
        help='Path to a file containing Heketi db json export')
    parser.add_argument(
        '--pv-yaml', '-y',
        help='Path to a file containing PV yaml data')
    parser.add_argument(
        '--skip-ok', '-K', action='store_true',
        help='Exclude matching items from output')
    parser.add_argument(
        '--pending', action='store_true',
        help='Show heketi pending status (best effort)')
    parser.add_argument(
        '--no-header', '-H', action='store_true',
        help='Do not print column header')
    parser.add_argument(
        '--ignore', '-I', action='append',
        # Typo fix in user-facing help text: 'Exlude' -> 'Exclude'.
        help='Exclude given volume name (multiple allowed)')
    cli = parser.parse_args()

    # 'check' records which sources were provided; a volume must appear in
    # every one of them to count as matching.
    check = []
    gvinfo = heketi = pvdata = None
    if cli.gluster_info:
        check.append('gluster')
        gvinfo = parse_gvinfo(cli.gluster_info)
    if cli.heketi_json:
        check.append('heketi')
        heketi = parse_heketi(cli.heketi_json)
    if cli.pv_yaml:
        check.append('pvs')
        pvdata = parse_oshift(cli.pv_yaml)

    if not check:
        parser.error(
            "Must provide: --gluster-info OR --heketi-json OR --pv-yaml")

    summary = compile_summary(gvinfo, heketi, pvdata)

    # Drop explicitly ignored volumes before comparing.
    for ign in (cli.ignore or []):
        if summary.pop(ign, None):
            sys.stderr.write('ignoring: {}\n'.format(ign))

    compare(summary, check, cli.skip_ok,
            header=(not cli.no_header),
            show_pending=(cli.pending))
    return
def parse_heketi(h_json):
    """Load a Heketi db export (JSON) from the given file path."""
    with open(h_json) as source:
        return json.load(source)
def parse_oshift(yf):
    """Load openshift/k8s PV data (YAML) from the given file path."""
    with open(yf) as source:
        return yaml.safe_load(source)
def parse_gvlist(gvl):
    """Parse a file with one gluster volume name per line into
    {volume_name: []}."""
    with open(gvl) as source:
        return {line.strip(): [] for line in source}
def parse_gvinfo(gvi):
    """Parse `gluster volume info` output into {volume_name: [bricks]}."""
    vols = {}
    current = None
    with open(gvi) as source:
        for raw in source:
            line = raw.strip()
            if line.startswith("Volume Name:"):
                current = line.split(":", 1)[-1].strip()
                vols[current] = []
            # 'Brick1:', 'Brick2:', ... carry bricks; the bare 'Bricks:'
            # section header is skipped.
            if line.startswith('Brick') and line != "Bricks:":
                if current is None:
                    raise ValueError("Got Brick before volume: %s" % line)
                vols[current].append(line.split(":", 1)[-1].strip())
    return vols
def compile_heketi(summary, heketi):
    """Record every heketi volume (and its pending state) in summary."""
    for vol_id, entry in heketi['volumeentries'].items():
        name = entry['Info']['name']
        summary[name] = {'id': vol_id, 'heketi': True}
        # A non-empty pending id means the volume operation is in flight.
        if entry['Pending']['Id']:
            summary[name]['heketi-pending'] = True
def compile_gvinfo(summary, gvinfo):
    """Flag every volume found in the gluster volume info in summary."""
    for name in gvinfo:
        summary.setdefault(name, {})['gluster'] = True
def compile_pvdata(summary, pvdata):
    """Flag every volume backing a glusterfs PV in summary."""
    for item in pvdata['items']:
        gluster = item.get('spec', {}).get('glusterfs', {})
        # Ignore PVs that are not glusterfs-backed.
        if not gluster:
            continue
        summary.setdefault(gluster['path'], {})['pvs'] = True
def compile_summary(gvinfo, heketi, pvdata):
    """Merge all available sources into one {volume: flags} dict."""
    summary = {}
    # Order matters only in that heketi contributes ids first.
    for data, collect in ((heketi, compile_heketi),
                          (gvinfo, compile_gvinfo),
                          (pvdata, compile_pvdata)):
        if data:
            collect(summary, data)
    return summary
def compare(summary, check, skip_ok=False, header=True, show_pending=False):
    """Print one row per volume showing which sources it appears in."""
    printer = Printer(['Volume-Name', 'Match', 'Volume-ID'] if header else [])
    for name, flags in summary.items():
        matched = all(flags.get(src) for src in check)
        if matched and skip_ok:
            continue
        heketi_info = flags.get('id', '')
        if show_pending and flags.get('heketi-pending'):
            heketi_info += '/pending'
        if matched:
            printer.line(name, 'ok', heketi_info)
        else:
            # List only the sources that actually contain the volume.
            partial = ','.join(sorted(s for s in check if flags.get(s)))
            printer.line(name, partial, heketi_info)
class Printer(object):
    """Utility class for printing columns w/ headers."""

    def __init__(self, header):
        # The header row is emitted lazily, on the first call to line().
        self._did_header = False
        self.header = header or []

    def line(self, *columns):
        if self.header and not self._did_header:
            self._print_header(columns)
            self._did_header = True
        print(' '.join(columns))

    def _print_header(self, columns):
        # Pad each title so it is at least as wide as its first data cell.
        cells = []
        for idx, title in enumerate(self.header):
            padding = max(0, len(columns[idx]) - len(title))
            cells.append('{}{}'.format(title, ' ' * padding))
        print(' '.join(cells))
# CLI entry point.
if __name__ == '__main__':
    main()
| enj/origin | vendor/github.com/heketi/heketi/extras/tools/comparison.py | Python | apache-2.0 | 5,562 |
from django import forms
from .models import Topic, Entry
class TopicForm(forms.ModelForm):
    """Form for creating and editing :class:`Topic` objects."""
    class Meta:
        model = Topic
        # Only the topic text is user-editable.
        fields = ['text']
        # Empty label: the template renders the field without a caption.
        labels = {'text': ''}
class EntryForm(forms.ModelForm):
    """Form for entering an article (entry) under a topic."""
    class Meta:
        model = Entry
        fields = ['text']
        labels = {'text': ''}
        # Wider textarea for long entry text.
        widgets = {'text': forms.Textarea(attrs={'cols': 80})}
| lluxury/pcc_exercise | learning_logs/learning_logs/forms.py | Python | mit | 404 |
#!/bin/python
""" scenario_import
Usage:
scenario_import.py [options] NODE_DATA SEQ_DATA RESULTS_DATA
scenario_import.py -h | --help
Examples:
python scenario_import.py -p open_eGo -c 'Increased net transfer capacity NO'
path/to/scenario.csv path/to/scenario-seq.csv path/to/results.csv
Arguments:
NODE_DATA CSV-file containing data for nodes and flows.
SEQ_DATA CSV-file with data for sequences.
RESULTS_DATA CSV-file containing results.
Options:
-h --help Show this screen and exit.
-n --name=NAME Name of the scenario. [default: '']
-p --project=PROJECT Name of the related project. [default: open_eGo]
-c --comment=COMMENT Provide a comment. [default: '']
-v --version=VERSION Provide the version of renpass_gis.
[default: v0.1]
--repository If NODE_DATA is under git version control the
repository name and current commit is added.
--sep=SEP Delimiter used in CSV-files. [default: ,]
Notes:
For DB connectivity a configuration file ~.open_eGo/config.ini containing
the following section is needed.
[oedb]
username =
password =
host =
port =
"""
__copyright__ = "ZNES"
__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
__url__ = "https://github.com/openego/data_processing/blob/master/LICENSE"
__author__ = "s3pp"
import subprocess
import pandas as pd
from docopt import docopt
from pathlib import Path
from subprocess import CalledProcessError
def asfloat(x):
    """Best-effort float conversion.

    Booleans and values that cannot be parsed as a float are returned
    unchanged; everything else is converted with float().
    """
    if isinstance(x, bool):
        # do not convert a boolean
        return x
    try:
        return float(x)
    except ValueError:
        return x
def read_input(**kwargs):
    """ Read CSV-files

    Parameters
    ----------
    **kwargs : key word arguments
        Arguments passed from command line (docopt dictionary; uses
        'NODE_DATA', 'SEQ_DATA' and '--sep')

    Returns
    -------
    nodes_flows : DataFrame
        Data for nodes and flows.
    seq : DataFrame
        Sequence data, indexed by (class, label, source, target, attribute).
    """
    nodes_flows = pd.read_csv(kwargs['NODE_DATA'], sep=kwargs['--sep'])

    # The sequence file is column-oriented: drop the leading label column,
    # transpose, then use the first five rows as a hierarchical index.
    seq = pd.read_csv(kwargs['SEQ_DATA'], sep=kwargs['--sep'], header=None)
    seq.dropna(axis=0, how='all', inplace=True)
    seq.drop(0, axis=1, inplace=True)
    seq = seq.transpose()
    seq.set_index([0, 1, 2, 3, 4], inplace=True)
    seq.columns = range(0, len(seq.columns))
    seq = seq.astype(float)

    return nodes_flows, seq
def check_git(**kwargs):
    """ Extract git information

    Looks at the directory containing NODE_DATA and, when it is inside a
    git repository, stores the repository name ('whichrepo') and the short
    commit hash ('commit') in the module-global ``arguments`` dict.

    Parameters
    ----------
    **kwargs : key word arguments
        Arguments passed from command line

    Raises
    ------
    CalledProcessError is caught internally when the parent directory is
    not a git repository; a message is printed instead.
    """
    global arguments
    pardir = str(Path(kwargs['NODE_DATA']).parent)

    try:
        repo = subprocess.check_output(
            "basename `git rev-parse --show-toplevel`",
            shell=True, cwd=pardir)
        arguments.update({'whichrepo': str(repo, 'utf-8').rstrip()})

        commit = subprocess.check_output(
            ['git', 'rev-parse', '--short', 'HEAD'], cwd=pardir)
        arguments.update({'commit': str(commit, 'utf-8').rstrip()})
    except CalledProcessError:
        print('"%s" seems not to be a git repository!' % pardir)
def write_info(**kwargs):
    """ Write key information about the scenario to the DB.

    Parameters
    ----------
    **kwargs : key word arguments
        Arguments passed from command line

    Note
    ----
    Stores the DB-assigned scenario id under key 'id' in the module-global
    ``arguments`` dict for use by later steps.
    """
    global arguments

    scenario = Scenario(name=kwargs['--name'],
                        project=kwargs['--project'],
                        repository=arguments.get('whichrepo', ''),
                        commit=arguments.get('commit', ''),
                        compatibility=kwargs['--version'],
                        comment=kwargs['--comment'])
    session.add(scenario)
    session.commit()

    arguments.update({'id': scenario.id})
def write_data(nodes_flows, nodes_flows_seq, **kwargs):
    """ Write scenario data to the DB.

    Parameters
    ----------
    nodes_flows : pandas.DataFrame
        DataFrame with data for nodes and corresponding flows
    nodes_flows_seq : pandas.DataFrame
        DataFrame with sequence data for nodes/flows
    **kwargs : key word arguments
        Arguments passed from command line
    """
    global arguments
    # dictionary with all mapped tables
    class_dict = {'Source': Source, 'LinearTransformer': LinearTransformer,
                  'Sink': Sink, 'Storage': Storage}
    empty = []
    for idx, series in nodes_flows.iterrows():
        # filter for empty rows
        series = series.dropna()
        try:
            obj = class_dict[series['class']]()
        except KeyError:
            # row without a known class (comment/empty row): remember the
            # index and report it after the loop
            empty.append(idx)
            continue
        # map table fields to keys of row series
        for col in obj.__table__.columns.keys():
            ignore = ['id']
            # scenario_id links the row to the scenario created by write_info()
            if col == 'scenario_id':
                setattr(obj, col, arguments['id'])
                continue
            if col not in ignore and col in series:
                prop = asfloat(series[col])
                # the literal 'seq' marks values stored in the sequence file
                if prop == 'seq':
                    seq = nodes_flows_seq.loc[series['class'],
                                              series['label'],
                                              series['source'],
                                              series['target'],
                                              col]
                    setattr(obj, col, list(seq))
                elif isinstance(prop, float):
                    # scalar floats are wrapped in a one-element list
                    # (presumably array-typed DB columns -- confirm)
                    setattr(obj, col, [prop])
                else:
                    setattr(obj, col, prop)
        session.add(obj)
    for l in empty:
        print('Comment or empty row at index %s.' % l)
    session.commit()
def write_results(**kwargs):
    """ Write results file to DB.

    Parameters
    ----------
    **kwargs : key word arguments
        Arguments passed from command line

    Note
    ----
    For higher performance a single insert statement is created.
    Faster than using SQLA ORM and SQLA CORE as described here:
    http://www.devx.com/dbzone/optimize-inserts-using-sqlalchemy.html
    """
    global arguments
    # read csv, add scenario_id
    df = pd.read_csv(kwargs['RESULTS_DATA'])
    df.columns = [c.lower() for c in df.columns]
    df.insert(0, 'scenario_id', arguments['id'])
    # single quotes around string cols necessary for query statement
    cols = ['datetime', 'bus_label', 'type', 'obj_label']
    df[cols] = df[cols].applymap(lambda x: "'" + x + "'")
    # best solution performance wise, a single insert statement is created
    # NOTE(review): values are interpolated directly into the SQL string;
    # this is safe only while RESULTS_DATA is trusted input. Parameterized
    # inserts would be the robust alternative -- confirm the trust model.
    records = [tuple(x) for x in df.values]
    records = ",".join("(" + ",".join(str(i) for i in r) + ")" for
                       r in records)
    target = Results.__table__.schema + '.' + Results.__table__.name
    head = ("INSERT INTO " + target + " (scenario_id,"
            " bus_label, type, obj_label, datetime, val) VALUES ")
    # export to db
    conn.execute(head + records)
def main(**arguments):
    """Run the full import pipeline: parse the input files, then write
    scenario metadata, node/flow data and results to the database."""
    nf, nf_seq = read_input(**arguments)

    if arguments['--repository']:
        check_git(**arguments)

    print('Writing scenario entry.')
    write_info(**arguments)

    print('Writing scenario data.')
    write_data(nodes_flows=nf, nodes_flows_seq=nf_seq, **arguments)

    print('Writing results to db.')
    write_results(**arguments)
if __name__ == '__main__':
    arguments = docopt(__doc__, version='scenario_import.py v0.1')
    print('Start!')
    # DB mappings/session are imported lazily so that importing this module
    # does not require a database connection.
    from db import (LinearTransformer, Source, Sink, Scenario, Storage,
                    session, Results, conn)
    main(**arguments)
    print('Done!!!')
| openego/data_processing | preprocessing/python_scripts/renpass_gis/scenario_import/scenario_import.py | Python | agpl-3.0 | 8,426 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class QuestionItem(scrapy.Item):
    """Scraped question (Zhidao-style Q&A page)."""
    # define the fields for your item here like:
    qid = scrapy.Field()  # question id
    class_info = scrapy.Field()  # question category
    ask_title = scrapy.Field()
    content = scrapy.Field()
    ask_time = scrapy.Field()  # time the question was asked
    ask_tags = scrapy.Field()  # question tags, currently joined with ","
    people_link = scrapy.Field()  # link to the asker's profile
    answers = scrapy.Field()
class AnswerItem(scrapy.Item):
    """Scraped answer attached to a question."""
    # answer classification: accepted by the asker, professional answer,
    # accepted by other users, or a regular answer
    mode = scrapy.Field()
    pos_time = scrapy.Field()  # time the answer was posted
    content = scrapy.Field()
    people_link = scrapy.Field()  # link to the answerer's profile
| madre/xundao | zhidao_scrapy/zhidao/items.py | Python | apache-2.0 | 869 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras convolution layers and image transformation layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras import activations
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import constraints
from tensorflow.contrib.keras.python.keras import initializers
from tensorflow.contrib.keras.python.keras import regularizers
from tensorflow.contrib.keras.python.keras.engine import InputSpec
from tensorflow.contrib.keras.python.keras.engine import Layer
# imports for backwards namespace compatibility
# pylint: disable=unused-import
from tensorflow.contrib.keras.python.keras.layers.pooling import AveragePooling1D
from tensorflow.contrib.keras.python.keras.layers.pooling import AveragePooling2D
from tensorflow.contrib.keras.python.keras.layers.pooling import AveragePooling3D
from tensorflow.contrib.keras.python.keras.layers.pooling import MaxPooling1D
from tensorflow.contrib.keras.python.keras.layers.pooling import MaxPooling2D
from tensorflow.contrib.keras.python.keras.layers.pooling import MaxPooling3D
# pylint: enable=unused-import
from tensorflow.contrib.keras.python.keras.utils import conv_utils
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import convolutional as tf_convolutional_layers
class Conv1D(tf_convolutional_layers.Conv1D, Layer):
  """1D convolution layer (e.g. temporal convolution).

  Thin Keras wrapper around `tf.layers.Conv1D`: it resolves string
  identifiers (activation, initializers, regularizers, constraints) into
  the corresponding Keras objects before delegating to the TF layer, and
  adds Keras-style `get_config` serialization.

  Arguments:
    filters: Integer, dimensionality of the output space.
    kernel_size: Integer or tuple/list of a single integer, length of the
      1D convolution window.
    strides: Integer or tuple/list of a single integer, convolution stride.
      Any value != 1 is incompatible with `dilation_rate` != 1.
    padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive).
      `"causal"` yields dilated causal convolutions (output[t] does not
      depend on input[t+1:]).
    dilation_rate: Integer or tuple/list of a single integer, dilation
      rate for dilated convolution.
    activation: Activation function (name or callable); `None` means
      linear activation.
    use_bias: Boolean, whether a bias vector is added.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer for the kernel matrix.
    bias_regularizer: Regularizer for the bias vector.
    activity_regularizer: Regularizer applied to the layer output.
    kernel_constraint: Constraint for the kernel matrix.
    bias_constraint: Constraint for the bias vector.

  Input shape:
    3D tensor with shape: `(batch_size, steps, input_dim)`

  Output shape:
    3D tensor with shape: `(batch_size, new_steps, filters)`
    `steps` value might have changed due to padding or strides.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=1,
               padding='valid',
               dilation_rate=1,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    super(Conv1D, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        # 1D convolutions are always treated as channels-last here.
        data_format='channels_last',
        dilation_rate=dilation_rate,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        kernel_constraint=constraints.get(kernel_constraint),
        bias_constraint=constraints.get(bias_constraint),
        **kwargs)

  def get_config(self):
    # Start from the base layer config, then append the Conv1D fields.
    config = super(Conv1D, self).get_config()
    config.update({
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'dilation_rate': self.dilation_rate,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint),
    })
    return config
class Conv2D(tf_convolutional_layers.Conv2D, Layer):
  """2D convolution layer (e.g. spatial convolution over images).

  Thin Keras wrapper around `tf.layers.Conv2D`: it resolves string
  identifiers (activation, initializers, regularizers, constraints) into
  the corresponding Keras objects before delegating to the TF layer, and
  adds Keras-style `get_config` serialization.

  Arguments:
    filters: Integer, dimensionality of the output space.
    kernel_size: Integer or tuple/list of 2 integers, height and width of
      the 2D convolution window.
    strides: Integer or tuple/list of 2 integers, strides along height and
      width. Any value != 1 is incompatible with `dilation_rate` != 1.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: `"channels_last"` or `"channels_first"`; when `None`,
      falls back to the `image_data_format` from the Keras config
      (`~/.keras/keras.json`, default `"channels_last"`).
    dilation_rate: Integer or tuple/list of 2 integers, dilation rate for
      dilated convolution.
    activation: Activation function (name or callable); `None` means
      linear activation.
    use_bias: Boolean, whether a bias vector is added.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer for the kernel matrix.
    bias_regularizer: Regularizer for the bias vector.
    activity_regularizer: Regularizer applied to the layer output.
    kernel_constraint: Constraint for the kernel matrix.
    bias_constraint: Constraint for the bias vector.

  Input shape:
    4D tensor with shape:
    `(samples, channels, rows, cols)` if data_format='channels_first'
    or 4D tensor with shape:
    `(samples, rows, cols, channels)` if data_format='channels_last'.

  Output shape:
    4D tensor with shape:
    `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
    or 4D tensor with shape:
    `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
    `rows` and `cols` values might have changed due to padding.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1),
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    # Resolve the global Keras image format when none was given.
    if data_format is None:
      data_format = K.image_data_format()
    super(Conv2D, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        kernel_constraint=constraints.get(kernel_constraint),
        bias_constraint=constraints.get(bias_constraint),
        **kwargs)

  def get_config(self):
    # Start from the base layer config, then append the Conv2D fields.
    config = super(Conv2D, self).get_config()
    config.update({
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'data_format': self.data_format,
        'dilation_rate': self.dilation_rate,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint),
    })
    return config
class Conv3D(tf_convolutional_layers.Conv3D, Layer):
  """3D convolution layer (e.g. spatial convolution over volumes).

  This layer creates a convolution kernel that is convolved
  with the layer input to produce a tensor of
  outputs. If `use_bias` is True,
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.

  When using this layer as the first layer in a model,
  provide the keyword argument `input_shape`
  (tuple of integers, does not include the sample axis),
  e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes
  with a single channel,
  in `data_format="channels_last"`.

  Arguments:
      filters: Integer, the dimensionality of the output space
          (i.e. the number of output filters in the convolution).
      kernel_size: An integer or tuple/list of 3 integers, specifying the
          depth, height and width of the 3D convolution window.
          Can be a single integer to specify the same value for
          all spatial dimensions.
      strides: An integer or tuple/list of 3 integers,
          specifying the strides of the convolution along each spatial
          dimension.
          Can be a single integer to specify the same value for
          all spatial dimensions.
          Specifying any stride value != 1 is incompatible with specifying
          any `dilation_rate` value != 1.
      padding: one of `"valid"` or `"same"` (case-insensitive).
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
          while `channels_first` corresponds to inputs with shape
          `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".
      dilation_rate: an integer or tuple/list of 3 integers, specifying
          the dilation rate to use for dilated convolution.
          Can be a single integer to specify the same value for
          all spatial dimensions.
          Currently, specifying any `dilation_rate` value != 1 is
          incompatible with specifying any stride value != 1.
      activation: Activation function to use.
          If you don't specify anything, no activation is applied
          (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix.
      bias_initializer: Initializer for the bias vector.
      kernel_regularizer: Regularizer function applied to
          the `kernel` weights matrix.
      bias_regularizer: Regularizer function applied to the bias vector.
      activity_regularizer: Regularizer function applied to
          the output of the layer (its "activation").
      kernel_constraint: Constraint function applied to the kernel matrix.
      bias_constraint: Constraint function applied to the bias vector.

  Input shape:
      5D tensor with shape:
      `(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if
        data_format='channels_first'
      or 5D tensor with shape:
      `(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if
        data_format='channels_last'.

  Output shape:
      5D tensor with shape:
      `(samples, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if
        data_format='channels_first'
      or 5D tensor with shape:
      `(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if
        data_format='channels_last'.
      `new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have
      changed due to padding.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1, 1),
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    """Create the layer, deserializing string identifiers into objects."""
    if data_format is None:
      data_format = K.image_data_format()
    super(Conv3D, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        kernel_constraint=constraints.get(kernel_constraint),
        bias_constraint=constraints.get(bias_constraint),
        **kwargs)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = {
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'data_format': self.data_format,
        'dilation_rate': self.dilation_rate,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(Conv3D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Conv2DTranspose(tf_convolutional_layers.Conv2DTranspose, Layer):
  """Transposed convolution layer (sometimes called Deconvolution).

  The need for transposed convolutions generally arises
  from the desire to use a transformation going in the opposite direction
  of a normal convolution, i.e., from something that has the shape of the
  output of some convolution to something that has the shape of its input
  while maintaining a connectivity pattern that is compatible with
  said convolution.

  When using this layer as the first layer in a model,
  provide the keyword argument `input_shape`
  (tuple of integers, does not include the sample axis),
  e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
  in `data_format="channels_last"`.

  Arguments:
      filters: Integer, the dimensionality of the output space
          (i.e. the number of output filters in the convolution).
      kernel_size: An integer or tuple/list of 2 integers, specifying the
          width and height of the 2D convolution window.
          Can be a single integer to specify the same value for
          all spatial dimensions.
      strides: An integer or tuple/list of 2 integers,
          specifying the strides of the convolution along the width and height.
          Can be a single integer to specify the same value for
          all spatial dimensions.
      padding: one of `"valid"` or `"same"` (case-insensitive).
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, height, width, channels)` while `channels_first`
          corresponds to inputs with shape
          `(batch, channels, height, width)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".
      activation: Activation function to use.
          If you don't specify anything, no activation is applied
          (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix.
      bias_initializer: Initializer for the bias vector.
      kernel_regularizer: Regularizer function applied to
          the `kernel` weights matrix.
      bias_regularizer: Regularizer function applied to the bias vector.
      activity_regularizer: Regularizer function applied to
          the output of the layer (its "activation").
      kernel_constraint: Constraint function applied to the kernel matrix.
      bias_constraint: Constraint function applied to the bias vector.

  Input shape:
      4D tensor with shape:
      `(batch, channels, rows, cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(batch, rows, cols, channels)` if data_format='channels_last'.

  Output shape:
      4D tensor with shape:
      `(batch, filters, new_rows, new_cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
      `rows` and `cols` values might have changed due to padding.

  References:
      - [A guide to convolution arithmetic for deep
        learning](https://arxiv.org/abs/1603.07285v1)
      - [Deconvolutional
        Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    """Create the layer, deserializing string identifiers into objects."""
    if data_format is None:
      data_format = K.image_data_format()
    super(Conv2DTranspose, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        kernel_constraint=constraints.get(kernel_constraint),
        bias_constraint=constraints.get(bias_constraint),
        **kwargs)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = {
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'data_format': self.data_format,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(Conv2DTranspose, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Conv3DTranspose(tf_convolutional_layers.Conv3DTranspose, Layer):
  """Transposed convolution layer (sometimes called Deconvolution).

  The need for transposed convolutions generally arises
  from the desire to use a transformation going in the opposite direction
  of a normal convolution, i.e., from something that has the shape of the
  output of some convolution to something that has the shape of its input
  while maintaining a connectivity pattern that is compatible with
  said convolution.

  When using this layer as the first layer in a model,
  provide the keyword argument `input_shape`
  (tuple of integers, does not include the sample axis),
  e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3
  channels if `data_format="channels_last"`.

  Arguments:
      filters: Integer, the dimensionality of the output space
          (i.e. the number of output filters in the convolution).
      kernel_size: An integer or tuple/list of 3 integers, specifying the
          depth, height and width of the 3D convolution window.
          Can be a single integer to specify the same value for
          all spatial dimensions.
      strides: An integer or tuple/list of 3 integers,
          specifying the strides of the convolution along the depth, height
          and width.
          Can be a single integer to specify the same value for
          all spatial dimensions.
      padding: one of `"valid"` or `"same"` (case-insensitive).
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, depth, height, width, channels)` while `channels_first`
          corresponds to inputs with shape
          `(batch, channels, depth, height, width)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".
      activation: Activation function to use
          (see [activations](../activations.md)).
          If you don't specify anything, no activation is applied
          (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix
          (see [initializers](../initializers.md)).
      bias_initializer: Initializer for the bias vector
          (see [initializers](../initializers.md)).
      kernel_regularizer: Regularizer function applied to
          the `kernel` weights matrix
          (see [regularizer](../regularizers.md)).
      bias_regularizer: Regularizer function applied to the bias vector
          (see [regularizer](../regularizers.md)).
      activity_regularizer: Regularizer function applied to
          the output of the layer (its "activation")
          (see [regularizer](../regularizers.md)).
      kernel_constraint: Constraint function applied to the kernel matrix
          (see [constraints](../constraints.md)).
      bias_constraint: Constraint function applied to the bias vector
          (see [constraints](../constraints.md)).

  Input shape:
      5D tensor with shape:
      `(batch, channels, depth, rows, cols)` if data_format='channels_first'
      or 5D tensor with shape:
      `(batch, depth, rows, cols, channels)` if data_format='channels_last'.

  Output shape:
      5D tensor with shape:
      `(batch, filters, new_depth, new_rows, new_cols)` if
        data_format='channels_first'
      or 5D tensor with shape:
      `(batch, new_depth, new_rows, new_cols, filters)` if
        data_format='channels_last'.
      `depth` and `rows` and `cols` values might have changed due to padding.

  References:
      - [A guide to convolution arithmetic for deep
        learning](https://arxiv.org/abs/1603.07285v1)
      - [Deconvolutional
        Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1, 1),
               padding='valid',
               data_format=None,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    """Create the layer, deserializing string identifiers into objects."""
    if data_format is None:
      data_format = K.image_data_format()
    # NOTE(review): base class was tf_convolutional_layers.Conv3D, which is a
    # forward convolution; corrected to Conv3DTranspose to match the class
    # name, the docstring, and the Conv2DTranspose sibling above.
    super(Conv3DTranspose, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        kernel_constraint=constraints.get(kernel_constraint),
        bias_constraint=constraints.get(bias_constraint),
        **kwargs)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = {
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'data_format': self.data_format,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(Conv3DTranspose, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class SeparableConv2D(tf_convolutional_layers.SeparableConv2D, Layer):
  """Depthwise separable 2D convolution.

  Separable convolutions consist in first performing
  a depthwise spatial convolution
  (which acts on each input channel separately)
  followed by a pointwise convolution which mixes together the resulting
  output channels. The `depth_multiplier` argument controls how many
  output channels are generated per input channel in the depthwise step.

  Intuitively, separable convolutions can be understood as
  a way to factorize a convolution kernel into two smaller kernels,
  or as an extreme version of an Inception block.

  Arguments:
      filters: Integer, the dimensionality of the output space
          (i.e. the number of output filters in the convolution).
      kernel_size: An integer or tuple/list of 2 integers, specifying the
          width and height of the 2D convolution window.
          Can be a single integer to specify the same value for
          all spatial dimensions.
      strides: An integer or tuple/list of 2 integers,
          specifying the strides of the convolution along the width and height.
          Can be a single integer to specify the same value for
          all spatial dimensions.
          Specifying any stride value != 1 is incompatible with specifying
          any `dilation_rate` value != 1.
      padding: one of `"valid"` or `"same"` (case-insensitive).
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, height, width, channels)` while `channels_first`
          corresponds to inputs with shape
          `(batch, channels, height, width)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".
      depth_multiplier: The number of depthwise convolution output channels
          for each input channel.
          The total number of depthwise convolution output
          channels will be equal to `filters_in * depth_multiplier`.
      activation: Activation function to use.
          If you don't specify anything, no activation is applied
          (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      depthwise_initializer: Initializer for the depthwise kernel matrix.
      pointwise_initializer: Initializer for the pointwise kernel matrix.
      bias_initializer: Initializer for the bias vector.
      depthwise_regularizer: Regularizer function applied to
          the depthwise kernel matrix.
      pointwise_regularizer: Regularizer function applied to
          the pointwise kernel matrix.
      bias_regularizer: Regularizer function applied to the bias vector.
      activity_regularizer: Regularizer function applied to
          the output of the layer (its "activation").
      depthwise_constraint: Constraint function applied to
          the depthwise kernel matrix.
      pointwise_constraint: Constraint function applied to
          the pointwise kernel matrix.
      bias_constraint: Constraint function applied to the bias vector.

  Input shape:
      4D tensor with shape:
      `(batch, channels, rows, cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(batch, rows, cols, channels)` if data_format='channels_last'.

  Output shape:
      4D tensor with shape:
      `(batch, filters, new_rows, new_cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
      `rows` and `cols` values might have changed due to padding.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               depth_multiplier=1,
               activation=None,
               use_bias=True,
               depthwise_initializer='glorot_uniform',
               pointwise_initializer='glorot_uniform',
               bias_initializer='zeros',
               depthwise_regularizer=None,
               pointwise_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               depthwise_constraint=None,
               pointwise_constraint=None,
               bias_constraint=None,
               **kwargs):
    """Create the layer, deserializing string identifiers into objects."""
    if data_format is None:
      data_format = K.image_data_format()
    super(SeparableConv2D, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activations.get(activation),
        use_bias=use_bias,
        depthwise_initializer=initializers.get(depthwise_initializer),
        pointwise_initializer=initializers.get(pointwise_initializer),
        bias_initializer=initializers.get(bias_initializer),
        depthwise_regularizer=regularizers.get(depthwise_regularizer),
        pointwise_regularizer=regularizers.get(pointwise_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        depthwise_constraint=constraints.get(depthwise_constraint),
        pointwise_constraint=constraints.get(pointwise_constraint),
        bias_constraint=constraints.get(bias_constraint),
        **kwargs)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = {
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'data_format': self.data_format,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'depthwise_initializer': initializers.serialize(
            self.depthwise_initializer),
        'pointwise_initializer': initializers.serialize(
            self.pointwise_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'depthwise_regularizer': regularizers.serialize(
            self.depthwise_regularizer),
        'pointwise_regularizer': regularizers.serialize(
            self.pointwise_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'depthwise_constraint': constraints.serialize(
            self.depthwise_constraint),
        'pointwise_constraint': constraints.serialize(
            self.pointwise_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(SeparableConv2D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class UpSampling1D(Layer):
  """Upsampling layer for 1D inputs.

  Each temporal step is repeated `size` times along the time axis.

  Arguments:
      size: integer. Upsampling factor.

  Input shape:
      3D tensor with shape: `(batch, steps, features)`.

  Output shape:
      3D tensor with shape: `(batch, upsampled_steps, features)`.
  """

  def __init__(self, size=2, **kwargs):
    super(UpSampling1D, self).__init__(**kwargs)
    self.size = int(size)
    self.input_spec = InputSpec(ndim=3)

  def _compute_output_shape(self, input_shape):
    """Stretch the time dimension by the upsampling factor."""
    shape = tensor_shape.TensorShape(input_shape).as_list()
    steps = None if shape[1] is None else self.size * shape[1]
    return tensor_shape.TensorShape([shape[0], steps, shape[2]])

  def call(self, inputs):
    return K.repeat_elements(inputs, self.size, axis=1)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    merged = dict(super(UpSampling1D, self).get_config())
    merged['size'] = self.size
    return merged
class UpSampling2D(Layer):
  """Upsampling layer for 2D inputs.

  Repeats the rows and columns of the data
  by size[0] and size[1] respectively.

  Arguments:
      size: int, or tuple of 2 integers.
          The upsampling factors for rows and columns.
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, height, width, channels)` while `channels_first`
          corresponds to inputs with shape
          `(batch, channels, height, width)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".

  Input shape:
      4D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, rows, cols, channels)`
      - If `data_format` is `"channels_first"`:
          `(batch, channels, rows, cols)`

  Output shape:
      4D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, upsampled_rows, upsampled_cols, channels)`
      - If `data_format` is `"channels_first"`:
          `(batch, channels, upsampled_rows, upsampled_cols)`
  """

  def __init__(self, size=(2, 2), data_format=None, **kwargs):
    super(UpSampling2D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.size = conv_utils.normalize_tuple(size, 2, 'size')
    self.input_spec = InputSpec(ndim=4)

  def _compute_output_shape(self, input_shape):
    """Scale the two spatial dimensions by the upsampling factors."""
    shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_first':
      rows = None if shape[2] is None else self.size[0] * shape[2]
      cols = None if shape[3] is None else self.size[1] * shape[3]
      return tensor_shape.TensorShape([shape[0], shape[1], rows, cols])
    rows = None if shape[1] is None else self.size[0] * shape[1]
    cols = None if shape[2] is None else self.size[1] * shape[2]
    return tensor_shape.TensorShape([shape[0], rows, cols, shape[3]])

  def call(self, inputs):
    return K.resize_images(inputs, self.size[0], self.size[1], self.data_format)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    merged = dict(super(UpSampling2D, self).get_config())
    merged.update({'size': self.size, 'data_format': self.data_format})
    return merged
class UpSampling3D(Layer):
  """Upsampling layer for 3D inputs.

  Repeats the 1st, 2nd and 3rd dimensions
  of the data by size[0], size[1] and size[2] respectively.

  Arguments:
      size: int, or tuple of 3 integers.
          The upsampling factors for dim1, dim2 and dim3.
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
          while `channels_first` corresponds to inputs with shape
          `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".

  Input shape:
      5D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, dim1, dim2, dim3, channels)`
      - If `data_format` is `"channels_first"`:
          `(batch, channels, dim1, dim2, dim3)`

  Output shape:
      5D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`
      - If `data_format` is `"channels_first"`:
          `(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`
  """

  def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
    # Call the base Layer constructor first, consistent with UpSampling1D/2D
    # and the ZeroPadding layers in this file (the original assigned the
    # attributes before running the base initializer).
    super(UpSampling3D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.size = conv_utils.normalize_tuple(size, 3, 'size')
    self.input_spec = InputSpec(ndim=5)

  def _compute_output_shape(self, input_shape):
    """Scale the three spatial dimensions by the upsampling factors."""
    shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_first':
      dim1 = None if shape[2] is None else self.size[0] * shape[2]
      dim2 = None if shape[3] is None else self.size[1] * shape[3]
      dim3 = None if shape[4] is None else self.size[2] * shape[4]
      return tensor_shape.TensorShape([shape[0], shape[1], dim1, dim2, dim3])
    dim1 = None if shape[1] is None else self.size[0] * shape[1]
    dim2 = None if shape[2] is None else self.size[1] * shape[2]
    dim3 = None if shape[3] is None else self.size[2] * shape[3]
    return tensor_shape.TensorShape([shape[0], dim1, dim2, dim3, shape[4]])

  def call(self, inputs):
    return K.resize_volumes(inputs, self.size[0], self.size[1], self.size[2],
                            self.data_format)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = {'size': self.size, 'data_format': self.data_format}
    base_config = super(UpSampling3D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class ZeroPadding1D(Layer):
  """Zero-padding layer for 1D input (e.g. temporal sequence).

  Arguments:
      padding: int, or tuple of int (length 2).
          - If int:
          How many zeros to add at the beginning and end of
          the padding dimension (axis 1).
          - If tuple of int (length 2):
          How many zeros to add at the beginning and at the end of
          the padding dimension (`(left_pad, right_pad)`).

  Input shape:
      3D tensor with shape `(batch, axis_to_pad, features)`

  Output shape:
      3D tensor with shape `(batch, padded_axis, features)`
  """

  def __init__(self, padding=1, **kwargs):
    super(ZeroPadding1D, self).__init__(**kwargs)
    self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')
    self.input_spec = InputSpec(ndim=3)

  def _compute_output_shape(self, input_shape):
    """Grow the time dimension by the total amount of padding."""
    # Normalize to a plain list first, consistent with the other padding and
    # upsampling layers in this file (the original indexed the raw shape).
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if input_shape[1] is not None:
      length = input_shape[1] + self.padding[0] + self.padding[1]
    else:
      length = None
    return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])

  def call(self, inputs):
    return K.temporal_padding(inputs, padding=self.padding)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    config = {'padding': self.padding}
    base_config = super(ZeroPadding1D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class ZeroPadding2D(Layer):
  """Zero-padding layer for 2D input (e.g. picture).

  This layer can add rows and columns of zeros
  at the top, bottom, left and right side of an image tensor.

  Arguments:
      padding: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
          - If int: the same symmetric padding
              is applied to width and height.
          - If tuple of 2 ints:
              interpreted as two different
              symmetric padding values for height and width:
              `(symmetric_height_pad, symmetric_width_pad)`.
          - If tuple of 2 tuples of 2 ints:
              interpreted as
              `((top_pad, bottom_pad), (left_pad, right_pad))`
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, height, width, channels)` while `channels_first`
          corresponds to inputs with shape
          `(batch, channels, height, width)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".

  Input shape:
      4D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, rows, cols, channels)`
      - If `data_format` is `"channels_first"`:
          `(batch, channels, rows, cols)`

  Output shape:
      4D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, padded_rows, padded_cols, channels)`
      - If `data_format` is `"channels_first"`:
          `(batch, channels, padded_rows, padded_cols)`
  """

  def __init__(self, padding=(1, 1), data_format=None, **kwargs):
    super(ZeroPadding2D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    if isinstance(padding, int):
      # A single int pads both spatial axes symmetrically.
      self.padding = ((padding, padding), (padding, padding))
    elif hasattr(padding, '__len__'):
      if len(padding) != 2:
        raise ValueError('`padding` should have two elements. '
                         'Found: ' + str(padding))
      rows_padding = conv_utils.normalize_tuple(padding[0], 2,
                                                '1st entry of padding')
      cols_padding = conv_utils.normalize_tuple(padding[1], 2,
                                                '2nd entry of padding')
      self.padding = (rows_padding, cols_padding)
    else:
      raise ValueError('`padding` should be either an int, '
                       'a tuple of 2 ints '
                       '(symmetric_height_pad, symmetric_width_pad), '
                       'or a tuple of 2 tuples of 2 ints '
                       '((top_pad, bottom_pad), (left_pad, right_pad)). '
                       'Found: ' + str(padding))
    self.input_spec = InputSpec(ndim=4)

  def _compute_output_shape(self, input_shape):
    """Grow each spatial dimension by its total (before + after) padding."""
    shape = tensor_shape.TensorShape(input_shape).as_list()

    def _padded(dim, pads):
      return None if dim is None else dim + pads[0] + pads[1]

    if self.data_format == 'channels_first':
      rows = _padded(shape[2], self.padding[0])
      cols = _padded(shape[3], self.padding[1])
      return tensor_shape.TensorShape([shape[0], shape[1], rows, cols])
    elif self.data_format == 'channels_last':
      rows = _padded(shape[1], self.padding[0])
      cols = _padded(shape[2], self.padding[1])
      return tensor_shape.TensorShape([shape[0], rows, cols, shape[3]])

  def call(self, inputs):
    return K.spatial_2d_padding(
        inputs, padding=self.padding, data_format=self.data_format)

  def get_config(self):
    """Return the layer configuration as a JSON-serializable dict."""
    merged = dict(super(ZeroPadding2D, self).get_config())
    merged.update({'padding': self.padding, 'data_format': self.data_format})
    return merged
class ZeroPadding3D(Layer):
  """Zero-padding layer for 3D data (spatial or spatio-temporal).

  Arguments:
      padding: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
          - If int: the same symmetric padding
              is applied to all three spatial dimensions.
          - If tuple of 3 ints:
              interpreted as three different
              symmetric padding values per dimension:
              `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
          - If tuple of 3 tuples of 2 ints:
              interpreted as
              `((left_dim1_pad, right_dim1_pad), (left_dim2_pad,
                right_dim2_pad), (left_dim3_pad, right_dim3_pad))`
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
          while `channels_first` corresponds to inputs with shape
          `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".

  Input shape:
      5D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,
            depth)`
      - If `data_format` is `"channels_first"`:
          `(batch, depth, first_axis_to_pad, second_axis_to_pad,
            third_axis_to_pad)`

  Output shape:
      5D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, first_padded_axis, second_padded_axis, third_padded_axis,
            depth)`
      - If `data_format` is `"channels_first"`:
          `(batch, depth, first_padded_axis, second_padded_axis,
            third_padded_axis)`
  """

  def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs):
    """Normalizes `padding` to three (begin, end) pairs.

    Raises:
        ValueError: if `padding` is not an int, a tuple of 3 ints, or a
            tuple of 3 tuples of 2 ints.
    """
    super(ZeroPadding3D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    if isinstance(padding, int):
      self.padding = ((padding, padding), (padding, padding), (padding,
                                                               padding))
    elif hasattr(padding, '__len__'):
      if len(padding) != 3:
        raise ValueError('`padding` should have 3 elements. '
                         'Found: ' + str(padding))
      dim1_padding = conv_utils.normalize_tuple(padding[0], 2,
                                                '1st entry of padding')
      dim2_padding = conv_utils.normalize_tuple(padding[1], 2,
                                                '2nd entry of padding')
      dim3_padding = conv_utils.normalize_tuple(padding[2], 2,
                                                '3rd entry of padding')
      self.padding = (dim1_padding, dim2_padding, dim3_padding)
    else:
      # Bug fix: the last pair used to read "right_dim2_pad" by mistake.
      raise ValueError(
          '`padding` should be either an int, '
          'a tuple of 3 ints '
          '(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), '
          'or a tuple of 3 tuples of 2 ints '
          '((left_dim1_pad, right_dim1_pad),'
          ' (left_dim2_pad, right_dim2_pad),'
          ' (left_dim3_pad, right_dim3_pad)). '
          'Found: ' + str(padding))
    self.input_spec = InputSpec(ndim=5)

  def _compute_output_shape(self, input_shape):
    """Infers the padded output shape; unknown (None) dims stay unknown."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()

    def _padded(size, pads):
      # Bug fix: the original doubled a single side (`2 * pads[0]` or
      # `2 * pads[1]`), which is wrong for asymmetric ((begin, end), ...)
      # padding. The output grows by begin + end, as in ZeroPadding2D.
      return None if size is None else size + pads[0] + pads[1]

    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1],
           _padded(input_shape[2], self.padding[0]),
           _padded(input_shape[3], self.padding[1]),
           _padded(input_shape[4], self.padding[2])])
    elif self.data_format == 'channels_last':
      return tensor_shape.TensorShape(
          [input_shape[0],
           _padded(input_shape[1], self.padding[0]),
           _padded(input_shape[2], self.padding[1]),
           _padded(input_shape[3], self.padding[2]),
           input_shape[4]])

  def call(self, inputs):
    """Pads `inputs` with zeros along the three spatial axes."""
    return K.spatial_3d_padding(
        inputs, padding=self.padding, data_format=self.data_format)

  def get_config(self):
    """Returns the layer configuration for serialization."""
    config = {'padding': self.padding, 'data_format': self.data_format}
    base_config = super(ZeroPadding3D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Cropping1D(Layer):
  """Cropping layer for 1D input (e.g. temporal sequence).

  It crops along the time dimension (axis 1).

  Arguments:
      cropping: int or tuple of int (length 2)
          How many units should be trimmed off at the beginning and end of
          the cropping dimension (axis 1).
          If a single int is provided,
          the same value will be used for both.

  Input shape:
      3D tensor with shape `(batch, axis_to_crop, features)`

  Output shape:
      3D tensor with shape `(batch, cropped_axis, features)`
  """

  def __init__(self, cropping=(1, 1), **kwargs):
    super(Cropping1D, self).__init__(**kwargs)
    # Normalize to a (begin, end) pair of ints.
    self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')
    self.input_spec = InputSpec(ndim=3)

  def _compute_output_shape(self, input_shape):
    """Time axis shrinks by the total crop; an unknown dim stays None."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    length = input_shape[1]
    if length is not None:
      length -= self.cropping[0] + self.cropping[1]
    return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])

  def call(self, inputs):
    # A trailing crop of 0 must map to a `None` stop index, since a literal
    # `-0` stop would produce an empty slice instead of "crop nothing".
    begin, end = self.cropping
    return inputs[:, begin:(-end or None), :]

  def get_config(self):
    """Returns the layer configuration for serialization."""
    config = super(Cropping1D, self).get_config()
    config.update({'cropping': self.cropping})
    return config
class Cropping2D(Layer):
  """Cropping layer for 2D input (e.g. picture).

  It crops along spatial dimensions, i.e. width and height.

  Arguments:
      cropping: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
          - If int: the same symmetric cropping
              is applied to width and height.
          - If tuple of 2 ints:
              interpreted as two different
              symmetric cropping values for height and width:
              `(symmetric_height_crop, symmetric_width_crop)`.
          - If tuple of 2 tuples of 2 ints:
              interpreted as
              `((top_crop, bottom_crop), (left_crop, right_crop))`
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, height, width, channels)` while `channels_first`
          corresponds to inputs with shape
          `(batch, channels, height, width)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".

  Input shape:
      4D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, rows, cols, channels)`
      - If `data_format` is `"channels_first"`:
          `(batch, channels, rows, cols)`

  Output shape:
      4D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, cropped_rows, cropped_cols, channels)`
      - If `data_format` is `"channels_first"`:
          `(batch, channels, cropped_rows, cropped_cols)`

  Examples:

  ```python
      # Crop the input 2D images or feature maps
      model = Sequential()
      model.add(Cropping2D(cropping=((2, 2), (4, 4)),
                           input_shape=(28, 28, 3)))
      # now model.output_shape == (None, 24, 20, 3)
      model.add(Conv2D(64, (3, 3), padding='same'))
      model.add(Cropping2D(cropping=((2, 2), (2, 2))))
      # now model.output_shape == (None, 20, 16, 64)
  ```
  """

  def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
    """Normalizes `cropping` to ((top, bottom), (left, right)) int pairs."""
    super(Cropping2D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    if isinstance(cropping, int):
      # One int: the same symmetric amount on both spatial axes.
      self.cropping = ((cropping, cropping), (cropping, cropping))
    elif hasattr(cropping, '__len__'):
      if len(cropping) != 2:
        raise ValueError('`cropping` should have two elements. '
                         'Found: ' + str(cropping))
      height_cropping = conv_utils.normalize_tuple(cropping[0], 2,
                                                   '1st entry of cropping')
      width_cropping = conv_utils.normalize_tuple(cropping[1], 2,
                                                  '2nd entry of cropping')
      self.cropping = (height_cropping, width_cropping)
    else:
      raise ValueError('`cropping` should be either an int, '
                       'a tuple of 2 ints '
                       '(symmetric_height_crop, symmetric_width_crop), '
                       'or a tuple of 2 tuples of 2 ints '
                       '((top_crop, bottom_crop), (left_crop, right_crop)). '
                       'Found: ' + str(cropping))
    self.input_spec = InputSpec(ndim=4)

  def _compute_output_shape(self, input_shape):
    """Each spatial dim shrinks by its total crop; None dims stay None."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    # pylint: disable=invalid-unary-operand-type
    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape([
          input_shape[0], input_shape[1],
          input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
          if input_shape[2] else None,
          input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
          if input_shape[3] else None
      ])
    else:
      return tensor_shape.TensorShape([
          input_shape[0],
          input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
          if input_shape[1] else None,
          input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
          if input_shape[2] else None, input_shape[3]
      ])
    # pylint: enable=invalid-unary-operand-type

  def call(self, inputs):
    """Crops the two spatial axes of `inputs`.

    Zero trailing crops are special-cased because a `-0` stop index would
    produce an empty slice instead of "crop nothing at the end".
    """
    # pylint: disable=invalid-unary-operand-type
    if self.data_format == 'channels_first':
      if self.cropping[0][1] == self.cropping[1][1] == 0:
        return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:]
      elif self.cropping[0][1] == 0:
        return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
                      -self.cropping[1][1]]
      elif self.cropping[1][1] == 0:
        return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
                      self.cropping[1][0]:]
      return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
                    self.cropping[1][0]:-self.cropping[1][1]]
    else:
      if self.cropping[0][1] == self.cropping[1][1] == 0:
        return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :]
      elif self.cropping[0][1] == 0:
        return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
                      -self.cropping[1][1], :]
      elif self.cropping[1][1] == 0:
        return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
                      self.cropping[1][0]:, :]
      return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
          1][0]:-self.cropping[1][1], :]  # pylint: disable=invalid-unary-operand-type
    # pylint: enable=invalid-unary-operand-type

  def get_config(self):
    """Returns the layer configuration for serialization."""
    config = {'cropping': self.cropping, 'data_format': self.data_format}
    base_config = super(Cropping2D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Cropping3D(Layer):
  """Cropping layer for 3D data (e.g. spatial or spatio-temporal).

  Arguments:
      cropping: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
          - If int: the same symmetric cropping
              is applied to depth, height, and width.
          - If tuple of 3 ints:
              interpreted as three different
              symmetric cropping values for depth, height, and width:
              `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
          - If tuple of 3 tuples of 2 ints:
              interpreted as
              `((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
                right_dim2_crop), (left_dim3_crop, right_dim3_crop))`
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
          while `channels_first` corresponds to inputs with shape
          `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".

  Input shape:
      5D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,
            depth)`
      - If `data_format` is `"channels_first"`:
          `(batch, depth, first_axis_to_crop, second_axis_to_crop,
            third_axis_to_crop)`

  Output shape:
      5D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis,
            depth)`
      - If `data_format` is `"channels_first"`:
          `(batch, depth, first_cropped_axis, second_cropped_axis,
            third_cropped_axis)`
  """

  def __init__(self,
               cropping=((1, 1), (1, 1), (1, 1)),
               data_format=None,
               **kwargs):
    """Normalizes `cropping` to three (begin, end) pairs.

    Raises:
        ValueError: if `cropping` is not an int, a tuple of 3 ints, or a
            tuple of 3 tuples of 2 ints.
    """
    super(Cropping3D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    if isinstance(cropping, int):
      self.cropping = ((cropping, cropping), (cropping, cropping), (cropping,
                                                                    cropping))
    elif hasattr(cropping, '__len__'):
      if len(cropping) != 3:
        raise ValueError('`cropping` should have 3 elements. '
                         'Found: ' + str(cropping))
      dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2,
                                                 '1st entry of cropping')
      dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2,
                                                 '2nd entry of cropping')
      dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2,
                                                 '3rd entry of cropping')
      self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
    else:
      # Bug fix: the last pair used to read "right_dim2_crop" by mistake.
      raise ValueError(
          '`cropping` should be either an int, '
          'a tuple of 3 ints '
          '(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), '
          'or a tuple of 3 tuples of 2 ints '
          '((left_dim1_crop, right_dim1_crop),'
          ' (left_dim2_crop, right_dim2_crop),'
          ' (left_dim3_crop, right_dim3_crop)). '
          'Found: ' + str(cropping))
    self.input_spec = InputSpec(ndim=5)

  def _compute_output_shape(self, input_shape):
    """Each spatial dim shrinks by its total crop; None dims stay None."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()

    def _cropped(size, crops):
      # Subtract both the leading and trailing crop; None propagates.
      return None if size is None else size - crops[0] - crops[1]

    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1],
           _cropped(input_shape[2], self.cropping[0]),
           _cropped(input_shape[3], self.cropping[1]),
           _cropped(input_shape[4], self.cropping[2])])
    elif self.data_format == 'channels_last':
      return tensor_shape.TensorShape(
          [input_shape[0],
           _cropped(input_shape[1], self.cropping[0]),
           _cropped(input_shape[2], self.cropping[1]),
           _cropped(input_shape[3], self.cropping[2]),
           input_shape[4]])

  def call(self, inputs):
    """Crops the three spatial axes of `inputs`."""
    # Build one slice per spatial axis. A trailing crop of 0 must map to a
    # `None` stop index (a literal `-0` stop would yield an empty slice);
    # `-end or None` handles both cases and replaces the original
    # eight-way branch, which special-cased every combination of zero
    # trailing crops.
    dim1, dim2, dim3 = [
        slice(begin, -end or None) for (begin, end) in self.cropping
    ]
    if self.data_format == 'channels_first':
      return inputs[:, :, dim1, dim2, dim3]
    return inputs[:, dim1, dim2, dim3, :]

  def get_config(self):
    """Returns the layer configuration for serialization."""
    config = {'cropping': self.cropping, 'data_format': self.data_format}
    base_config = super(Cropping3D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
# Aliases
# Legacy names kept for backwards compatibility with older Keras code.
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Conv2DTranspose
Convolution3DTranspose = Conv3DTranspose
# "Deconvolution" is the historical name for transposed convolution.
Deconvolution2D = Deconv2D = Conv2DTranspose
Deconvolution3D = Deconv3D = Conv3DTranspose
| pavelchristof/gomoku-ai | tensorflow/contrib/keras/python/keras/layers/convolutional.py | Python | apache-2.0 | 73,211 |
import math, cv2
def distance(point1, point2):
    """
    Euclidean distance between two contour points.

    Each argument is a one-element nesting such as ``[[x, y]]`` (the shape
    OpenCV contours use), so the coordinates live at index 0.
    """
    point1 = point1[0]
    point2 = point2[0]
    # math.hypot is the standard-library Euclidean norm; it replaces the
    # hand-rolled sqrt(pow(...) + pow(...)) combination.
    return math.hypot(point1[0] - point2[0], point1[1] - point2[1])
class Gesture:
    """
    Represent the current state of the hand.
    """
    def __init__(self):
        # All attributes are filled in by the detection pipeline.
        self.hull_p = None      # convex hull points
        self.hull_i = None      # convex hull indices
        self.biggest = None     # index of the chosen contour in `contours`
        self.bounding = None    # bounding coordinates of that contour
        self.defects = None     # convexity defects from cv2
        self.contours = None    # all detected contours
    def check_convexity(self):
        """
        Remove the convexity defects that are not important.

        Keeps only defects whose start and end points are farther from the
        defect point than a tolerance derived from the bounding box.
        """
        # NOTE(review): bounding[0] - bounding[2] can be negative depending
        # on how `bounding` is populated -- confirm against the caller.
        tolerance = (self.bounding[0] - self.bounding[2]) / 5
        new_defects = []
        if self.defects is not None:
            for i in self.defects:
                j = i[0]
                start = self.contours[self.biggest][j[1]]
                end = self.contours[self.biggest][j[0]]
                far = self.contours[self.biggest][j[2]]
                if distance(start, far) > tolerance and distance(end, far) > tolerance:
                    new_defects.append(i)
        self.defects = new_defects
    def is_hand(self):
        """
        Checks if it is a hand.

        Rejects degenerate (zero-sized) boxes and boxes with an aspect
        ratio above 4:1.
        """
        h = abs(self.bounding[0] - self.bounding[2])
        w = abs(self.bounding[1] - self.bounding[3])
        # Bug fix: the original used `h is 0` / `w is 0`, which tests object
        # identity rather than numeric equality and is unreliable.
        hand = True
        if h == 0 or w == 0:
            hand = False
        elif h / w > 4 or w / h > 4:
            hand = False
        return hand
    def get_center(self):
        """
        Get the center (centroid) of the hand contour via image moments.
        """
        p = cv2.moments(self.contours[self.biggest], binaryImage=True)
        # NOTE(review): m00 == 0 (empty contour) would raise
        # ZeroDivisionError -- callers appear to guarantee a real contour.
        return p["m10"] / p["m00"], p["m01"] / p["m00"]
# Copyright (c) 2021, Manfred Moitzi
# License: MIT License
import math
import pytest
import ezdxf
from ezdxf.document import Drawing
from ezdxf.math import Vec2, arc_angle_span_deg
from ezdxf.render.dim_curved import detect_closer_defpoint, _CurvedDimensionLine
@pytest.fixture(scope="module")
def doc():
    # One shared document for the whole module; setup=True installs the
    # standard resources (dimension styles etc.) the renderer needs.
    return ezdxf.new(setup=True)
class TestDetectCloserDefpoints:
    """detect_closer_defpoint() picks the defpoint nearest to the base point
    along direction `d`; ties resolve to p1. Checked in all four axis
    directions plus an angled one, each shifted by an arbitrary offset.
    """

    @pytest.mark.parametrize(
        "d, offset",  # d=direction
        [
            (Vec2(1, 0), Vec2(0, 0)),  # +x direction
            (Vec2(0, 1), Vec2(1, -1)),  # +y direction
            (Vec2(-1, 0), Vec2(-2, 3)),  # -x direction
            (Vec2(0, -1), Vec2(2, -4)),  # -y direction
            (Vec2(2, -1), Vec2(20, 45)),  # angled
        ],
        ids=["(+x)", "(+y)", "(-x)", "(-y)", "angled"],
    )
    @pytest.mark.parametrize(
        "base",
        [
            0,
            0.5,
            1.0,
            1.5,
            2.0,  # equal -> p1
            -0.5,  # every base left of p1 is closer to p1
            -1.0,
            -100.0,
        ],
    )
    def test_p1_is_closer_to_base(self, base, d, offset):
        # e.g. for base=(-1, 0), d=(1, 0):
        # base p1 p2
        # (-x) <---2---(1)---0---(1)---2---(3)---> (+x)
        # By equality p1 is preferred over p2!
        # Shift system by an arbitrary offset!
        p1 = d * 1 + offset
        p2 = d * 3 + offset
        base = d * base + offset
        assert detect_closer_defpoint(d, base, p1, p2) is p1

    @pytest.mark.parametrize(
        "d, offset",  # d=direction
        [
            (Vec2(1, 0), Vec2(0, -1)),  # +x direction
            (Vec2(0, 1), Vec2(2, -2)),  # +y direction
            (Vec2(-1, 0), Vec2(2, 5)),  # -x direction
            (Vec2(0, -1), Vec2(1, 0)),  # -y direction
            (Vec2(2, -1), Vec2(20, 45)),  # angled
        ],
        ids=["(+x)", "(+y)", "(-x)", "(-y)", "angled"],
    )
    @pytest.mark.parametrize(
        "base",
        [
            2.5,
            3.0,
            4.0,  # every base right of p2 is closer to p2
            100.0,
        ],
    )
    def test_p2_is_closer_to_base(self, base, d, offset):
        # e.g. for base=(4.0, 0), d=(1, 0):
        # p1 p2 base
        # (-x) <---2---1---0---(1)---2---(3)---(4)---> (+x)
        # By equality p1 is preferred over p2!
        # Shift system by an arbitrary offset!
        p1 = d * 1 + offset
        p2 = d * 3 + offset
        base = d * base + offset
        assert detect_closer_defpoint(d, base, p1, p2) is p2
@pytest.mark.parametrize(
    "s,e",
    [
        [60, 120],
        [300, 240],  # passes 0
        [240, 300],
        [300, 30],  # passes 0
    ],
)
def test_dimension_line_divided_by_measurement_text(doc: Drawing, s, e):
    """Vertical centered measurement text should hide the part of the
    dimension line beneath the text. This creates two arcs instead of one.
    """
    msp = doc.modelspace()
    dim = msp.add_angular_dim_cra(
        center=Vec2(),
        radius=5,
        start_angle=s,
        end_angle=e,
        distance=2,
        override={"dimtad": 0},  # vertical centered text
    )
    dim.render()
    # The dimension line is rendered as ARC entities in the geometry block.
    arcs = dim.dimension.get_geometry_block().query("ARC")
    assert len(arcs) == 2
    assert sum(
        arc_angle_span_deg(arc.dxf.start_angle, arc.dxf.end_angle)
        for arc in arcs
    ) < arc_angle_span_deg(
        s, e
    ), "sum of visual arcs should be smaller than the full arc"
def measure_fixed_angle(msp, angle: float):
    """Yield angular dimensions spanning `angle` degrees in 8 orientations
    (every 45 deg) for three text placements (dimtad 0, 1 and 4), laid out
    on a grid so the rendered output does not overlap.
    """
    x_dist = 15
    radius = 3
    distance = 1
    delta = angle / 2.0  # span the angle symmetrically around main_angle
    for dimtad, y_dist in [[0, 0], [1, 20], [4, 40]]:
        for count in range(8):
            center = Vec2(x_dist * count, y_dist)
            main_angle = 45.0 * count
            start_angle = main_angle - delta
            end_angle = main_angle + delta
            yield msp.add_angular_dim_cra(
                center,
                radius,
                start_angle,
                end_angle,
                distance,
                override={"dimtad": dimtad},
            )
def test_text_and_arrows_fit_between_extension_lines(doc: Drawing):
    """There is enough space between the extension lines to place text and
    arrows.
    """
    # A 20 deg span is wide enough in every orientation and text placement.
    for dim in measure_fixed_angle(doc.modelspace(), angle=20):
        render_obj = dim.render()
        assert isinstance(render_obj, _CurvedDimensionLine)
        assert render_obj.arrows_outside is False
        assert render_obj.measurement.is_wide_text is False
        assert render_obj.measurement.text_is_outside is False
@pytest.mark.parametrize("angle", [3, 6])
def test_has_outside_text_and_arrows(doc: Drawing, angle):
    """The space between extension lines is too narrow to place text and arrows."""
    # Both 3 and 6 degree spans force text AND arrows outside.
    for dim in measure_fixed_angle(doc.modelspace(), angle=angle):
        render_obj = dim.render()
        assert isinstance(render_obj, _CurvedDimensionLine)
        assert render_obj.arrows_outside is True
        assert render_obj.measurement.text_is_outside is True
        assert render_obj.measurement.is_wide_text is True
def test_has_outside_text_and_arrows_but_not_a_wide_text(doc: Drawing):
    """The space between extension lines is too narrow to place text and arrows,
    but the text alone has enough space.
    """
    # 9 degrees: arrows still don't fit, but the text by itself would.
    for dim in measure_fixed_angle(doc.modelspace(), angle=9):
        render_obj = dim.render()
        assert isinstance(render_obj, _CurvedDimensionLine)
        assert render_obj.arrows_outside is True
        assert render_obj.measurement.text_is_outside is True
        assert render_obj.measurement.is_wide_text is False
def test_fixed_length_extension_lines(doc: Drawing):
    """Fixed-length extension lines (dimfxlon) are dimexe + dimfxl long,
    regardless of the distance to the measured geometry.
    """
    msp = doc.modelspace()
    dim = msp.add_angular_dim_cra(
        center=(0, 0),
        radius=5,
        distance=2,
        start_angle=0,
        end_angle=90,
        override={
            "dimfxlon": 1,  # use fixed length extension lines
            "dimexe": 0.7,  # length "above" the dimension line
            "dimfxl": 0.5,  # length "below" the dimension line
        },
    ).render()
    # only the extension lines are LINE entities:
    for line in dim.dimension.get_geometry_block().query("LINE"):
        length = line.dxf.start.distance(line.dxf.end)
        assert length == pytest.approx(0.5 + 0.7)
# Allow running this test module directly without invoking pytest manually.
if __name__ == "__main__":
    pytest.main([__file__])
| mozman/ezdxf | tests/test_07_render/test_712_render_curved_dimension.py | Python | mit | 6,399 |
# -*- coding: utf-8 -*-
from nose.tools import eq_
import bot_mock
from pyfibot.modules import module_urltitle
from utils import check_re
bot = bot_mock.BotMock()
# Regex fragments used to validate the bot's urltitle output below.
# NOTE(review): "lengh" is a typo for "length", but the name is referenced
# by several tests in this module, so it is intentionally left unchanged.
lengh_str_regex = u'\d+(h|m|s)(\d+(m))?(\d+s)?'
# e.g. "12", "1.5k", "3M" play counts
views_str_regex = u'\d+(\.\d+)?(k|M|Billion|Trillion)?'
# e.g. "FRESH", "2.5y ago", "3d ago"
age_str_regex = u'(FRESH|(\d+(\.\d+)?(y|d) ago))'
def test_areena_radio():
    """Title for an Areena radio URL includes length, play count and age."""
    regex = u'Title: (.*?) \[%s - %s plays - %s( - exits in \d+ (weeks|days|hours|minutes))?\]' % (lengh_str_regex, views_str_regex, age_str_regex)
    msg = "http://areena.yle.fi/radio/2006973"
    module_urltitle.init(bot)
    # handle_url returns (channel, title); only the title text is matched.
    check_re(regex, module_urltitle.handle_url(bot, None, "#channel", msg, msg)[1])
def test_areena_tv():
    """Title for an Areena TV URL includes length, play count and age."""
    regex = u'Title: (.*?) \[%s - %s plays - %s( - exits in \d+ (weeks|days|hours|minutes))?\]' % (lengh_str_regex, views_str_regex, age_str_regex)
    msg = "http://areena.yle.fi/tv/1999860"
    module_urltitle.init(bot)
    check_re(regex, module_urltitle.handle_url(bot, None, "#channel", msg, msg)[1])
def test_areena_series():
    """Title for a series URL shows the episode count and latest episode age."""
    regex = u'Title: (.*?) \[SERIES - \d+ episodes - latest episode: %s\]' % (age_str_regex)
    msg = "http://areena.yle.fi/tv/2129619"
    module_urltitle.init(bot)
    check_re(regex, module_urltitle.handle_url(bot, None, "#channel", msg, msg)[1])
def test_areena_live():
    """Live stream URLs get a plain title with a (LIVE) suffix."""
    msg = "http://areena.yle.fi/tv/suora/tv2"
    module_urltitle.init(bot)
    # Live streams have no length/plays/age metadata, so the full tuple
    # can be compared exactly.
    eq_(('#channel', u'Title: Yle TV2 (LIVE)'), module_urltitle.handle_url(bot, None, "#channel", msg, msg))
| huqa/pyfibot | tests/test_areena.py | Python | bsd-3-clause | 1,476 |
# -*- coding: utf-8 -*-
###############################################################################
# Information
###############################################################################
# Created by Linwood Creekmore
# Input by Vikram Mittal
# In partial fulfillment of the requirements for the Georgetown University Data Analytics Graduate Certificate Program
# April 19, 2015
# https://plus.google.com/+LinwoodCreekmoreIII/
###############################################################################
# Imports
###############################################################################
import os
import csv
###############################################################################
# Main Functions
###############################################################################
#def AgTables(driver,trip)
# Collect every per-trip aggregate CSV under <project>/output/aggregate and
# merge the rows into a single lin.csv next to the project directory.
path = os.path.abspath(os.getcwd())
rootdir = os.path.normpath(
    os.path.join(os.path.dirname(path), os.path.basename(path),
                 "output", "aggregate"))

# Column header for the combined file. The per-trip CSVs carry the same
# columns; their own header line is skipped while reading. (The original
# duplicated this literal as an unused `fieldnames` variable.)
HEADER = ['driver_id', 'trip_id', 'Average Velocity (mph)', 'Max Velocity',
          'Velocity Stdev', 'Average Acceleration (mph per s)',
          'Max Acceleration (mph per s)', ' Acceleration Stdev',
          'Displacement', 'Total Distance Traveled',
          'Max Direction Change per sec', ' Direction Stdev', 'Time (s)',
          'Turns', 'Aggressive Turns', 'Stops', 'Large Deceleration Events',
          'Deceleration Events', 'Max Deceleration Event']

agvalues = []
for subdir, dirs, files in os.walk(rootdir):
    for filename in files:  # renamed: `file` shadowed the builtin
        if filename.endswith('.csv'):
            with open(os.path.join(subdir, filename), 'rb') as infile:
                reader = csv.reader(infile)
                next(reader)  # skip the per-file header row
                agvalues.extend(reader)

outpath = os.path.normpath(
    os.path.join(os.path.dirname(path), os.path.basename(path), "lin.csv"))
with open(outpath, 'wb') as outfile:
    writer = csv.writer(outfile, delimiter=',', quotechar='"',
                        lineterminator='\n')
    writer.writerow(HEADER)
    writer.writerows(agvalues)
###############################################################################
# 'Main' Function
###############################################################################
'''
if __name__ == '__main__':
driver = raw_input('Pick a driver. Enter a number between 1-3612:\n')
trip = raw_input('Pick a trip. Enter a number between 1-200:\n')
AgTables(driver,trip)
'''
| georgetown-analytics/skidmarks | bin/lin.py | Python | mit | 2,801 |
from django import template
from django.urls import reverse
from django.utils.safestring import mark_safe
from .. import markdown_extensions
import markdown
register = template.Library()
def create_markdown_filter(*extra_extensions):
    """Build a template filter that renders Markdown to safe HTML.

    The returned callable always enables the nl2br, smart_strong and
    sane_lists extensions, plus any `extra_extensions` given here.
    """
    base_extensions = [
        'markdown.extensions.nl2br',
        'markdown.extensions.smart_strong',
        'markdown.extensions.sane_lists',
    ]

    def _wrapped(value):
        # HTML in the input is escaped (safe_mode), so the result may be
        # marked safe for template output.
        html = markdown.markdown(
            value,
            safe_mode='escape',
            output_format='html5',
            lazy_ol=False,
            extensions=base_extensions + list(extra_extensions))
        return mark_safe(html)

    return _wrapped
# Markdown filter for blog posts: adds media embeds, inline URL linking and
# #hashtag links (pointing at the blog tag view) on top of the base set.
blogpost_markdown = create_markdown_filter(
    markdown_extensions.EmbedExtension(),
    markdown_extensions.InlineURLExtension(),
    markdown_extensions.HashtagExtension(lambda t: reverse('blog_tags', kwargs={'taglist': t[1:]}))
)

# Expose the filter to templates under the name "markdown".
register.filter(name='markdown', is_safe=True)(blogpost_markdown)
| AlexandreDecan/Lexpage | app/commons/templatetags/markup_markdown.py | Python | gpl-3.0 | 865 |
# -*- coding: utf-8 -*-
#
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from pecan import rest
from pecan import abort
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from mistral import exceptions as ex
from mistral.api.controllers.v1 import task
from mistral.openstack.common import log as logging
from mistral.api.controllers import resource
from mistral.db import api as db_api
from mistral.engine import engine
LOG = logging.getLogger(__name__)
class Execution(resource.Resource):
    """Execution resource."""

    id = wtypes.text
    workbook_name = wtypes.text
    task = wtypes.text
    state = wtypes.text
    # Context is a JSON object but since WSME doesn't support arbitrary
    # dictionaries we have to use text type convert to json and back manually.
    context = wtypes.text

    def to_dict(self):
        """Return the resource as a dict, decoding `context` back to JSON.

        NOTE(review): a falsy context (empty dict/string) is left as-is and
        not decoded -- presumably intentional, confirm with callers.
        """
        d = super(Execution, self).to_dict()
        if d.get('context'):
            d['context'] = json.loads(d['context'])
        return d

    @classmethod
    def from_dict(cls, d):
        """Build an Execution from a dict, encoding `context` to JSON text.

        Keys that do not correspond to resource attributes are ignored.
        """
        e = cls()
        for key, val in d.items():
            if hasattr(e, key):
                if key == 'context' and val:
                    val = json.dumps(val)
                setattr(e, key, val)
        return e
class Executions(resource.Resource):
    """A collection of Execution resources."""

    # Rendered by WSME as {"executions": [...]}.
    executions = [Execution]
class ExecutionsController(rest.RestController):
    """REST controller for workflow executions of a workbook."""

    # Nested controller: .../executions/<id>/tasks
    tasks = task.TasksController()

    @wsme_pecan.wsexpose(Execution, wtypes.text, wtypes.text)
    def get(self, workbook_name, id):
        """Return a single execution, or HTTP 404 if it does not exist."""
        LOG.debug("Fetch execution [workbook_name=%s, id=%s]" %
                  (workbook_name, id))
        values = db_api.execution_get(workbook_name, id)
        if not values:
            abort(404)
        else:
            return Execution.from_dict(values)

    @wsme_pecan.wsexpose(Execution, wtypes.text, wtypes.text, body=Execution)
    def put(self, workbook_name, id, execution):
        """Update an existing execution and return the stored state."""
        LOG.debug("Update execution [workbook_name=%s, id=%s, execution=%s]" %
                  (workbook_name, id, execution))
        values = db_api.execution_update(workbook_name,
                                         id,
                                         execution.to_dict())
        return Execution.from_dict(values)

    @wsme_pecan.wsexpose(Execution, wtypes.text, body=Execution,
                         status_code=201)
    def post(self, workbook_name, execution):
        """Start a new workflow execution; responds with HTTP 201."""
        LOG.debug("Create execution [workbook_name=%s, execution=%s]" %
                  (workbook_name, execution))
        try:
            context = None
            if execution.context:
                context = json.loads(execution.context)
            values = engine.start_workflow_execution(execution.workbook_name,
                                                     execution.task,
                                                     context)
        except ex.MistralException as e:
            # TODO(nmakhotkin): replace this repeated try/except pattern
            # with a shared error-handling decorator.
            abort(400, e.message)
        return Execution.from_dict(values)

    @wsme_pecan.wsexpose(None, wtypes.text, wtypes.text, status_code=204)
    def delete(self, workbook_name, id):
        """Delete an execution; responds with HTTP 204 (no body)."""
        LOG.debug("Delete execution [workbook_name=%s, id=%s]" %
                  (workbook_name, id))
        db_api.execution_delete(workbook_name, id)

    @wsme_pecan.wsexpose(Executions, wtypes.text)
    def get_all(self, workbook_name):
        """List all executions of the given workbook."""
        LOG.debug("Fetch executions [workbook_name=%s]" % workbook_name)
        executions = [Execution.from_dict(values)
                      for values in db_api.executions_get(workbook_name)]
        return Executions(executions=executions)
| dzimine/mistral | mistral/api/controllers/v1/execution.py | Python | apache-2.0 | 4,285 |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to download sdk/extras packages on the bots from google storage.
The script expects arguments that specify zips file in the google storage
bucket named: <dir in SDK extras>_<package name>_<version>.zip. The file will
be extracted in the android_tools/sdk/extras directory on the test bots. This
script will not do anything for developers.
TODO(navabi): Move this script (crbug.com/459819).
"""
import json
import os
import shutil
import subprocess
import sys
import zipfile
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_SRC = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
sys.path.insert(0, os.path.join(SCRIPT_DIR, 'android'))
sys.path.insert(1, os.path.join(CHROME_SRC, 'tools'))
from pylib import constants
import find_depot_tools
DEPOT_PATH = find_depot_tools.add_depot_tools_to_path()
GSUTIL_PATH = os.path.join(DEPOT_PATH, 'gsutil.py')
SDK_EXTRAS_BUCKET = 'gs://chrome-sdk-extras'
SDK_EXTRAS_PATH = os.path.join(constants.ANDROID_SDK_ROOT, 'extras')
SDK_EXTRAS_JSON_FILE = os.path.join(os.path.dirname(__file__),
'android_sdk_extras.json')
def clean_and_extract(dir_name, package_name, zip_file):
  """Delete any stale copy of a package, then unzip a fresh one.

  Args:
    dir_name: directory under sdk/extras that owns the package.
    package_name: name of the package directory to refresh.
    zip_file: zip archive name (relative to the extras dir) to extract.
  """
  target_dir = '%s/%s/%s' % (SDK_EXTRAS_PATH, dir_name, package_name)
  if os.path.exists(target_dir):
    shutil.rmtree(target_dir)
  archive_path = '%s/%s' % (SDK_EXTRAS_PATH, zip_file)
  with zipfile.ZipFile(archive_path) as archive:
    archive.extractall(path=SDK_EXTRAS_PATH)
def main():
  """Sync SDK extras packages from google storage on bots.

  Returns 0 in all cases so the build step never fails because of this
  script.
  """
  # Always do nothing. Crosswalk's bots have CHROME_HEADLESS set, but
  # the files in the bucket below can only be fetched from Google's
  # infrastructure.
  return 0
  # NOTE(review): everything below is intentionally unreachable dead code,
  # kept for reference until the bucket becomes usable again.
  if not os.environ.get('CHROME_HEADLESS'):
    # This is not a buildbot checkout.
    return 0
  # Update the android_sdk_extras.json file to update downloaded packages.
  with open(SDK_EXTRAS_JSON_FILE) as json_file:
    packages = json.load(json_file)
  for package in packages:
    local_zip = '%s/%s' % (SDK_EXTRAS_PATH, package['zip'])
    if not os.path.exists(local_zip):
      package_zip = '%s/%s' % (SDK_EXTRAS_BUCKET, package['zip'])
      try:
        # Pin gsutil 4.7 for reproducible download behavior across bots.
        subprocess.check_call(['python', GSUTIL_PATH, '--force-version', '4.7',
                               'cp', package_zip, local_zip])
      except subprocess.CalledProcessError:
        # Download failures are non-fatal: warn and let the build continue.
        print ('WARNING: Failed to download SDK packages. If this bot compiles '
               'for Android, it may have errors.')
        return 0
    # Always clean dir and extract zip to ensure correct contents.
    clean_and_extract(package['dir_name'], package['package'], package['zip'])
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
  sys.exit(main())
| Fireblend/chromium-crosswalk | build/download_sdk_extras.py | Python | bsd-3-clause | 2,798 |
"""Test class for Host Collection UI
:Requirement: Hostcollection
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: HostCollections
:Assignee: swadeley
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import time
import pytest
from broker import VMBroker
from nailgun import entities
from robottelo.api.utils import promote
from robottelo.api.utils import update_vm_host_location
from robottelo.config import settings
from robottelo.constants import DISTRO_DEFAULT
from robottelo.constants import DISTRO_RHEL8
from robottelo.constants import FAKE_0_CUSTOM_PACKAGE
from robottelo.constants import FAKE_0_CUSTOM_PACKAGE_GROUP
from robottelo.constants import FAKE_0_CUSTOM_PACKAGE_GROUP_NAME
from robottelo.constants import FAKE_0_CUSTOM_PACKAGE_NAME
from robottelo.constants import FAKE_0_MODULAR_ERRATA_ID
from robottelo.constants import FAKE_1_CUSTOM_PACKAGE
from robottelo.constants import FAKE_1_CUSTOM_PACKAGE_NAME
from robottelo.constants import FAKE_2_CUSTOM_PACKAGE
from robottelo.constants import FAKE_2_ERRATA_ID
from robottelo.constants import FAKE_3_CUSTOM_PACKAGE
from robottelo.constants import FAKE_3_CUSTOM_PACKAGE_NAME
from robottelo.constants import FAKE_4_CUSTOM_PACKAGE_NAME
from robottelo.constants import FAKE_5_CUSTOM_PACKAGE
from robottelo.constants.repos import CUSTOM_MODULE_STREAM_REPO_2
from robottelo.constants.repos import FAKE_1_YUM_REPO
from robottelo.constants.repos import FAKE_6_YUM_REPO
from robottelo.datafactory import gen_string
from robottelo.helpers import add_remote_execution_ssh_key
from robottelo.hosts import ContentHost
from robottelo.products import RepositoryCollection
from robottelo.products import SatelliteToolsRepository
from robottelo.products import YumRepository
@pytest.fixture(scope='module')
def module_org():
    """Module-scoped organization with remote execution set to connect by IP."""
    organization = entities.Organization().create()
    # Set remote_execution_connect_by_ip=Yes as an org-level parameter so
    # REX jobs target the hosts' IP addresses instead of their hostnames.
    entities.Parameter(
        name='remote_execution_connect_by_ip',
        value='Yes',
        organization=organization.id,
    ).create()
    return organization
@pytest.fixture(scope='module')
def module_loc():
    """Module-scoped location shared by the tests in this module."""
    location = entities.Location()
    return location.create()
@pytest.fixture(scope='module')
def module_lce(module_org):
    """Module-scoped lifecycle environment inside ``module_org``."""
    lifecycle_env = entities.LifecycleEnvironment(organization=module_org)
    return lifecycle_env.create()
@pytest.fixture(scope='module')
def module_repos_collection(module_org, module_lce):
    """Repository collection (sat-tools plus two custom YUM repos), synced once."""
    repositories = [
        SatelliteToolsRepository(),
        YumRepository(url=FAKE_1_YUM_REPO),
        YumRepository(url=FAKE_6_YUM_REPO),
    ]
    collection = RepositoryCollection(distro=DISTRO_DEFAULT, repositories=repositories)
    collection.setup_content(module_org.id, module_lce.id, upload_manifest=True)
    return collection
@pytest.fixture(scope='module')
def module_repos_collection_module_stream(module_org, module_lce):
    """RHEL8 repository collection carrying module-stream content."""
    stream_repos = [YumRepository(url=CUSTOM_MODULE_STREAM_REPO_2)]
    collection = RepositoryCollection(distro=DISTRO_RHEL8, repositories=stream_repos)
    collection.setup_content(module_org.id, module_lce.id, upload_manifest=True)
    return collection
@pytest.fixture
def vm_content_hosts(request, module_loc, module_repos_collection):
    """Yield two content hosts provisioned for the default distro.

    Each client is registered to the repos-collection content, gets the
    remote-execution SSH key, and is moved into ``module_loc``; the hosts
    are checked back in to the broker when the fixture finalizes.
    """
    # NOTE(review): ``request`` is unused here -- confirm before removing.
    distro = module_repos_collection.distro
    with VMBroker(nick=distro, host_classes={'host': ContentHost}, _count=2) as clients:
        for client in clients:
            module_repos_collection.setup_virtual_machine(client, install_katello_agent=False)
            add_remote_execution_ssh_key(client.ip_addr)
            update_vm_host_location(client, module_loc.id)
        # Make the Satellite's own smart proxy visible in the module location
        # so remote execution jobs can be scheduled against these hosts.
        smart_proxy = (
            entities.SmartProxy()
            .search(query={'search': f'name={settings.server.hostname}'})[0]
            .read()
        )
        smart_proxy.location.append(entities.Location(id=module_loc.id))
        smart_proxy.update(['location'])
        yield clients
@pytest.fixture
def vm_content_hosts_module_stream(module_loc, module_repos_collection_module_stream):
    """Yield two RHEL8 content hosts registered to module-stream content.

    Mirrors ``vm_content_hosts`` but uses the module-stream repos
    collection; hosts are returned to the broker on teardown.
    """
    distro = module_repos_collection_module_stream.distro
    with VMBroker(nick=distro, host_classes={'host': ContentHost}, _count=2) as clients:
        for client in clients:
            module_repos_collection_module_stream.setup_virtual_machine(
                client, install_katello_agent=False
            )
            add_remote_execution_ssh_key(client.ip_addr)
            update_vm_host_location(client, module_loc.id)
        # Attach the Satellite's smart proxy to the module location so REX
        # jobs can reach these hosts.
        smart_proxy = (
            entities.SmartProxy()
            .search(query={'search': f'name={settings.server.hostname}'})[0]
            .read()
        )
        smart_proxy.location.append(entities.Location(id=module_loc.id))
        smart_proxy.update(['location'])
        yield clients
@pytest.fixture
def vm_host_collection(module_org, vm_content_hosts):
    """Host collection in ``module_org`` containing the two content hosts."""
    host_ids = []
    for host in vm_content_hosts:
        matches = entities.Host().search(query={'search': f'name={host.hostname}'})
        host_ids.append(matches[0].id)
    return entities.HostCollection(host=host_ids, organization=module_org).create()
@pytest.fixture
def vm_host_collection_module_stream(module_org, vm_content_hosts_module_stream):
    """Host collection containing the two module-stream content hosts."""
    host_ids = []
    for host in vm_content_hosts_module_stream:
        matches = entities.Host().search(query={'search': f'name={host.hostname}'})
        host_ids.append(matches[0].id)
    return entities.HostCollection(host=host_ids, organization=module_org).create()
def _run_remote_command_on_content_hosts(command, vm_clients):
"""run remote command on content hosts"""
for vm_client in vm_clients:
result = vm_client.run(command)
assert result.status == 0
def _is_package_installed(
    vm_clients, package_name, expect_installed=True, retries=10, iteration_sleep=15
):
    """Check whether package name was installed on the list of Virtual Machines
    clients.

    With ``expect_installed=True`` the result is True only when *every*
    client has the package; with ``expect_installed=False`` the result is
    True when *any* client still has it (callers assert ``not`` the result).
    Each client is polled up to ``retries`` times, sleeping
    ``iteration_sleep`` seconds between attempts, waiting for the expected
    state to appear.
    """
    assert len(vm_clients) > 0
    # Counter semantics depend on the expectation: count up from 0 toward
    # len(vm_clients) when expecting presence, count down from
    # len(vm_clients) toward 0 when expecting absence.
    installed = 0
    if not expect_installed:
        installed = len(vm_clients)
    for vm_client in vm_clients:
        for ind in range(retries):
            # rpm -q exits 0 iff the package is installed.
            result = vm_client.run(f'rpm -q {package_name}')
            if result.status == 0 and expect_installed:
                installed += 1
                break
            elif result.status != 0 and not expect_installed:
                installed -= 1
                break
            # State not settled yet; wait and retry unless out of attempts.
            if ind < retries - 1:
                time.sleep(iteration_sleep)
            else:
                break
    if expect_installed:
        return installed == len(vm_clients)
    else:
        return bool(installed)
def _install_package_with_assertion(vm_clients, package_name):
    """Yum-install ``package_name`` on each client and verify it landed."""
    for client in vm_clients:
        install_result = client.run(f'yum install -y {package_name}')
        assert install_result.status == 0
    assert _is_package_installed(vm_clients, package_name)
def _get_content_repository_urls(repos_collection, lce, content_view):
    """Returns a list of the content repository urls

    The list contains one pulp URL per custom repo in the collection, plus
    the RH sat-tools repo URL when that repo is CDN-backed.
    """
    custom_url_template = (
        'https://{hostname}/pulp/repos/{org_label}/{lce.name}'
        '/{content_view.name}/custom/{product_label}/{repository_name}'
    )
    rh_sat_tools_url_template = (
        'https://{hostname}/pulp/repos/{org_label}/{lce.name}'
        '/{content_view.name}/content/dist/rhel/server/{major_version}'
        '/{major_version}Server/$basearch/sat-tools/{product_version}/os'
    )
    repos_urls = [
        custom_url_template.format(
            hostname=settings.server.hostname,
            org_label=repos_collection.organization['label'],
            lce=lce,
            content_view=content_view,
            product_label=repos_collection.custom_product['label'],
            repository_name=repository['name'],
        )
        for repository in repos_collection.custom_repos_info
    ]
    # add sat-tool rh repo
    # Note: if sat-tools is not cdn it must be already in repos_urls
    # NOTE(review): iterating repos_collection directly -- presumably yields
    # the individual repository objects; confirm against RepositoryCollection.
    for repo in repos_collection:
        if isinstance(repo, SatelliteToolsRepository) and repo.cdn:
            repos_urls.append(
                rh_sat_tools_url_template.format(
                    hostname=settings.server.hostname,
                    org_label=repos_collection.organization['label'],
                    lce=lce,
                    content_view=content_view,
                    major_version=repo.distro_major_version,
                    product_version=repo.repo_data['version'],
                )
            )
    return repos_urls
@pytest.mark.tier2
@pytest.mark.upgrade
def test_positive_end_to_end(session, module_org, module_loc):
    """Perform end to end testing for host collection component

    :id: 1d40bc74-8e05-42fa-b6e3-2999dc3b730d

    :expectedresults: All expected CRUD actions finished successfully

    :CaseLevel: Integration

    :CaseImportance: High
    """
    hc_name = gen_string('alpha')
    new_name = gen_string('alpha')
    description = gen_string('alpha')
    host = entities.Host(organization=module_org, location=module_loc).create()
    with session:
        # Create new host collection
        session.hostcollection.create(
            {'name': hc_name, 'unlimited_hosts': False, 'max_hosts': 2, 'description': description}
        )
        assert session.hostcollection.search(hc_name)[0]['Name'] == hc_name
        session.hostcollection.associate_host(hc_name, host.name)
        # Read both the details and the hosts tabs in a single UI round trip.
        hc_values = session.hostcollection.read(hc_name, widget_names=['details', 'hosts'])
        assert hc_values['details']['name'] == hc_name
        assert hc_values['details']['description'] == description
        # The UI renders counts/limits as strings, hence string comparisons.
        assert hc_values['details']['content_hosts'] == '1'
        assert hc_values['details']['content_host_limit'] == '2'
        assert hc_values['hosts']['resources']['assigned'][0]['Name'] == host.name
        # View host collection on dashboard
        values = session.dashboard.read('HostCollections')
        assert [hc_name, '1'] in [
            [coll['Name'], coll['Content Hosts']] for coll in values['collections']
        ]
        # Update host collection with new name
        session.hostcollection.update(hc_name, {'details.name': new_name})
        assert session.hostcollection.search(new_name)[0]['Name'] == new_name
        # Delete host collection
        session.hostcollection.delete(new_name)
        assert not session.hostcollection.search(new_name)
@pytest.mark.tier2
def test_negative_install_via_remote_execution(session, module_org, module_loc):
    """Test basic functionality of the Hosts collection UI install package via
    remote execution.

    :id: c5fe46fb-0b34-4ea3-bc53-e86c18adaf94

    :setup: Create a host collection with two fake hosts assigned.

    :expectedresults: The package is not installed, and the job invocation
        status contains some expected values: hosts information, job status.

    :CaseLevel: Integration
    """
    # The hosts are bare API records (no real VM behind them), so the
    # remote-execution install job is expected to fail.
    hosts = []
    for _ in range(2):
        hosts.append(entities.Host(organization=module_org, location=module_loc).create())
    host_collection = entities.HostCollection(
        host=[host.id for host in hosts], organization=module_org
    ).create()
    with session:
        job_values = session.hostcollection.manage_packages(
            host_collection.name,
            packages=FAKE_0_CUSTOM_PACKAGE_NAME,
            action='install',
            action_via='via remote execution',
        )
        assert job_values['job_status'] == 'Failed'
        assert job_values['job_status_progress'] == '100%'
        assert int(job_values['total_hosts']) == len(hosts)
        assert {host.name for host in hosts} == {host['Host'] for host in job_values['hosts_table']}
@pytest.mark.tier2
def test_negative_install_via_custom_remote_execution(session, module_org, module_loc):
    """Test basic functionality of the Hosts collection UI install package via
    remote execution - customize first.

    :id: 5aa7f084-bab7-4e62-9bf3-a37fd4aa71fa

    :setup: Create a host collection with two fake hosts assigned.

    :expectedresults: The package is not installed, and the job invocation
        status contains some expected values: hosts information, job status.

    :CaseLevel: Integration
    """
    # Same scenario as the plain REX test above, but going through the
    # "customize first" job form; fake hosts guarantee the job fails.
    hosts = []
    for _ in range(2):
        hosts.append(entities.Host(organization=module_org, location=module_loc).create())
    host_collection = entities.HostCollection(
        host=[host.id for host in hosts], organization=module_org
    ).create()
    with session:
        job_values = session.hostcollection.manage_packages(
            host_collection.name,
            packages=FAKE_0_CUSTOM_PACKAGE_NAME,
            action='install',
            action_via='via remote execution - customize first',
        )
        assert job_values['job_status'] == 'Failed'
        assert job_values['job_status_progress'] == '100%'
        assert int(job_values['total_hosts']) == len(hosts)
        assert {host.name for host in hosts} == {host['Host'] for host in job_values['hosts_table']}
@pytest.mark.upgrade
@pytest.mark.tier3
def test_positive_add_host(session):
    """Check if host can be added to Host Collection

    :id: 80824c9f-15a1-4f76-b7ac-7d9ca9f6ed9e

    :expectedresults: Host is added to Host Collection successfully

    :CaseLevel: System
    """
    hc_name = gen_string('alpha')
    # Build a dedicated org/location with a published+promoted content view
    # so the host has a valid content facet to register with.
    org = entities.Organization().create()
    loc = entities.Location().create()
    cv = entities.ContentView(organization=org).create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    cv.publish()
    promote(cv.read().version[0], lce.id)
    host = entities.Host(
        organization=org,
        location=loc,
        content_facet_attributes={'content_view_id': cv.id, 'lifecycle_environment_id': lce.id},
    ).create()
    with session:
        # Switch UI context to the freshly created org/location.
        session.organization.select(org_name=org.name)
        session.location.select(loc_name=loc.name)
        session.hostcollection.create({'name': hc_name})
        assert session.hostcollection.search(hc_name)[0]['Name'] == hc_name
        session.hostcollection.associate_host(hc_name, host.name)
        hc_values = session.hostcollection.read(hc_name, widget_names='hosts')
        assert hc_values['hosts']['resources']['assigned'][0]['Name'] == host.name
@pytest.mark.tier3
@pytest.mark.upgrade
def test_positive_install_package(session, module_org, vm_content_hosts, vm_host_collection):
    """Install a package to hosts inside host collection remotely

    :id: eead8392-0ffc-4062-b045-5d0252670775

    :expectedresults: Package was successfully installed on all the hosts
        in host collection

    :CaseLevel: System
    """
    with session:
        session.organization.select(org_name=module_org.name)
        session.hostcollection.manage_packages(
            vm_host_collection.name, packages=FAKE_0_CUSTOM_PACKAGE_NAME, action='install'
        )
        # Poll the clients until rpm -q reports the package on both of them.
        assert _is_package_installed(vm_content_hosts, FAKE_0_CUSTOM_PACKAGE_NAME)
@pytest.mark.tier3
@pytest.mark.upgrade
def test_positive_remove_package(session, module_org, vm_content_hosts, vm_host_collection):
    """Remove a package from hosts inside host collection remotely

    :id: 488fa88d-d0ef-4108-a050-96fb621383df

    :expectedresults: Package was successfully removed from all the hosts
        in host collection

    :CaseLevel: System
    """
    # Pre-install the package so there is something to remove.
    _install_package_with_assertion(vm_content_hosts, FAKE_0_CUSTOM_PACKAGE)
    with session:
        session.organization.select(org_name=module_org.name)
        session.hostcollection.manage_packages(
            vm_host_collection.name, packages=FAKE_0_CUSTOM_PACKAGE_NAME, action='remove'
        )
        # expect_installed=False: helper returns True if ANY host still has
        # the package, so the negated assertion means "gone everywhere".
        assert not _is_package_installed(
            vm_content_hosts, FAKE_0_CUSTOM_PACKAGE_NAME, expect_installed=False
        )
@pytest.mark.tier3
def test_positive_upgrade_package(session, module_org, vm_content_hosts, vm_host_collection):
    """Upgrade a package on hosts inside host collection remotely

    :id: 5a6fff0a-686f-419b-a773-4d03713e47e9

    :expectedresults: Package was successfully upgraded on all the hosts in
        host collection

    :CaseLevel: System
    """
    # Install the older versioned package, then update by name and expect
    # the newer versioned rpm to be reported installed.
    _install_package_with_assertion(vm_content_hosts, FAKE_1_CUSTOM_PACKAGE)
    with session:
        session.organization.select(org_name=module_org.name)
        session.hostcollection.manage_packages(
            vm_host_collection.name, packages=FAKE_1_CUSTOM_PACKAGE_NAME, action='update'
        )
        assert _is_package_installed(vm_content_hosts, FAKE_2_CUSTOM_PACKAGE)
@pytest.mark.tier3
@pytest.mark.upgrade
def test_positive_install_package_group(session, module_org, vm_content_hosts, vm_host_collection):
    """Install a package group to hosts inside host collection remotely

    :id: 2bf47798-d30d-451a-8de5-bc03bd8b9a48

    :expectedresults: Package group was successfully installed on all the
        hosts in host collection

    :CaseLevel: System
    """
    with session:
        session.organization.select(org_name=module_org.name)
        session.hostcollection.manage_packages(
            vm_host_collection.name,
            content_type='Package Group',
            packages=FAKE_0_CUSTOM_PACKAGE_GROUP_NAME,
            action='install',
        )
        # The group is verified member-by-member on every client.
        for package in FAKE_0_CUSTOM_PACKAGE_GROUP:
            assert _is_package_installed(vm_content_hosts, package)
@pytest.mark.tier3
def test_positive_remove_package_group(session, module_org, vm_content_hosts, vm_host_collection):
    """Remove a package group from hosts inside host collection remotely

    :id: 458897dc-9836-481a-b777-b147d64836f2

    :expectedresults: Package group was successfully removed on all the
        hosts in host collection

    :CaseLevel: System
    """
    # Pre-install the group locally on each client so the removal has effect.
    for client in vm_content_hosts:
        result = client.run(f'yum groups install -y {FAKE_0_CUSTOM_PACKAGE_GROUP_NAME}')
        assert result.status == 0
    for package in FAKE_0_CUSTOM_PACKAGE_GROUP:
        assert _is_package_installed(vm_content_hosts, package)
    with session:
        session.organization.select(org_name=module_org.name)
        session.hostcollection.manage_packages(
            vm_host_collection.name,
            content_type='Package Group',
            packages=FAKE_0_CUSTOM_PACKAGE_GROUP_NAME,
            action='remove',
        )
        # Negated helper call: True would mean some client still has the rpm.
        for package in FAKE_0_CUSTOM_PACKAGE_GROUP:
            assert not _is_package_installed(vm_content_hosts, package, expect_installed=False)
@pytest.mark.tier3
@pytest.mark.upgrade
def test_positive_install_errata(session, module_org, vm_content_hosts, vm_host_collection):
    """Install an errata to the hosts inside host collection remotely

    :id: 69c83000-0b46-4735-8c03-e9e0b48af0fb

    :expectedresults: Errata was successfully installed in all the hosts in
        host collection

    :CaseLevel: System
    """
    # Install the vulnerable package version; applying the erratum should
    # then pull in the fixed version.
    _install_package_with_assertion(vm_content_hosts, FAKE_1_CUSTOM_PACKAGE)
    with session:
        session.organization.select(org_name=module_org.name)
        task_values = session.hostcollection.install_errata(
            vm_host_collection.name,
            FAKE_2_ERRATA_ID,
            install_via='via remote execution',
        )
        assert task_values['result'] == 'success'
        assert _is_package_installed(vm_content_hosts, FAKE_2_CUSTOM_PACKAGE)
@pytest.mark.tier3
def test_positive_change_assigned_content(
    session, module_org, module_lce, vm_content_hosts, vm_host_collection, module_repos_collection
):
    """Change Assigned Life cycle environment and content view of host
    collection

    :id: e426064a-db3d-4a94-822a-fc303defe1f9

    :customerscenario: true

    :steps:
        1. Setup activation key with content view that contain product
           repositories
        2. Prepare hosts (minimum 2) and subscribe them to activation key,
           katello agent must be also installed and running on each host
        3. Create a host collection and add the hosts to it
        4. Run "subscription-manager repos" command on each host to notice
           the repos urls current values
        5. Create a new life cycle environment
        6. Create a copy of content view and publish/promote it to the new
           life cycle environment
        7. Go to Hosts => Hosts Collections and select the host collection
        8. under host collection details tab notice the Actions Area and
           click on the link
           "Change assigned Lifecycle Environment or Content View"
        9. When a dialog box is open, select the new life cycle environment
           and the new content view
        10. Click on "Assign" button and click "Yes" button on confirmation
            dialog when it appears
        11. After last step the host collection change task page will
            appear
        12. Run "subscription-manager refresh" command on each host
        13. Run "subscription-manager repos" command on each host

    :expectedresults:
        1. The host collection change task successfully finished
        2. The "subscription-manager refresh" command successfully executed
           and "All local data refreshed" message is displayed
        3. The urls listed by last command "subscription-manager repos" was
           updated to the new Life cycle environment and content view
           names

    :BZ: 1315280

    :CaseLevel: System
    """
    new_lce_name = gen_string('alpha')
    new_cv_name = gen_string('alpha')
    new_lce = entities.LifecycleEnvironment(name=new_lce_name, organization=module_org).create()
    # Copy the collection's content view, publish it and promote the new
    # version into the new lifecycle environment.
    content_view = entities.ContentView(
        id=module_repos_collection.setup_content_data['content_view']['id']
    ).read()
    new_content_view = entities.ContentView(id=content_view.copy(data={'name': new_cv_name})['id'])
    new_content_view.publish()
    new_content_view = new_content_view.read()
    new_content_view_version = new_content_view.version[0]
    new_content_view_version.promote(data={'environment_ids': new_lce.id})
    # repository urls listed by command "subscription-manager repos" looks
    # like:
    # Repo URL : https://{host}/pulp/repos/{org}/{lce}/{cv}/custom
    # /{product_name}/{repo_name}
    repo_line_start_with = 'Repo URL:  '
    expected_repo_urls = _get_content_repository_urls(
        module_repos_collection, module_lce, content_view
    )
    # Baseline: every client must currently expose exactly the repo URLs of
    # the original LCE/content view.
    for client in vm_content_hosts:
        result = client.run("subscription-manager repos")
        assert result.status == 0
        client_repo_urls = [
            line.split(' ')[-1] for line in result.stdout if line.startswith(repo_line_start_with)
        ]
        assert len(client_repo_urls) > 0
        assert set(expected_repo_urls) == set(client_repo_urls)
    with session:
        session.organization.select(org_name=module_org.name)
        task_values = session.hostcollection.change_assigned_content(
            vm_host_collection.name, new_lce.name, new_content_view.name
        )
        assert task_values['result'] == 'success'
        # After reassignment the clients must expose the URLs of the new
        # LCE/content view once their subscription data is refreshed.
        expected_repo_urls = _get_content_repository_urls(
            module_repos_collection, new_lce, new_content_view
        )
        for client in vm_content_hosts:
            result = client.run("subscription-manager refresh")
            assert result.status == 0
            assert 'All local data refreshed' in result.stdout
            result = client.run("subscription-manager repos")
            assert result.status == 0
            client_repo_urls = [
                line.split(' ')[-1]
                for line in result.stdout
                if line.startswith(repo_line_start_with)
            ]
            assert len(client_repo_urls) > 0
            assert set(expected_repo_urls) == set(client_repo_urls)
@pytest.mark.tier3
def test_negative_hosts_limit(session, module_org, module_loc):
    """Check that Host limit actually limits usage

    :id: 57b70977-2110-47d9-be3b-461ad15c70c7

    :Steps:
        1. Create Host Collection entity that can contain only one Host
           (using Host Limit field)
        2. Create Host and add it to Host Collection. Check that it was
           added successfully
        3. Create one more Host and try to add it to Host Collection
        4. Check that expected error is shown

    :expectedresults: Second host is not added to Host Collection and
        appropriate error is shown

    :CaseLevel: System
    """
    hc_name = gen_string('alpha')
    # NOTE(review): cv/lce are created in a brand-new org while the hosts
    # below are created in module_org -- confirm this cross-org content
    # facet is intentional.
    org = entities.Organization().create()
    cv = entities.ContentView(organization=org).create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    cv.publish()
    promote(cv.read().version[0], lce.id)
    hosts = []
    for _ in range(2):
        hosts.append(
            entities.Host(
                organization=module_org,
                location=module_loc,
                content_facet_attributes={
                    'content_view_id': cv.id,
                    'lifecycle_environment_id': lce.id,
                },
            ).create()
        )
    assert len(hosts) == 2
    with session:
        session.hostcollection.create({'name': hc_name, 'unlimited_hosts': False, 'max_hosts': 1})
        assert session.hostcollection.search(hc_name)[0]['Name'] == hc_name
        session.hostcollection.associate_host(hc_name, hosts[0].name)
        # Adding the second host must trip the limit and surface the UI error.
        with pytest.raises(AssertionError) as context:
            session.hostcollection.associate_host(hc_name, hosts[1].name)
        assert "cannot have more than 1 host(s) associated with host collection '{}'".format(
            hc_name
        ) in str(context.value)
@pytest.mark.tier3
@pytest.mark.upgrade
def test_positive_install_module_stream(
    session, vm_content_hosts_module_stream, vm_host_collection_module_stream
):
    """Install a module-stream to hosts inside host collection remotely

    :id: e5d882e0-3520-4cb6-8629-ef4c18692868

    :Steps:
        1. Run dnf upload profile to sync module streams from hosts to Satellite
        2. Navigate to host_collection
        3. Install the module stream duck
        4. Verify that remote job get passed
        5. Verify that package get installed

    :expectedresults: Module-Stream should get installed on all the hosts
        in host collection

    :CaseLevel: System
    """
    with session:
        # Sync the clients' module-stream profiles to the Satellite first so
        # the UI knows which streams are available on them.
        _run_remote_command_on_content_hosts(
            'dnf -y upload-profile', vm_content_hosts_module_stream
        )
        result = session.hostcollection.manage_module_streams(
            vm_host_collection_module_stream.name,
            action_type="Install",
            module_name=FAKE_3_CUSTOM_PACKAGE_NAME,
            stream_version="0",
        )
        assert result['overview']['job_status'] == 'Success'
        assert result['overview']['job_status_progress'] == '100%'
        assert int(result['overview']['total_hosts']) == 2
        assert _is_package_installed(vm_content_hosts_module_stream, FAKE_3_CUSTOM_PACKAGE)
@pytest.mark.tier3
@pytest.mark.upgrade
def test_positive_install_modular_errata(
    session, vm_content_hosts_module_stream, vm_host_collection_module_stream
):
    """Install Modular Errata generated from module streams.

    :id: 8d6fb447-af86-4084-a147-7910f0cecdef

    :Steps:
        1. Generate modular errata by installing older version of module stream
        2. Run dnf upload-profile
        3. Install the modular errata by 'remote execution'
        4. Verify that latest package get installed

    :expectedresults: Modular Errata should get installed on all hosts in host
        collection.

    :CaseLevel: System
    """
    with session:
        # Install an old, pinned module-stream version so the modular
        # erratum becomes applicable on the clients.
        stream = "0"
        version = "20180704111719"
        _module_install_command = 'dnf -y module install {}:{}:{}'.format(
            FAKE_4_CUSTOM_PACKAGE_NAME, stream, version
        )
        _run_remote_command_on_content_hosts(
            _module_install_command, vm_content_hosts_module_stream
        )
        # Report the installed streams back to the Satellite.
        _run_remote_command_on_content_hosts(
            'dnf -y upload-profile', vm_content_hosts_module_stream
        )
        result = session.hostcollection.install_errata(
            vm_host_collection_module_stream.name,
            FAKE_0_MODULAR_ERRATA_ID,
            install_via='via remote execution',
        )
        assert result['job_status'] == 'Success'
        assert result['job_status_progress'] == '100%'
        assert int(result['total_hosts']) == 2
        assert _is_package_installed(vm_content_hosts_module_stream, FAKE_5_CUSTOM_PACKAGE)
| jyejare/robottelo | tests/foreman/ui/test_hostcollection.py | Python | gpl-3.0 | 28,395 |
# Copyright (c) 2015 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from ironicclient import client
import keystoneclient.v2_0.client as ksclient
from oslo_log import log as logging
import six
from congress.datasources import datasource_driver
from congress.datasources import datasource_utils as ds_utils
LOG = logging.getLogger(__name__)
def d6service(name, keys, inbox, datapath, args):
    """This method is called by d6cage to create a dataservice instance.

    All arguments are passed straight through to the IronicDriver
    constructor; ``args`` carries the datasource credentials.
    """
    return IronicDriver(name, keys, inbox, datapath, args)
class IronicDriver(datasource_driver.DataSourceDriver,
datasource_driver.ExecutionDriver):
CHASSISES = "chassises"
NODES = "nodes"
NODE_PROPERTIES = "node_properties"
PORTS = "ports"
DRIVERS = "drivers"
ACTIVE_HOSTS = "active_hosts"
# This is the most common per-value translator, so define it once here.
value_trans = {'translation-type': 'VALUE'}
def safe_id(x):
if isinstance(x, six.string_types):
return x
try:
return x['id']
except KeyError:
return str(x)
def safe_port_extra(x):
try:
return x['vif_port_id']
except KeyError:
return ""
chassises_translator = {
'translation-type': 'HDICT',
'table-name': CHASSISES,
'selector-type': 'DOT_SELECTOR',
'field-translators':
({'fieldname': 'uuid', 'col': 'id', 'translator': value_trans},
{'fieldname': 'created_at', 'translator': value_trans},
{'fieldname': 'updated_at', 'translator': value_trans})}
nodes_translator = {
'translation-type': 'HDICT',
'table-name': NODES,
'selector-type': 'DOT_SELECTOR',
'field-translators':
({'fieldname': 'uuid', 'col': 'id', 'translator': value_trans},
{'fieldname': 'chassis_uuid', 'col': 'owner_chassis',
'translator': value_trans},
{'fieldname': 'power_state', 'translator': value_trans},
{'fieldname': 'maintenance', 'translator': value_trans},
{'fieldname': 'properties', 'translator':
{'translation-type': 'HDICT',
'table-name': NODE_PROPERTIES,
'parent-key': 'id',
'parent-col-name': 'properties',
'selector-type': 'DICT_SELECTOR',
'in-list': False,
'field-translators':
({'fieldname': 'memory_mb',
'translator': value_trans},
{'fieldname': 'cpu_arch',
'translator': value_trans},
{'fieldname': 'local_gb',
'translator': value_trans},
{'fieldname': 'cpus',
'translator': value_trans})}},
{'fieldname': 'driver', 'translator': value_trans},
{'fieldname': 'instance_uuid', 'col': 'running_instance',
'translator': value_trans},
{'fieldname': 'created_at', 'translator': value_trans},
{'fieldname': 'provision_updated_at', 'translator': value_trans},
{'fieldname': 'updated_at', 'translator': value_trans})}
ports_translator = {
'translation-type': 'HDICT',
'table-name': PORTS,
'selector-type': 'DOT_SELECTOR',
'field-translators':
({'fieldname': 'uuid', 'col': 'id', 'translator': value_trans},
{'fieldname': 'node_uuid', 'col': 'owner_node',
'translator': value_trans},
{'fieldname': 'address', 'col': 'mac_address',
'translator': value_trans},
{'fieldname': 'extra', 'col': 'vif_port_id', 'translator':
{'translation-type': 'VALUE',
'extract-fn': safe_port_extra}},
{'fieldname': 'created_at', 'translator': value_trans},
{'fieldname': 'updated_at', 'translator': value_trans})}
drivers_translator = {
'translation-type': 'HDICT',
'table-name': DRIVERS,
'selector-type': 'DOT_SELECTOR',
'field-translators':
({'fieldname': 'name', 'translator': value_trans},
{'fieldname': 'hosts', 'translator':
{'translation-type': 'LIST',
'table-name': ACTIVE_HOSTS,
'parent-key': 'name',
'parent-col-name': 'name',
'val-col': 'hosts',
'translator':
{'translation-type': 'VALUE'}}})}
TRANSLATORS = [chassises_translator, nodes_translator, ports_translator,
drivers_translator]
    def __init__(self, name='', keys='', inbox=None, datapath=None, args=None):
        """Initialize the driver, authenticate, and start polling.

        ``args`` must carry the OpenStack credentials consumed by
        get_ironic_credentials() (username/password/auth_url/tenant_name).
        """
        super(IronicDriver, self).__init__(name, keys, inbox, datapath, args)
        # ExecutionDriver is a mixin; initialize it explicitly because the
        # datasource base class does not do so.
        datasource_driver.ExecutionDriver.__init__(self)
        self.creds = self.get_ironic_credentials(args)
        self.ironic_client = client.get_client(**self.creds)
        self._init_end_start_poll()
@staticmethod
def get_datasource_info():
result = {}
result['id'] = 'ironic'
result['description'] = ('Datasource driver that interfaces with '
'OpenStack bare metal aka ironic.')
result['config'] = ds_utils.get_openstack_required_config()
result['secret'] = ['password']
return result
def get_ironic_credentials(self, creds):
d = {}
d['api_version'] = '1'
d['insecure'] = False
# save a copy to renew auth token
d['username'] = creds['username']
d['password'] = creds['password']
d['auth_url'] = creds['auth_url']
d['tenant_name'] = creds['tenant_name']
# ironicclient.get_client() uses different names
d['os_username'] = creds['username']
d['os_password'] = creds['password']
d['os_auth_url'] = creds['auth_url']
d['os_tenant_name'] = creds['tenant_name']
return d
def update_from_datasource(self):
try:
chassises = self.ironic_client.chassis.list(detail=True, limit=0)
self._translate_chassises(chassises)
nodes = self.ironic_client.node.list(detail=True, limit=0)
self._translate_nodes(nodes)
ports = self.ironic_client.port.list(detail=True, limit=0)
self._translate_ports(ports)
drivers = self.ironic_client.driver.list()
self._translate_drivers(drivers)
except Exception as e:
# TODO(zhenzanz): this is a workaround. The ironic client should
# handle 401 error.
if e.http_status == 401:
keystone = ksclient.Client(**self.creds)
self.ironic_client.http_client.auth_token = keystone.auth_token
else:
raise e
@ds_utils.update_state_on_changed(CHASSISES)
def _translate_chassises(self, obj):
row_data = IronicDriver.convert_objs(obj,
IronicDriver.chassises_translator)
return row_data
@ds_utils.update_state_on_changed(NODES)
def _translate_nodes(self, obj):
row_data = IronicDriver.convert_objs(obj,
IronicDriver.nodes_translator)
return row_data
@ds_utils.update_state_on_changed(PORTS)
def _translate_ports(self, obj):
row_data = IronicDriver.convert_objs(obj,
IronicDriver.ports_translator)
return row_data
@ds_utils.update_state_on_changed(DRIVERS)
def _translate_drivers(self, obj):
row_data = IronicDriver.convert_objs(obj,
IronicDriver.drivers_translator)
return row_data
def execute(self, action, action_args):
"""Overwrite ExecutionDriver.execute()."""
# action can be written as a method or an API call.
func = getattr(self, action, None)
if func and self.is_executable(func):
func(action_args)
else:
self._execute_api(self.ironic_client, action, action_args)
| ekcs/congress | congress/datasources/ironic_driver.py | Python | apache-2.0 | 9,443 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# See license.txt
import frappe
import unittest
import json
from frappe.website.doctype.personal_data_download_request.personal_data_download_request import get_user_data
from frappe.contacts.doctype.contact.contact import get_contact_name
from frappe.core.doctype.user.user import create_contact
class TestRequestPersonalData(unittest.TestCase):
	"""Integration tests for the Personal Data Download Request doctype."""

	def setUp(self):
		# every test needs a website user (a contact is created alongside)
		create_user_if_not_exists(email='test_privacy@example.com')

	def tearDown(self):
		frappe.db.sql("""DELETE FROM `tabPersonal Data Download Request`""")

	def test_user_data_creation(self):
		"""get_user_data must return the user's linked Contact records."""
		user_data = json.loads(get_user_data('test_privacy@example.com'))
		contact_name = get_contact_name('test_privacy@example.com')
		expected_data = {'Contact': frappe.get_all('Contact', {"name": contact_name}, ["*"])}
		# round-trip through JSON (default=str) so dates compare as strings
		expected_data = json.loads(json.dumps(expected_data, default=str))
		self.assertEqual({'Contact': user_data['Contact']}, expected_data)

	def test_file_and_email_creation(self):
		"""Saving a request must attach exactly one file and queue one mail."""
		frappe.set_user('test_privacy@example.com')
		download_request = frappe.get_doc({
			"doctype": 'Personal Data Download Request',
			'user': 'test_privacy@example.com'
		})
		download_request.save(ignore_permissions=True)
		frappe.set_user('Administrator')
		file_count = frappe.db.count('File', {
			'attached_to_doctype':'Personal Data Download Request',
			'attached_to_name': download_request.name
		})
		self.assertEqual(file_count, 1)
		# the most recently queued mail must be the download notification
		email_queue = frappe.get_all('Email Queue',
			fields=['message'],
			order_by="creation DESC",
			limit=1)
		self.assertTrue("Subject: Download Your Data" in email_queue[0].message)
		frappe.db.sql("delete from `tabEmail Queue`")
def create_user_if_not_exists(email, first_name = None):
	"""Create a fresh Website User for *email* (deleting any stale one)
	and attach a contact record to it."""
	frappe.delete_doc_if_exists("User", email)
	doc = frappe.get_doc({
		"doctype": "User",
		"user_type": "Website User",
		"email": email,
		"send_welcome_email": 0,
		"first_name": first_name or email.split("@")[0],
		"birth_date": frappe.utils.now_datetime()
	})
	user = doc.insert(ignore_permissions=True)
	create_contact(user=user)
| mhbu50/frappe | frappe/website/doctype/personal_data_download_request/test_personal_data_download_request.py | Python | mit | 2,103 |
import flask
from pypi_vm.services import package_service
from pypi_vm.viewmodels.shared.viewmodel_base import ViewModelBase
class SiteMapViewModel(ViewModelBase):
    """View model for the sitemap page: a capped package list, a static
    last-updated stamp, and the site's base URL."""

    def __init__(self, limit: int):
        super().__init__()
        self.packages = package_service.all_packages(limit)
        self.last_updated_text = "2018-07-15"
        request = flask.request
        self.site = "{}://{}".format(request.scheme, request.host)
| Wintellect/WintellectWebinars | 2019-06-06-ten-tips-python-web-devs-kennedy/code/top_10_web_explore/ex07_viewmodels/pypi_vm/viewmodels/utils/sitemap_viewmodel.py | Python | apache-2.0 | 415 |
#!/usr/bin/env python2
# Copyright (C) 2016-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###################################################################################
# #
# ESPResSo++ Python script for an MD simulation of a simple Lennard-Jones fluid #
# #
###################################################################################
"""
This is an example for an MD simulation of a simple Lennard-Jones fluid
with ESPResSo++. We will start with particles at random positions within
the simulation box interacting via a shifted Lennard-Jones type potential
with an interaction cutoff at 2.5.
Newtons equations of motion are integrated with a Velocity-Verlet integrator.
The canonical (NVT) ensemble is realized by using a Langevin thermostat.
In order to prevent explosion due to strongly overlapping volumes of
random particles the system needs to be warmed up first.
Warm-up is accomplished by using a repelling-only LJ interaction
(cutoff=1.12246, shift=0.25) with a force capping at radius 0.6
and initial small LJ epsilon value of 0.1.
System is warmup with steepest descent energy minimization method.
"""
# import the ESPResSo++ python module
import espressopp
########################################################################
# 1. specification of the main simulation parameters                   #
########################################################################
# NOTE: this example targets Python 2 (print statements below).

# number of particles
Npart = 3000
# density of particles
rho = 0.8442
# length of simulation box
L = pow(Npart/rho, 1.0/3.0)
# cubic simulation box of size L
box = (L, L, L)
# cutoff of the short range potential
r_cutoff = 2.5
# VerletList skin size (also used for domain decomposition)
skin = 0.4
# the temperature of the system
temperature = 1.0
# time step for the velocity verlet integrator
dt = 0.005
# Lennard Jones epsilon during equilibration phase
epsilon = 1.0
# Lennard Jones sigma during warmup and equilibration
sigma = 1.0
# number of integration steps performed in each warm-up loop
warmup_isteps = 200
# number of equilibration loops
equil_nloops = 100
# number of integration steps performed in each equilibration loop
equil_isteps = 100
# EM settings: em_gamma scales the steepest-descent step,
# em_ftol is the force tolerance that ends the minimization
em_gamma = 0.0001
em_ftol = 10.0
# print ESPResSo++ version and compile info
print espressopp.Version().info()
# print simulation parameters (useful to have them in a log file)
print "Npart = ", Npart
print "rho = ", rho
print "L = ", L
print "box = ", box
print "r_cutoff = ", r_cutoff
print "skin = ", skin
print "temperature = ", temperature
print "dt = ", dt
print "epsilon = ", epsilon
print "sigma = ", sigma
print "equil_nloops = ", equil_nloops
print "equil_isteps = ", equil_isteps
########################################################################
# 2. setup of the system, random number generator and parallelisation  #
########################################################################

# create the basic system
system = espressopp.System()
# use the random number generator that is included within the ESPResSo++ package
system.rng = espressopp.esutil.RNG()
# use orthorhombic periodic boundary conditions
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
# set the skin size used for verlet lists and cell sizes
system.skin = skin
# get the number of CPUs to use
NCPUs = espressopp.MPI.COMM_WORLD.size
# calculate a regular 3D grid according to the number of CPUs available
nodeGrid = espressopp.tools.decomp.nodeGrid(NCPUs,box,r_cutoff,skin)
# calculate a 3D subgrid to speed up verlet list builds and communication
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, r_cutoff, skin)
# create a domain decomposition particle storage with the calculated nodeGrid and cellGrid
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
print "NCPUs = ", NCPUs
print "nodeGrid = ", nodeGrid
print "cellGrid = ", cellGrid
########################################################################
# 3. adding the particles                                              #
########################################################################

print('adding {} particles to the system ...'.format(Npart))
# particles start at uniformly random positions; overlaps are resolved
# by the energy minimization in section 5
particle_list = [(pid, system.bc.getRandomPos()) for pid in range(Npart)]
system.storage.addParticles(particle_list, 'id', 'pos')
# distribute the particles to their owning CPUs
system.storage.decompose()
print('added {} particles'.format(Npart))
########################################################################
# 4. setting up interaction potential for the equilibration            #
########################################################################

# create a new verlet list that uses a cutoff radius = r_cutoff
# the verlet radius is automatically increased by system.skin (see system setup)
verletlist = espressopp.VerletList(system, r_cutoff)
# define a Lennard-Jones interaction that uses a verlet list
interaction = espressopp.interaction.VerletListLennardJones(verletlist)
system.addInteraction(interaction)
# use a Lennard-Jones potential between 2 particles of type 0
# the potential is automatically shifted so that U(r=cutoff) = 0.0
# if the potential should not be shifted set shift=0.0
# NOTE(review): shift=0.0 is passed below, i.e. the potential is NOT
# shifted here, although the module docstring speaks of a shifted LJ
# potential -- confirm which behaviour is intended.
potential = interaction.setPotential(type1=0, type2=0,
                                     potential=espressopp.interaction.LennardJones(
                                         epsilon=epsilon, sigma=sigma, cutoff=r_cutoff, shift=0.0))

# 5. Run EM
print('Running energy minimization, ftol={} max_displacement={}, steps={}, gamma={}'.format(
    em_ftol, 0.001*L, warmup_isteps, em_gamma))
#import logging
#logging.getLogger('MinimizeEnergy').setLevel(logging.DEBUG)
# run the steepest-descent minimizer in warmup_isteps-sized chunks until
# the force tolerance is met (run() returns True on convergence)
minimize_energy = espressopp.integrator.MinimizeEnergy(system, em_gamma, em_ftol, 0.001*L)
while not minimize_energy.run(warmup_isteps, True):
    pass
########################################################################
# 6. setup of the integrator and simulation ensemble                   #
########################################################################

# use a velocity Verlet integration scheme
integrator = espressopp.integrator.VelocityVerlet(system)
# set the integration step
integrator.dt = dt

# couple the system to a Langevin thermostat (canonical NVT ensemble)
# unless temperature is None; identity comparison is the idiomatic way
# to test for None (was: temperature != None)
if temperature is not None:
    # create a Langevin thermostat
    thermostat = espressopp.integrator.LangevinThermostat(system)
    # set Langevin friction constant
    thermostat.gamma = 1.0
    # set temperature
    thermostat.temperature = temperature
    # tell the integrator to use this thermostat
    integrator.addExtension(thermostat)

## steps 2. and 3. could be short-cut by the following expression:
## system, integrator = espressopp.standard_system.Default(box, warmup_cutoff, skin, dt, temperature)
########################################################################
# 7. running the equilibration loop #
########################################################################
# add the new interaction to the system
system.addInteraction(interaction)
# since the interaction cut-off changed the size of the cells that are used
# to speed up verlet list builds should be adjusted accordingly
system.storage.cellAdjust()
# set all integrator timers to zero again (they were increased during warmup)
integrator.resetTimers()
# set integrator time step to zero again
integrator.step = 0
print "starting equilibration ..."
# print inital status information
espressopp.tools.analyse.info(system, integrator)
for step in range(equil_nloops):
# perform equilibration_isteps integration steps
integrator.run(equil_isteps)
# print status information
espressopp.tools.analyse.info(system, integrator)
print "equilibration finished"
########################################################################
# 8. writing configuration to file                                     #
########################################################################

# write folded xyz coordinates and particle velocities into a file
# format of xyz file is:
# first line      : number of particles
# second line     : box_Lx, box_Ly, box_Lz
# all other lines : ParticleID ParticleType x_pos y_pos z_pos x_vel y_vel z_vel
filename = "lennard_jones_fluid_%0i.xyz" % integrator.step
print "writing final configuration file ..."
espressopp.tools.writexyz(filename, system, velocities = True, unfolded = False)

# also write a PDB file which can be used to visualize configuration with VMD
print "writing pdb file ..."
filename = "lennard_jones_fluid_%0i.pdb" % integrator.step
espressopp.tools.pdbwrite(filename, system, molsize=Npart)

print "finished."
| kkreis/espressopp | examples/lennard_jones_em/lennard_jones.py | Python | gpl-3.0 | 9,827 |
#!/usr/bin/python
import os
import sys
sys.path.append(os.path.join(os.getcwd(), '../'))
import pytest
import blackjack.dealer as dealer
import blackjack.table as table
def test_dummy():
    """Smoke test: a Table can be constructed repeatedly without raising."""
    for _ in range(5):
        table.Table()
def test_string_representation():
    """A dealer must format as 'Dealer <name>'."""
    name = 'Lob'
    assert str(dealer.Dealer(name, None)) == "Dealer {}".format(name)
| suhasgaddam/blackjack-python | blackjack/test/test_dealer.py | Python | mit | 358 |
import glob, os,csv
import sys
import numpy as np
import matplotlib.pyplot as plt
import plotly.plotly as py
import plotly.graph_objs as go
def read_files(folder):
    """Read every ``*.csv`` run file in *folder*.

    Each file is expected to have a header row followed by rows whose
    column 1 is a step label and column 2 a numeric value; only the
    first 250 data rows of each file are used.

    Returns ``[runs, steps]`` where *runs* holds one list of floats per
    file and *steps* is the step column of the last file read, or ``[]``
    when the folder holds no CSV files (previously that case raised
    NameError because *steps* was never defined).

    NOTE(review): os.chdir() is kept for backward compatibility, but it
    is a process-wide side effect; callers relying on the cwd beware.
    """
    runs = []
    steps = []  # defined up front so an empty folder cannot crash
    print("DIRECTORY")
    print(folder)
    os.chdir(folder)
    for file in glob.glob("*.csv"):
        with open(file) as csvfile:
            readCSV = csv.reader(csvfile, delimiter=',')
            steps = []
            values = []
            next(readCSV)  # skip the header row
            for i, row in enumerate(readCSV):
                if i < 250:  # cap every run at 250 samples
                    steps.append(row[1])
                    values.append(float(row[2]))
            runs.append(values)
    return [runs, steps]
def main():
    """Average each group of CSV runs named on the command line and build
    one plotly line trace per group.

    NOTE(review): sys.argv[1] is only referenced in the commented-out
    axis-label code, and the traces collected in ``data`` are never
    rendered or saved -- presumably a plotting/export call was removed;
    confirm before relying on this script's output.
    """
    current_folder = os.path.dirname(os.path.realpath(__file__))
    #plt.figure()
    #plt.xlabel("Steps", fontsize=18)
    # plt.ylabel(sys.argv[1], fontsize=18)
    data = []
    for arg in sys.argv[2:]:
        runs = read_files(current_folder + arg)
        # column-wise mean across all runs of this group
        average_runs = [float(sum(col))/len(col) for col in zip(*runs[0])]
        steps = runs[1]
        trace = go.Scatter(
            x = steps,
            y = average_runs,
            mode = 'lines',
            name = 'lines')
        data.append(trace)
        #plt.plot(steps,average_runs)
    #plt.show()
    #np.savetxt('test.csv', (average_runs), delimiter=',')

if __name__ == "__main__":
    main()
| kclauw/Dueling_Network_Architectures | results/average_runs.py | Python | mit | 1,498 |
#!/usr/bin/env python
import os
import sys
from optparse import HelpFormatter
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT)
import upload
class GCWikiHelpFormatter (HelpFormatter):
    """Format optparse help as Google Code wiki markup.

    Headings become ``== Heading ==`` wiki sections and options are laid
    out as an HTML definition list (``<dl>``/``<dt>``/``<dd>``).
    """

    def __init__(self,
                 indent_increment=2,
                 max_help_position=24,
                 width=None,
                 short_first=1):
        HelpFormatter.__init__(
            self, indent_increment, max_help_position, width, short_first)
        # True while a <dl> block is open and must be closed before the
        # next heading is emitted.
        self._dl_open = False

    def indent(self):
        self._pending = 'INDENT'
        HelpFormatter.indent(self)

    def dedent(self):
        self._pending = 'DEDENT'
        HelpFormatter.dedent(self)

    def format_usage(self, usage):
        return "*Usage summary:* `%s`\n" % usage

    def format_heading(self, heading):
        # close the previous definition list before opening a new section
        prefix = '\n</dl>\n' if self._dl_open else ''
        bars = '=' * (self.current_indent + 2)
        self._dl_open = True
        return "%s%s %s %s\n<dl>\n" % (prefix, bars, heading, bars)

    def format_option(self, option):
        parts = ['<dt>`%s`</dt>\n' % self.option_strings[option]]
        if option.help:
            parts.append('<dd>%s</dd>\n' % self.expand_default(option))
        return ''.join(parts)
def main():
    """Render upload.py's option help as a Google Code wiki page on stdout."""
    upload.parser.formatter = GCWikiHelpFormatter()
    print HEADER
    print upload.parser.format_option_help()
    print '</dl>' # TODO: Formatter should do this
    print FOOTER
    print
# Wiki page skeleton: HEADER precedes and FOOTER follows the generated
# option listing.
HEADER = """#summary upload.py usage and options.

<wiki:comment>
THIS PAGE IS AUTOGENERATED. DO NOT EDIT.
To update this page run tools/uploadopts2wiki.py
</wiki:comment>

= upload.py Usage =

[http://codereview.appspot.com/static/upload.py upload.py] is a tool
for uploading diffs from a version control system to the codereview app.

*Usage summary:*
{{{upload.py [options] [-- diff_options]}}}

Diff options are passed to the diff command of the underlying system.

*Supported version control systems:*
  * Git
  * Mercurial
  * Subversion
  * Perforce

It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""

FOOTER = """\
==== Running upload.py from perforce GUIs ====

You can right click on a perforce changelist and create a new Rietveld code review by adding a custom tool with the following settings:

Application: python Arguments: /PATH/TO/upload.py -s MY_SERVER --p4_changelist %p --p4_port $p --p4_user $u --p4_client $c Start In: empty. Check "Add to applicable context menus", "Run tool in terminal window" (or system equivalent), and "Ignore P4CONFIG files".

Replace /PATH/TO/ with the location of upload.py, and MY_SERVER with the rietveld code review server. See screenshot [http://alexmccarthy.net/Rietveld%20-%20P4V%20Custom%20Tool%20Settings.png here].
"""

if __name__ == '__main__':
  main()
| nicko96/Chrome-Infra | appengine/chromium_rietveld/tools/uploadopts2wiki.py | Python | bsd-3-clause | 2,871 |
import pandas as pd
import numpy as np
import copy
import flat
#変更の適用
# 解を適用してfitをNoneにする
def _apply_sol(bee, sol):
bee["sol"] = sol
bee["fit"] = {key: None for key in bee["fit"].keys()}
return bee
# Basic neighborhood moves.
# upgrade: bump the grind item used at every slot one rank up.
# level_item lists ranks cheapest-first, e.g. ["None", "+1"].
def upgrade(in_bee, type_item, level_item):
    """Return a deep copy of *in_bee* where every *type_item* entry is
    promoted one rank in *level_item*; entries already at the best rank
    stay unchanged.  Cached fitness values are invalidated."""
    bee = copy.deepcopy(in_bee)
    # current ranks as indices into level_item
    ranks = [level_item.index(item) for item in bee["sol"][type_item]]
    # one step better, clamped to the best available rank
    top = len(level_item) - 1
    bumped = [min(rank + 1, top) for rank in ranks]
    sol = bee["sol"]
    sol[type_item] = [level_item[rank] for rank in bumped]
    return _apply_sol(bee, sol)
# downgrade: drop the grind item used at every slot one rank down.
def downgrade(in_bee, type_item, level_item):
    """Return a deep copy of *in_bee* where every *type_item* entry is
    demoted one rank in *level_item*; entries already at the cheapest
    rank (no item) stay unchanged.  Cached fitness values are
    invalidated."""
    bee = copy.deepcopy(in_bee)
    # current ranks as indices into level_item
    ranks = [level_item.index(item) for item in bee["sol"][type_item]]
    # one step cheaper, clamped at "no item used"
    lowered = [max(rank - 1, 0) for rank in ranks]
    sol = bee["sol"]
    sol[type_item] = [level_item[rank] for rank in lowered]
    return _apply_sol(bee, sol)
# flatten: smooth the grind plan for one item type, e.g.
# ["None", "+1", "None", "None", "+2", "+1"]
#   -> ["None", "+1", "+1", "+1", "+2", "+2"]
def flatten(in_bee, type_item, level_item):
    """Return a deep copy of *in_bee* whose *type_item* plan has been
    smoothed by flat.flatten_sol; cached fitness values are invalidated."""
    bee = copy.deepcopy(in_bee)
    sol = bee["sol"]
    sol[type_item] = flat.flatten_sol(bee["sol"], type_item, level_item)
    return _apply_sol(bee, sol)
# flatten_both: smooth the plans for every item type at once.
# levels_item maps each type to its rank order, e.g.
# {'reducer': ["None", "+1"], 'booster': ["None", "+10%"]}
def flatten_both(in_bee, levels_item):
    """Return a deep copy of *in_bee* with every item type's plan
    smoothed by flat.flatten_sol; cached fitness values are invalidated."""
    bee = copy.deepcopy(in_bee)
    sol = {}
    for item, levels in levels_item.items():
        sol[item] = flat.flatten_sol(bee["sol"], item, levels)
    return _apply_sol(bee, sol)
# Copyright (C) 2016-Today - KMEE (<http://kmee.com.br>).
# Luis Felipe Miléo - mileo@kmee.com.br
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api
from odoo.addons import decimal_precision as dp
from odoo.tools.float_utils import float_round as round
from .account_payment_mode import OPERATION_TYPE
class PaymentLine(models.Model):
    """Brazilian-localization additions to payment order lines: boleto
    digitable line, interest percentage/amount, and the CNAB operation
    type taken from the parent order."""

    _inherit = 'account.payment.line'

    @api.model
    def _get_info_partner(self, partner_record):
        """Return a multi-line address block (legal name, CNPJ/CPF,
        street, city-state/zip, country) for *partner_record*, or False
        when no partner is given."""
        if not partner_record:
            return False
        st = partner_record.street or ''
        n = partner_record.street_number or ''
        st1 = partner_record.street2 or ''
        zip_code = partner_record.zip or ''  # renamed: don't shadow builtin zip()
        city = partner_record.city_id.name or ''
        uf = partner_record.state_id.code or ''
        zip_city = city + '-' + uf + '\n' + zip_code
        cntry = partner_record.country_id and \
            partner_record.country_id.name or ''
        cnpj = partner_record.cnpj_cpf or ''
        # Parentheses are required around the name fallback: without them
        # `or` took the whole concatenation as its right operand, so a
        # partner WITH a legal name got only the name and no address.
        return (partner_record.legal_name or '') + "\n" + cnpj + "\n" + st \
            + ", " + n + " " + st1 + "\n" + zip_city + "\n" + cntry

    @api.multi
    @api.depends('percent_interest', 'amount_currency')
    def _compute_interest(self):
        """Compute amount_interest from percent_interest, rounded to the
        'Account' decimal precision."""
        for record in self:
            precision = record.env[
                'decimal.precision'].precision_get('Account')
            record.amount_interest = round(
                record.amount_currency * (
                    record.percent_interest / 100), precision)

    # boleto "linha digitável" (digitable line) of the payment slip
    linha_digitavel = fields.Char(string="Linha Digitável")
    percent_interest = fields.Float(string="Percentual de Juros",
                                    digits=dp.get_precision('Account'))
    amount_interest = fields.Float(string="Valor Juros",
                                   compute='_compute_interest',
                                   digits=dp.get_precision('Account'))
    operation_type = fields.Selection(
        selection=OPERATION_TYPE,
        string='Tipo de Operação',
        related='order_id.operation_type',
        store=True
    )
| akretion/l10n-brazil | l10n_br_account_payment_order/models/account_payment_line.py | Python | agpl-3.0 | 2,102 |
""":mod:`earthreader.web.exceptions` --- Exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from flask import jsonify
from werkzeug.exceptions import HTTPException
class IteratorNotFound(ValueError):
    """Raised when the iterator does not exist.

    Plain ValueError (no JSON response): callers handle it internally.
    """
class JsonException(HTTPException):
    """Base exception to return json response when raised.

    Exceptions inherit this class must declare `error` and `message`.
    """

    def get_response(self, environ=None):
        """Render the exception as a JSON error body.

        NOTE(review): the status code is hard-coded to 404 for every
        subclass, including non-"not found" errors such as
        WorkerNotRunning -- confirm whether per-subclass codes were
        intended.
        """
        r = jsonify(error=self.error, message=self.message)
        r.status_code = 404
        return r
class InvalidCategoryID(ValueError, JsonException):
    """Raised when the category ID is not valid."""

    # machine-readable token / human-readable message for the JSON body
    error = 'category-id-invalid'
    message = 'Given category id is not valid'
class FeedNotFound(ValueError, JsonException):
    """Raised when the feed is not reachable."""

    # machine-readable token / human-readable message for the JSON body
    error = 'feed-not-found'
    # fixed user-visible typo: 'exsist' -> 'exist' (matches EntryNotFound)
    message = 'The feed you request does not exist'
class EntryNotFound(ValueError, JsonException):
    """Raised when the entry is not reachable."""

    # machine-readable token / human-readable message for the JSON body
    error = 'entry-not-found'
    message = 'The entry you request does not exist'
class WorkerNotRunning(ValueError, JsonException):
    """Raised when the worker thread is not running."""

    error = 'worker-not-running'
    # the implicit string concatenation previously produced
    # '...is notrunning.' -- a space was missing at the join point
    message = 'The worker thread that crawl feeds in background is not ' \
              'running.'
| earthreader/web | earthreader/web/exceptions.py | Python | agpl-3.0 | 1,387 |
#/usr/bin/local/python
# *-* coding: utf-8 *-*
import matplotlib.pyplot as plt
import numpy as np
import math
from popkin.utility.kinematic import segment_ends_from_pose
class ArmEstimationVisualizer:
    """Matplotlib window that draws the arm pose estimated by an MMC
    network next to the evidence (targets) fed into it, and forwards
    mouse clicks / key presses to registered callbacks.

    NOTE: this module targets Python 2 (print statements below).
    """

    def __init__(self, mmc):
        self.mmc=mmc
        # key -> handler for key presses; mouse button -> handler for clicks
        self.callbacks={}
        self.callbacks_click={}
        self.estimated_pose=np.zeros((len(mmc.representations),2))
        self.fignum=37
        self.fig=plt.figure(self.fignum)
        cid = self.fig.canvas.mpl_connect('button_press_event', self.onClick)
        cid_k=self.fig.canvas.mpl_connect('key_press_event', self.onKeyPress)
        # dummy plot so the figure owns an axes object we can reuse
        plt.plot([0,1],[1,0])
        self.ax=self.fig.get_axes()[0]
        layout_rows=2
        layout_cols=2
        panel_layout=layout_rows*100+10*layout_cols
        # maximum reach of the end effector; scales every plot symmetrically
        self.r_max_ee=self.mmc.representations[self.mmc.end_effector_name].radius_max
        # self.subplot_activation={}
        # for i,f in enumerate(fields):
        #     self.subplot_activation[f]=plt.subplot(panel_layout+i+1)
        #     self.subplot_activation[f].set_title(f.name)
        #
        #     f.plot_activation(self.subplot_activation[f])

    def start(self):
        """Draw the initial state and enter matplotlib's event loop (blocks)."""
        self.refreshPlot()
        plt.show()

    def onClick(self, event):
        """Dispatch a mouse click to the handler registered for its button."""
        #print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
        #event.button, event.x, event.y, event.xdata, event.ydata)
        click_position=np.array([event.xdata,event.ydata])
        # same point in polar coordinates (radius, angle); currently unused
        click_position_polar=np.array([(click_position[0]**2.0+click_position[1]**2.0)**0.5,math.atan2(click_position[1],click_position[0])])
        #left mouse-button
        if event.button in self.callbacks_click.keys():
            self.callbacks_click[event.button](click_position)
            self.refreshPlot()

    def onKeyPress(self, event):
        """Dispatch a key press to the handler registered for that key."""
        if event.key in self.callbacks.keys():
            self.callbacks[event.key]()
            self.refreshPlot()

    def addCallbackClick(self, button, callback):
        """Register *callback* (takes the click position) for mouse *button*."""
        self.callbacks_click[button]=callback

    def addCallback(self, key, callback):
        """Register a no-argument *callback* for keyboard *key*."""
        self.callbacks[key]=callback
        print self.callbacks

    def plot_estimation(self):
        """Draw the coordinate axes and the currently estimated arm pose."""
        self.ax.arrow(-0.8*self.r_max_ee, 0, 1.6*self.r_max_ee, 0.0, head_width=0.05*self.r_max_ee, head_length=0.1*self.r_max_ee, fc='k', ec='k')
        self.ax.arrow(0, -0.8*self.r_max_ee, 0.0, 1.6*self.r_max_ee, head_width=0.05*self.r_max_ee, head_length=0.1*self.r_max_ee, fc='k', ec='k')
        seg_ends=[[0.0,0.0]]
        # estimate each segment end in its local frame
        for i_segment,name in enumerate(self.mmc.segment_names):
            segment=self.mmc.representations[name]
            seg_ends.append(segment.estimate())
        seg_ends=np.array(seg_ends)
        # accumulate the local estimates into global coordinates
        for i in range(self.mmc.n_segments-1,-1,-1):
            print i
            seg_ends[i+1:,:]+=seg_ends[i,:]
        #Plot segment ends
        self.ax.plot(seg_ends[:,0],seg_ends[:,1],c='black')
        for t in self.mmc.triangles:
            r0,r1,rs=t
            r0e=r0.estimate()
            r1e=r1.estimate()
            rse=rs.estimate()
            # the digit in the relation name indexes its origin segment end
            origin_idx=int(rs.name[1])
            origin=seg_ends[origin_idx,:]
            #origin=np.array([0.0,0.0])
            #xs=[origin[0],origin[0]+r0e[0],origin[0]+r0e[0]+r1e[0]]
            #ys=[origin[1],origin[1]+r0e[1],origin[1]+r0e[1]+r1e[1]]
            #self.ax.plot(xs,ys,c='black')
            self.ax.plot([origin[0],origin[0]+rse[0]],[origin[1],origin[1]+rse[1]],c='green', linestyle="dotted")
        ee_estimation=self.mmc.end_effector.estimate()
        self.ax.scatter(ee_estimation[0],ee_estimation[1],c='black')

    def plot_evidences(self):
        """Draw the evidence (targets) currently fed into the MMC network in red."""
        #print "evidences"
        #print self.mmc.evidences
        if self.mmc.end_effector in self.mmc.evidences.keys():
            ee_ev=self.mmc.evidences[self.mmc.end_effector]
            self.ax.scatter([ee_ev[0]],[ee_ev[1]],c='red')
            print "EE",ee_ev
        # only draw a full target pose when every segment has evidence
        all_segments=True
        for seg in self.mmc.segments:
            all_segments = all_segments and seg in self.mmc.evidences
        if all_segments:
            target_pose=np.zeros((len(self.mmc.representations),2))
            for i,seg in enumerate(self.mmc.segments):
                ev=self.mmc.evidences[seg]
                # pose row = (segment length, absolute segment angle)
                target_pose[i,0]=seg.radius_max
                target_pose[i,1]=math.atan2(ev[1],ev[0])
            seg_ends=segment_ends_from_pose(target_pose.flatten())#-pose[0]))
            self.ax.plot(seg_ends[:,0],seg_ends[:,1],c='red')

    def refreshPlot(self):
        """Clear the axes and redraw evidences plus the current estimation."""
        plt.figure(self.fignum)
        self.ax.cla()
        self.plot_evidences()
        self.plot_estimation()
        self.ax.set_xlim(-self.r_max_ee,self.r_max_ee)
        self.ax.set_ylim(-self.r_max_ee,self.r_max_ee)
        #field.plot_activation(subplot)
        plt.draw()
| mbaumBielefeld/popkin | popkin/visualization/armestimationvisualizer.py | Python | gpl-2.0 | 4,973 |
# -*- coding: utf-8 -*-
import os
import os.path as op
import yaml
import logging.config
from .text_files import read
from ..config import LOG_LEVEL
MODULE_NAME = __name__.split('.')[0]
def setup_logging(log_config_file=op.join(op.dirname(__file__), 'logger.yml'),
                  log_default_level=LOG_LEVEL,
                  env_key=MODULE_NAME.upper() + '_LOG_CFG'):
    """Setup logging configuration.

    The YAML config path can be overridden through the environment
    variable *env_key*; if the file does not exist, fall back to
    logging.basicConfig at *log_default_level*.
    """
    path = log_config_file
    value = os.getenv(env_key, None)
    if value:
        path = value
    if op.exists(path):
        # the file is a template: '{0}' placeholders are filled with the
        # package name before parsing
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary YAML tags; consider yaml.safe_load if the config file
        # is not fully trusted.
        log_cfg = yaml.load(read(path).format(MODULE_NAME))
        logging.config.dictConfig(log_cfg)
        #print('Started logging using config file {0}.'.format(path))
    else:
        logging.basicConfig(level=log_default_level)
        #print('Started default logging. Could not find config file '
        #      'in {0}.'.format(path))
# module-level logger; emits a debug record as soon as the module is
# imported (i.e. possibly before setup_logging has been called)
log = logging.getLogger(__name__)
log.debug('Start logging.')
| Neurita/luigi | galvani/utils/logger.py | Python | bsd-3-clause | 955 |
#!/usr/bin/env python3
import bottle
from . import errors
BACKEND_NAME_PATTERN='<name:re:.*>'
class PaltoServer(bottle.Bottle):
    """Bottle application that multiplexes ALTO requests onto named
    backends.

    ``GET /`` anchors the 'alto-service' route (useful for building base
    URLs); ``GET/POST/PUT/DELETE /<name>`` are forwarded to the backend
    registered under *name*.
    """

    def __init__(self, **kargs):
        # Pop our private 'backends' option *before* delegating:
        # bottle.Bottle.__init__ rejects unknown keyword arguments, so
        # popping after the call (as the original code did) made the
        # option unusable.
        self.backends = kargs.pop('backends', {})
        bottle.Bottle.__init__(self, **kargs)
        self.setup_routes()

    def setup_routes(self):
        """Install the root route plus one /<name> route per HTTP verb."""
        root_callback = lambda : self.get_route('')
        self.add_route(bottle.Route(self, '/', 'GET', root_callback, name='alto-service'))

        get = lambda name: self.get_route(name)
        post = lambda name: self.post_route(name)
        put = lambda name: self.put_route(name)
        delete = lambda name: self.delete_route(name)
        callbacks = {'GET' : get, 'POST' : post, 'PUT' : put, 'DELETE' : delete }
        for method, callback in callbacks.items():
            path = '/{}'.format(BACKEND_NAME_PATTERN)
            self.add_route(bottle.Route(self, path, method, callback))

    def add_backend(self, name, backend):
        """Register *backend* under *name*; reject duplicate names."""
        if name in self.backends:
            # str.format() substitutes '{}' placeholders; the original
            # message used '%s' with .format(), so the name never
            # appeared in the error text.
            raise Exception('The name {} has been registered'.format(name))
        self.backends[name] = backend

    def get_backends(self):
        """Return the mapping of backend name -> backend object."""
        return self.backends

    def get_backend(self, name):
        """Return the backend registered under *name*, or None."""
        return self.backends.get(name)

    def remove_backend(self, name):
        """Unregister the backend under *name* (KeyError if absent)."""
        self.backends.pop(name)

    def dispatch(self, name, task_template):
        """Run *task_template* on backend *name*, mapping a missing
        backend to a 404 response and any exception to a 500 response."""
        backend = self.get_backend(name)
        if backend is None:
            return errors.not_found(bottle.response, service=name)

        try:
            return task_template(backend, bottle.request, bottle.response)
        except Exception as e:
            return errors.server_error(bottle.response, exception=e)

    def get_route(self, name):
        get = lambda backend, request, response: backend.get(request, response)
        return self.dispatch(name, get)

    def post_route(self, name):
        post = lambda backend, request, response: backend.post(request, response)
        return self.dispatch(name, post)

    def put_route(self, name):
        put = lambda backend, request, response: backend.put(request, response)
        return self.dispatch(name, put)

    def delete_route(self, name):
        delete = lambda backend, request, response: backend.delete(request, response)
        return self.dispatch(name, delete)
def test():
    """Smoke test: serve one AbstractNetworkMapBackend under /test on
    localhost:3400 (blocks until the server is stopped)."""
    from .rfc7285 import AbstractNetworkMapBackend
    import configparser
    nmb = AbstractNetworkMapBackend(configparser.ConfigParser(), False)
    server = PaltoServer()
    server.add_backend('test', nmb)

    # extra route to expose the computed base URL for manual inspection
    get_url = lambda : 'base url: {}'.format(server.get_url('alto-service'))
    server.add_route(bottle.Route(server, '/get_baseurl', 'GET', get_url))
    server.run(host='localhost', port=3400, debug=True)

if __name__ == '__main__':
    test()
| snlab/alto-server | palto/paltoserver.py | Python | apache-2.0 | 2,796 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2013 by
# Erwin Marsi and TST-Centrale
#
#
# This file is part of the Algraeph program.
#
# The Algraeph program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# The Algraeph program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Erwin Marsi <e.marsi@gmail.com>"
from os import getcwd
from os.path import basename, splitext
import wx.html
from graeph.pubsub import ( subscribe, save_subscribe, unsubscribe, send,
receive, is_subscribed )
from graeph.dotgraph import BasicDotGraphPair
from graeph.graphviz import ( update_image_file, update_image_map, get_html,
draw, get_output_formats )
class GraphView(wx.html.HtmlWindow):
    """
    Minimal class for viewing graphs.

    Renders the aligner's current graph pair as an HTML page with an image
    map, and keeps the rendering synchronized with the aligner through
    pubsub subscriptions.
    """

    def __init__(self, parent, aligner, algraephFrame):
        # parent: wx parent window
        # aligner: alignment model holding graph pair and node selections
        # algraephFrame: top-level frame that owns the menu bar
        wx.html.HtmlWindow.__init__(self, parent,
                                    style=wx.NO_FULL_REPAINT_ON_RESIZE|wx.SUNKEN_BORDER)
        self.aligner = aligner
        self.algraephFrame = algraephFrame
        self.initDotGraphPair()
        # Node hrefs in the image map carry a prefix identifying which of
        # the two graphs a node belongs to (see onNodeSelection).
        self.from_node_prefix = self.dotGraphPair.from_subgraph.node_prefix
        self.to_node_prefix = self.dotGraphPair.to_subgraph.node_prefix
        self.initViewMenu()
        self.replaceViewMenu()
        self.subscribe()

    def initDotGraphPair(self):
        """
        Initialize the dot visualization.
        Must be provided by any subclass of GraphView.
        """
        # BUGFIX: the original assigned DotGraphPair(), a name that is
        # neither defined nor imported in this module, so calling this on
        # the base class raised NameError.  Fail explicitly instead.
        raise NotImplementedError(
            "subclasses of GraphView must implement initDotGraphPair")

    def initViewMenu(self):
        """
        Initialise the context-sensitive pop-up menu for graph viewing options.
        May be overridden by subclasses of GraphView.
        """
        self.viewMenu = ViewMenu(self, self.aligner, self.algraephFrame)

    def replaceViewMenu(self):
        """
        Replace the View menu in the menu bar with the new View menu
        """
        menuBar = self.algraephFrame.GetMenuBar()
        pos = menuBar.FindMenu("View")
        menuBar.Replace(pos, self.viewMenu, "View")

    def Destroy(self):
        # When a new corpus is loaded, the current GraphView object is destroyed
        # and a new one is initialised.
        # We need to unsubscribe the methods of the old object to make sure they
        # are no longer called by pubsub, which would result in a PyDeadObject
        # exception.  Hence this override of Destroy.
        self.unsubscribe()
        wx.html.HtmlWindow.Destroy(self)

    def subscribe(self):
        """
        Subscribe handlers for events sent by the aligner.

        Subscription happens in three stages because pubsub calls handlers
        in subscription order: the dot structures (stage 2) must be updated
        before the image/map/html are regenerated from them (stage 3).
        """
        self.subscribe_stage_1()
        self.subscribe_stage_2()
        self.subscribe_stage_3()

    def subscribe_stage_1(self):
        # Hook for subclasses; the base class has no stage-1 subscriptions.
        pass

    def subscribe_stage_2(self):
        # Stage 2: update the dot graph structures.
        # newGraphPair
        subscribe(self.updateFromGraph, "newGraphPair.viz")
        subscribe(self.updateToGraph, "newGraphPair.viz")
        subscribe(self.updateAlignment, "newGraphPair.viz")
        # newRelation
        subscribe(self.updateAlignment, "newRelation.viz")

    def subscribe_stage_3(self):
        # Stage 3: regenerate the rendered image, image map and html page.
        # newGraphPair
        subscribe(self.updateImageFile, "newGraphPair.gui")
        subscribe(self.updateImageMap, "newGraphPair.gui")
        subscribe(self.updateHtmlPage, "newGraphPair.gui")
        # newRelation
        subscribe(self.updateImageFile, "newRelation.gui")
        subscribe(self.updateImageMap, "newRelation.gui")
        subscribe(self.updateHtmlPage, "newRelation.gui")

    def unsubscribe(self):
        # Called when the GraphView object is destroyed.
        unsubscribe(self.updateFromGraph)
        unsubscribe(self.updateToGraph)
        unsubscribe(self.updateAlignment)
        unsubscribe(self.updateImageFile)
        unsubscribe(self.updateImageMap)
        unsubscribe(self.updateHtmlPage)

    # ------------------------------------------------------------------------------
    # Event handlers
    # ------------------------------------------------------------------------------

    def OnCellMouseHover(self, cell, x, y):
        # Automatic changing of the cursor shape when the mouse hovers over
        # links seems to fail with image maps, so implement it explicitly here.
        if cell.GetLink(x, y):
            self.SetCursor(wx.StockCursor(wx.CURSOR_HAND))
        else:
            self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))

    def OnCellClicked(self, cell, x, y, event):
        linkinfo = cell.GetLink(x, y)
        if event.RightUp():
            self.onViewPopupMenu(linkinfo, event)
        elif linkinfo:
            self.onNodeSelection(linkinfo)

    def onNodeSelection(self, linkinfo):
        # The href's prefix tells us which graph the clicked node belongs to.
        node = linkinfo.GetHref()
        if node.startswith(self.from_node_prefix):
            node = node[len(self.from_node_prefix):]
            self.aligner.set_from_node(node)
        elif node.startswith(self.to_node_prefix):
            node = node[len(self.to_node_prefix):]
            self.aligner.set_to_node(node)

    def onViewPopupMenu(self, linkinfo, event):
        pos = event.GetPosition()
        pos = self.ScreenToClient(pos)
        self.PopupMenu(self.viewMenu, pos)

    # ------------------------------------------------------------------------------
    # ViewMenu handlers
    # ------------------------------------------------------------------------------

    # no handlers because minimal ViewMenu is empty

    # ------------------------------------------------------------------------------
    # Dotgraph updates
    # ------------------------------------------------------------------------------

    def updateFromGraph(self, msg=None):
        receive(self.updateFromGraph, msg)
        from_graph = self.aligner.get_from_graph()
        self.dotGraphPair.from_subgraph.update_structure(from_graph)

    def updateToGraph(self, msg=None):
        receive(self.updateToGraph, msg)
        to_graph = self.aligner.get_to_graph()
        self.dotGraphPair.to_subgraph.update_structure(to_graph)

    def updateAlignment(self, msg=None):
        receive(self.updateAlignment, msg)
        from_graph = self.aligner.get_from_graph()
        to_graph = self.aligner.get_to_graph()
        graph_pair = self.aligner.get_graph_pair()
        self.dotGraphPair.update_structure(graph_pair, from_graph, to_graph)

    # ------------------------------------------------------------------------------
    # Image(map) updates
    # ------------------------------------------------------------------------------

    def updateImageFile(self, msg=None):
        """
        call graphviz to update the image file
        """
        receive(self.updateImageFile, msg)
        update_image_file(self.dotGraphPair.to_string())

    def updateImageMap(self, msg=None):
        """
        call graphviz to update the image map
        """
        receive(self.updateImageMap, msg)
        update_image_map(self.dotGraphPair.to_string())

    def updateHtmlPage(self, msg=None):
        """
        reload html page and reposition scroll bars
        """
        receive(self.updateHtmlPage, msg)
        # Backup current scroll positions.
        # This is not perfect, because the size of the image may change
        # (e.g. with or without dependency relation labels).
        xpos, ypos = self.GetViewStart()
        html = get_html()
        self.SetPage(html)
        # Restore scroll positions
        self.Scroll(xpos, ypos)
class BasicGraphView(GraphView):
    """
    Basic class for viewing graphs: adds node folding, node/edge focus
    highlighting, and the viewing options exposed by BasicViewMenu.
    """

    def initDotGraphPair(self):
        self.dotGraphPair = BasicDotGraphPair()

    def initViewMenu(self):
        self.viewMenu = BasicViewMenu(self, self.aligner, self.algraephFrame)

    def subscribe_stage_1(self):
        GraphView.subscribe_stage_1(self)
        # newGraphPair
        subscribe(self.updateEdgeFocus, "newGraphPair.viz")
        subscribe(self.updateNodeFocus, "newGraphPair.viz")
        subscribe(self.unfoldAllNodes, "newGraphPair.viz")
        # newRelation
        subscribe(self.updateEdgeFocus, "newRelation.viz")
        # newNodeSelect
        subscribe(self.updateNodeFocus, "newNodeSelect.viz")
        subscribe(self.updateEdgeFocus, "newNodeSelect.viz")
        # newNodeSelect
        # ********* FIXME: following updates may be superfluous
        subscribe(self.updateAlignment, "newNodeSelect.viz")

    def subscribe_stage_3(self):
        GraphView.subscribe_stage_3(self)
        # newNodeSelect
        # ********* FIXME: following updates may be superfluous
        subscribe(self.updateImageFile, "newNodeSelect.gui")
        subscribe(self.updateHtmlPage, "newNodeSelect.gui")
        # foldNodeChanged
        subscribe(self.updateFromGraph, "foldNodeChanged")
        subscribe(self.updateToGraph, "foldNodeChanged")
        # hidden nodes may be focused or aligned, so we need to refresh
        # node focus, edge focus and alignment as well, which check themselves
        # if any nodes have now become hidden
        subscribe(self.updateNodeFocus, "foldNodeChanged")
        subscribe(self.updateEdgeFocus, "foldNodeChanged")
        subscribe(self.updateAlignment, "foldNodeChanged")
        subscribe(self.updateFolded, "foldNodeChanged")
        subscribe(self.updateImageFile, "foldNodeChanged")
        subscribe(self.updateImageMap, "foldNodeChanged")
        subscribe(self.updateHtmlPage, "foldNodeChanged")
        # unfoldAllNodes
        subscribe(self.unfoldAllNodes, "unfoldAllNodes")
        subscribe(self.updateFromGraph, "unfoldAllNodes")
        subscribe(self.updateToGraph, "unfoldAllNodes")
        subscribe(self.updateAlignment, "unfoldAllNodes")
        subscribe(self.updateImageFile, "unfoldAllNodes")
        subscribe(self.updateImageMap, "unfoldAllNodes")
        subscribe(self.updateHtmlPage, "unfoldAllNodes")
        # markSelectedNodesChanged
        subscribe(self.updateNodeFocus, "markSelectedNodesChanged")
        subscribe(self.updateFromGraph, "markSelectedNodesChanged")
        subscribe(self.updateToGraph, "markSelectedNodesChanged")
        subscribe(self.updateImageFile, "markSelectedNodesChanged")
        subscribe(self.updateHtmlPage, "markSelectedNodesChanged")
        # markAlignedNodesChanged
        subscribe(self.updateAlignment, "markAlignedNodesChanged")
        subscribe(self.updateImageFile, "markAlignedNodesChanged")
        subscribe(self.updateHtmlPage, "markAlignedNodesChanged")
        # labelEdgesChanged
        subscribe(self.updateFromGraph, "labelEdgesChanged")
        subscribe(self.updateToGraph, "labelEdgesChanged")
        subscribe(self.updateImageFile, "labelEdgesChanged")
        subscribe(self.updateImageMap, "labelEdgesChanged")
        subscribe(self.updateHtmlPage, "labelEdgesChanged")
        # markSelectedAlignmentsChanged
        subscribe(self.updateEdgeFocus, "markSelectedAlignmentsChanged")
        subscribe(self.updateAlignment, "markSelectedAlignmentsChanged")
        subscribe(self.updateImageFile, "markSelectedAlignmentsChanged")
        subscribe(self.updateHtmlPage, "markSelectedAlignmentsChanged")
        # hideAlignmentsChanged
        subscribe(self.updateAlignment, "hideAlignmentsChanged")
        subscribe(self.updateImageFile, "hideAlignmentsChanged")
        subscribe(self.updateImageMap, "hideAlignmentsChanged")
        subscribe(self.updateHtmlPage, "hideAlignmentsChanged")
        # coSelectAlignedNodeChanged
        subscribe(self.updateCoSelectedNode, "coSelectAlignedNodeChanged")
        # self.updateCoSelectedNode() will trigger a "newNodeSelect",
        # so there is no need to subscribe others

    def unsubscribe(self):
        GraphView.unsubscribe(self)
        unsubscribe(self.unfoldAllNodes)
        unsubscribe(self.updateNodeFocus)
        unsubscribe(self.updateEdgeFocus)
        unsubscribe(self.updateCoSelectedNode)
        unsubscribe(self.updateFolded)

    # ------------------------------------------------------------------------------
    # Event handlers
    # ------------------------------------------------------------------------------

    def OnCellClicked(self, cell, x, y, event):
        linkinfo = cell.GetLink(x, y)
        # store node for possible use by onToggleFold
        if linkinfo:
            self.node = linkinfo.GetHref()
        else:
            self.node = ""
        if event.ShiftDown():
            self.onToggleFold()
        else:
            GraphView.OnCellClicked(self, cell, x, y, event)

    def onViewPopupMenu(self, linkinfo, event):
        # Temporarily enable/check the "Fold Node" item for the clicked node.
        self.setFoldNodeItem(linkinfo)
        GraphView.onViewPopupMenu(self, linkinfo, event)
        self.resetFoldNodeItem()

    def setFoldNodeItem(self, linkinfo):
        # enable and/or check the "Fold Node" menu item
        if self.node.startswith(self.from_node_prefix):
            node = self.node[len(self.from_node_prefix):]
            graph = self.aligner.get_from_graph()
            if graph.node_is_non_terminal(node):
                self.viewMenu.enableFoldNodeItem()
                if self.dotGraphPair.from_subgraph.is_folded(node):
                    self.viewMenu.checkFoldNodeItem()
        elif self.node.startswith(self.to_node_prefix):
            node = self.node[len(self.to_node_prefix):]
            graph = self.aligner.get_to_graph()
            if graph.node_is_non_terminal(node):
                self.viewMenu.enableFoldNodeItem()
                if self.dotGraphPair.to_subgraph.is_folded(node):
                    self.viewMenu.checkFoldNodeItem()

    def resetFoldNodeItem(self):
        # return to the default state, i.e.
        # disable and uncheck the "Fold Node" menu item
        self.viewMenu.disableFoldNodeItem()
        self.viewMenu.uncheckFoldNodeItem()

    def onToggleFold(self, evt=None):
        """
        Fold/unfold the node stored by OnCellClicked; terminals are ignored.
        """
        if self.node.startswith(self.from_node_prefix):
            node = self.node[len(self.from_node_prefix):]
            graph = self.aligner.get_from_graph()
            if graph.node_is_terminal(node):
                return
            self.dotGraphPair.from_subgraph.toggle_node_fold(graph, node)
        elif self.node.startswith(self.to_node_prefix):
            node = self.node[len(self.to_node_prefix):]
            graph = self.aligner.get_to_graph()
            if graph.node_is_terminal(node):
                return
            self.dotGraphPair.to_subgraph.toggle_node_fold(graph, node)
        else:
            # Mouse not on a node. Should not happen, because in that case the
            # "Fold Node" menu item is disabled.
            return
        send(self.onToggleFold, "foldNodeChanged")

    def onUnfoldAllNodes(self, evt):
        """
        handler for the 'Unfold All Nodes' option in GraphmlViewMenu
        """
        send(self.onUnfoldAllNodes, "unfoldAllNodes")

    # ------------------------------------------------------------------------------
    # ViewMenu handlers
    # ------------------------------------------------------------------------------

    def onMarkAlignedNodes(self, evt):
        """
        handler for the 'Mark Aligned Nodes' option in the View menu
        """
        self.dotGraphPair.mark_aligned_nodes_option(evt.Checked())
        send(self.onMarkAlignedNodes, "markAlignedNodesChanged")
        send(self.onMarkAlignedNodes, "statusDescription",
             "Mark Aligned Nodes option is %s" % evt.Checked())

    def onMarkSelectedNodes(self, evt):
        """
        handler for the 'Mark Selected Nodes' option in the View menu
        """
        self.dotGraphPair.from_subgraph.mark_selected_nodes_option(evt.Checked())
        self.dotGraphPair.to_subgraph.mark_selected_nodes_option(evt.Checked())
        send(self.onMarkSelectedNodes, "markSelectedNodesChanged")
        send(self.onMarkSelectedNodes, "statusDescription",
             "Mark Selected Nodes option is %s" % evt.Checked())

    def onCoSelectAlignedNode(self, evt):
        """
        handler for the 'Co-select Aligned Node' option in the View menu
        """
        self.aligner.co_node_selection_mode(evt.Checked())
        send(self.onCoSelectAlignedNode, "coSelectAlignedNodeChanged")
        send(self.onCoSelectAlignedNode, "statusDescription",
             "Co-select Aligned Node option is %s" % evt.Checked())

    def onLabelEdges(self, evt):
        """
        handler for the 'Label Edges' option in the View menu
        """
        self.dotGraphPair.from_subgraph.label_edges_option(evt.Checked())
        self.dotGraphPair.to_subgraph.label_edges_option(evt.Checked())
        send(self.onLabelEdges, "labelEdgesChanged")
        send(self.onLabelEdges, "statusDescription",
             "Label Edges option is %s" % evt.Checked())

    def onMarkSelectedAlignments(self, evt):
        """
        handler for the 'Mark Selected Alignments' option in View menu
        """
        self.dotGraphPair.mark_selected_alignments_option(evt.Checked())
        # BUGFIX: the original passed self.onLabelEdges as the sender here
        # (copy-paste error); the sender must be this handler.
        send(self.onMarkSelectedAlignments, "markSelectedAlignmentsChanged")
        send(self.onMarkSelectedAlignments, "statusDescription",
             "Mark Selected Alignments option is %s" % evt.Checked())

    def onHideAlignments(self, evt):
        """
        handler for the 'Hide Alignments' option in View menu
        """
        self.dotGraphPair.hide_alignments_option(evt.Checked())
        send(self.onHideAlignments, "hideAlignmentsChanged")
        send(self.onHideAlignments, "statusDescription",
             "Hide Alignments option is %s" % evt.Checked())

    def onSaveImage(self, evt):
        """
        handler for the 'Save Image' option in View menu
        """
        # Put the preferred default output format first in the wildcard list.
        formats = get_output_formats()
        if "png" in formats:
            formats.remove("png")
            formats = ["png"] + formats
        else:
            # NOTE(review): assumes graphviz always reports "dot";
            # if it ever does not, this raises ValueError -- confirm.
            formats.remove("dot")
            formats = ["dot"] + formats
        wildcard = ""
        for s in formats:
            wildcard += s + " (*." + s + ")|" + "*." + s + "|"
        wildcard = wildcard[:-1]
        # Default file name: <corpus>_<graph pair number>.<format>
        filename = self.aligner.get_corpus_filename()
        filename = splitext(basename(filename))[0]
        filename += "_%d" % self.aligner.get_graph_pair_counter()[0]
        filename += "." + formats[0]
        dlg = wx.FileDialog(self, "Save image...",
                            defaultFile=filename,
                            defaultDir=self.aligner.get_corpus_dir() or getcwd(),
                            style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT,
                            wildcard=wildcard)
        if dlg.ShowModal() == wx.ID_OK:
            path = dlg.GetPath()
            if " " in path:
                # quote the path, as it ends up on a graphviz command line
                path = '"' + path + '"'
            form = formats[dlg.GetFilterIndex()]
            send(self.onSaveImage, "statusDescription", "Saving image to file %s ..." % path)
            draw(self.dotGraphPair.to_string(), img_file=path, img_format=form)
            send(self.onSaveImage, "statusDescription", "Saved image to file %s" % path)
        dlg.Destroy()

    # ------------------------------------------------------------------------------
    # Dotgraph updates
    # ------------------------------------------------------------------------------

    def unfoldAllNodes(self, msg=None):
        receive(self.unfoldAllNodes, msg)
        self.dotGraphPair.from_subgraph.unfold_all()
        self.dotGraphPair.to_subgraph.unfold_all()

    def updateFolded(self, msg):
        receive(self.updateFolded, msg)
        self.dotGraphPair.from_subgraph.update_folded()
        self.dotGraphPair.to_subgraph.update_folded()

    def updateCoSelectedNode(self, msg=None):
        receive(self.updateCoSelectedNode, msg)
        # Re-selecting the current node triggers co-selection of its
        # aligned counterpart in the other graph.
        from_node = self.aligner.get_from_node()
        if from_node:
            self.aligner.set_from_node(from_node)
        else:
            to_node = self.aligner.get_to_node()
            if to_node:
                self.aligner.set_to_node(to_node)

    def updateNodeFocus(self, msg=None):
        receive(self.updateNodeFocus, msg)
        # (removed two unused locals from the original: the from/to graphs
        # were fetched but never used)
        from_node = self.aligner.get_from_node()
        self.dotGraphPair.from_subgraph.update_node_focus(from_node)
        to_node = self.aligner.get_to_node()
        self.dotGraphPair.to_subgraph.update_node_focus(to_node)

    def updateEdgeFocus(self, msg=None):
        receive(self.updateEdgeFocus, msg)
        # Refresh focus for the alignment edges of both selected nodes.
        self.dotGraphPair.clear_edge_focus()
        from_node = self.aligner.get_from_node()
        to_node = self.aligner.get_aligned_to_node()
        self.dotGraphPair.update_edge_focus(from_node, to_node)
        to_node = self.aligner.get_to_node()
        from_node = self.aligner.get_aligned_from_node()
        self.dotGraphPair.update_edge_focus(from_node, to_node)
class ViewMenu(wx.Menu):
    """
    Minimal (empty) pop-up menu for graph-viewing options.

    self.graphView (a subclass of GraphView) must provide handlers for all
    wx.EVT_MENU. However, binding must occur at the level of algraephFrame,
    otherwise menu events from the View menu in the menu bar will be
    unhandled!
    """

    def __init__(self, graphView, aligner, algraephFrame):
        wx.Menu.__init__(self)
        self.graphView = graphView
        self.aligner = aligner
        self.algraephFrame = algraephFrame
        self.makeMenu()

    def makeMenu(self):
        """
        Subclasses add their menu items here; the base menu stays empty.
        """
        pass

    def isChecked(self, item_text):
        """
        Return True if the menu item labelled *item_text* is checked
        (None when no item carries that label).
        """
        matches = (entry.IsChecked()
                   for entry in self.GetMenuItems()
                   if entry.GetItemLabelText() == item_text)
        return next(matches, None)
class BasicViewMenu(ViewMenu):
    """
    Pop-up menu offering the basic graph-viewing options: node folding,
    node/edge/alignment highlighting, and image export.
    """

    # --- "Fold Node" item state ---------------------------------------------

    def checkFoldNodeItem(self):
        self.Check(self.fold_item_id, True)

    def uncheckFoldNodeItem(self):
        self.Check(self.fold_item_id, False)

    def enableFoldNodeItem(self):
        self.Enable(self.fold_item_id, True)

    def disableFoldNodeItem(self):
        self.Enable(self.fold_item_id, False)

    # --- menu construction --------------------------------------------------

    def makeMenu(self):
        # Build the sections in order, separated by menu separators.
        builders = (self.appendFoldOptions,
                    self.appendNodeViewOptions,
                    self.appendEdgeViewOptions,
                    self.appendAlignmentViewOptions,
                    self.appendOtherViewOptions)
        for count, build_section in enumerate(builders):
            if count:
                self.AppendSeparator()
            build_section()

    def _bind(self, handler, menu_item):
        # Bind on the frame rather than on this menu: otherwise the same
        # items in the menu-bar copy of the View menu would go unhandled.
        self.algraephFrame.Bind(wx.EVT_MENU, handler, menu_item)

    def appendFoldOptions(self):
        fold_item = self.Append(-1,
                                "&Fold Node",
                                "Hide all descendants of this node",
                                wx.ITEM_CHECK)
        self.fold_item_id = fold_item.GetId()
        # Disabled by default; enabled per-node via setFoldNodeItem.
        self.disableFoldNodeItem()
        self._bind(self.graphView.onToggleFold, fold_item)
        unfold_item = self.Append(-1,
                                  "&Unfold All Nodes\tCtrl-U",
                                  "Reveal all descendants of all nodes")
        self._bind(self.graphView.onUnfoldAllNodes, unfold_item)

    def appendNodeViewOptions(self):
        mark_aligned = self.Append(-1,
                                   "Mark Aligned &Nodes",
                                   "Mark nodes which are already aligned",
                                   wx.ITEM_CHECK)
        self._bind(self.graphView.onMarkAlignedNodes, mark_aligned)
        mark_selected = self.Append(-1,
                                    "&Mark Selected Nodes\tCtrl-M",
                                    "Highlight currently selected nodes",
                                    wx.ITEM_CHECK)
        self._bind(self.graphView.onMarkSelectedNodes, mark_selected)
        co_select = self.Append(-1,
                                "&Co-select Aligned Node\tCtrl-K",
                                "Automatically select the aligned node in the other graph",
                                wx.ITEM_CHECK)
        self._bind(self.graphView.onCoSelectAlignedNode, co_select)

    def appendEdgeViewOptions(self):
        label_edges = self.Append(-1,
                                  "&Label Edges",
                                  "Label edges with relations",
                                  wx.ITEM_CHECK)
        self._bind(self.graphView.onLabelEdges, label_edges)

    def appendAlignmentViewOptions(self):
        mark_alignments = self.Append(-1,
                                      "Mark Selected &Alignments\tCtrl-A",
                                      "Highlight the currently selected alignments",
                                      wx.ITEM_CHECK)
        self._bind(self.graphView.onMarkSelectedAlignments, mark_alignments)
        hide_alignments = self.Append(-1,
                                      "&Hide Alignments\tCtrl-H",
                                      "Hides all alignments except those of the selected nodes(s)",
                                      wx.ITEM_CHECK)
        self._bind(self.graphView.onHideAlignments, hide_alignments)

    def appendOtherViewOptions(self):
        save_image = self.Append(-1,
                                 "Save &Image",
                                 "save current image of graphs to a file")
        self._bind(self.graphView.onSaveImage, save_image)
# this funny statement is to trick py2exe into including these modules
# in the list of dependencies when building an exe
if False:
from graeph.alpino import graphview, dotgraph
from graeph.graphml import graphview, dotgraph | emsrc/algraeph | lib/graeph/graphview.py | Python | gpl-3.0 | 28,393 |
"""Routines to help recognizing sound files.
Function whathdr() recognizes various types of sound file headers.
It understands almost all headers that SOX can decode.
The return tuple contains the following items, in this order:
- file type (as SOX understands it)
- sampling rate (0 if unknown or hard to decode)
- number of channels (0 if unknown or hard to decode)
- number of frames in the file (-1 if unknown or hard to decode)
- number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW
If the file doesn't have a recognizable type, it returns None.
If the file can't be opened, IOError is raised.
To compute the total time, divide the number of frames by the
sampling rate (a frame contains a sample for each channel).
Function what() calls whathdr(). (It used to also use some
heuristics for raw data, but this doesn't work very well.)
Finally, the function test() is a simple main program that calls
what() for all files mentioned on the argument list. For directory
arguments it calls what() for all files in that directory. Default
argument is "." (testing all files in the current directory). The
option -r tells it to recurse down directories found inside
explicitly given directories.
"""
# The file structure is top-down except that the test program and its
# subroutine come last.
__all__ = ["what","whathdr"]
def what(filename):
    """Guess the type of a sound file"""
    # Thin wrapper kept for API symmetry with the other *hdr modules.
    return whathdr(filename)
def whathdr(filename):
    """Recognize sound headers.

    Reads the first 512 bytes of the file and runs each registered
    recognizer over them; returns the first recognizer's result tuple,
    or None if no recognizer matches.  Raises IOError if the file
    cannot be opened.
    """
    f = open(filename, 'rb')
    # BUGFIX: the original leaked the file handle; ensure it is closed
    # on every exit path (early return, recognizer exception, no match).
    try:
        h = f.read(512)
        for tf in tests:
            res = tf(h, f)
            if res:
                return res
        return None
    finally:
        f.close()
#-----------------------------------#
# Subroutines per sound header type #
#-----------------------------------#
tests = []

def test_aifc(h, f):
    """Recognize AIFF / AIFF-C files via the aifc module."""
    import aifc
    if h[:4] != 'FORM':
        return None
    variants = {'AIFC': 'aifc', 'AIFF': 'aiff'}
    fmt = variants.get(h[8:12])
    if fmt is None:
        return None
    f.seek(0)
    try:
        a = aifc.openfp(f, 'r')
    except (EOFError, aifc.Error):
        return None
    return (fmt, a.getframerate(), a.getnchannels(),
            a.getnframes(), 8 * a.getsampwidth())
tests.append(test_aifc)
def test_au(h, f):
    """Recognize Sun/NeXT audio ('.snd' big-endian, or byte-swapped).

    Returns ('au', rate, nchannels, nframes, sample_bits).
    """
    # BUGFIX: the original rebound the parameter `f` (the file object) to
    # the integer-decoding function, shadowing it; use a distinct name.
    if h[:4] == '.snd':
        getlong = get_long_be
    elif h[:4] in ('\0ds.', 'dns.'):
        getlong = get_long_le
    else:
        return None
    # Header layout: magic(4) hdr_size(4) data_size(4) encoding(4)
    # rate(4) channels(4); hdr_size is not needed here.
    data_size = getlong(h[8:12])
    encoding = getlong(h[12:16])
    rate = getlong(h[16:20])
    nchannels = getlong(h[20:24])
    sample_size = 1  # default (bytes per sample)
    if encoding == 1:
        sample_bits = 'U'   # U-LAW
    elif encoding == 2:
        sample_bits = 8
    elif encoding == 3:
        sample_bits = 16
        sample_size = 2
    else:
        sample_bits = '?'
    frame_size = sample_size * nchannels
    return 'au', rate, nchannels, data_size//frame_size, sample_bits
tests.append(test_au)
def test_hcom(h, f):
    """Recognize Macintosh HCOM files (FSSD/HCOM markers)."""
    if h[65:69] == 'FSSD' and h[128:132] == 'HCOM':
        divisor = get_long_be(h[144:148])
        return 'hcom', 22050//divisor, 1, -1, 8
    return None
tests.append(test_hcom)
def test_voc(h, f):
    """Recognize Creative Voice (.voc) files."""
    if not h.startswith('Creative Voice File\032'):
        return None
    sbseek = get_short_le(h[20:22])
    rate = 0
    # The first sound block, if within the read buffer, carries a rate code.
    if 0 <= sbseek < 500 and h[sbseek] == '\1':
        ratecode = ord(h[sbseek + 4])
        rate = int(1000000.0 / (256 - ratecode))
    return 'voc', rate, 1, -1, 8
tests.append(test_voc)
def test_wav(h, f):
    """Recognize RIFF/WAVE headers: 'RIFF' <len> 'WAVE' 'fmt ' <len>."""
    if h[:4] != 'RIFF' or h[8:12] != 'WAVE' or h[12:16] != 'fmt ':
        return None
    # h[20:22] holds the wFormatTag compression code; it was read into an
    # unused local in the original and is not needed for the result.
    nchannels = get_short_le(h[22:24])
    rate = get_long_le(h[24:28])
    sample_bits = get_short_le(h[34:36])
    return 'wav', rate, nchannels, -1, sample_bits
tests.append(test_wav)
def test_8svx(h, f):
if h[:4] != 'FORM' or h[8:12] != '8SVX':
return None
# Should decode it to get #channels -- assume always 1
return '8svx', 0, 1, 0, 8
tests.append(test_8svx)
def test_sndt(h, f):
    """Recognize SndTool files ('SOUND' magic)."""
    if h[:5] != 'SOUND':
        return None
    nsamples = get_long_le(h[8:12])
    rate = get_short_le(h[20:22])
    return 'sndt', rate, 1, nsamples, 8
tests.append(test_sndt)
def test_sndr(h, f):
    """Recognize Sounder files (no magic; sanity-check the rate field)."""
    if h[:2] != '\0\0':
        return None
    rate = get_short_le(h[2:4])
    if 4000 <= rate <= 25000:
        return 'sndr', rate, 1, -1, 8
    return None
tests.append(test_sndr)
#---------------------------------------------#
# Subroutines to extract numbers from strings #
#---------------------------------------------#
def get_long_be(s):
    """Decode a big-endian 32-bit unsigned integer from 4 characters."""
    value = 0
    for i in range(4):
        value = (value << 8) | ord(s[i])
    return value
def get_long_le(s):
    """Decode a little-endian 32-bit unsigned integer from 4 characters."""
    value = 0
    for i in (3, 2, 1, 0):
        value = (value << 8) | ord(s[i])
    return value
def get_short_be(s):
    """Decode a big-endian 16-bit unsigned integer from 2 characters."""
    return ord(s[0]) * 256 + ord(s[1])
def get_short_le(s):
    """Decode a little-endian 16-bit unsigned integer from 2 characters."""
    return ord(s[1]) * 256 + ord(s[0])
#--------------------#
# Small test program #
#--------------------#
def test():
    """Command-line driver: recognize every file named on sys.argv.

    With -r as the first argument, recurse into subdirectories.
    Defaults to the current directory when no arguments are given.
    """
    import sys
    recursive = 0
    if sys.argv[1:] and sys.argv[1] == '-r':
        del sys.argv[1:2]
        recursive = 1
    try:
        testall(sys.argv[1:] or ['.'], recursive, 1)
    except KeyboardInterrupt:
        sys.stderr.write('\n[Interrupted]\n')
        sys.exit(1)
def testall(list, recursive, toplevel):
    """Run what() on every file in `list`, descending into directories.

    `recursive` enables descending below explicitly given directories;
    `toplevel` is true only for the initial call, so the first level of
    directories is always expanded.
    (Python 2 module: print statements below are intentional.)
    """
    import sys
    import os
    for filename in list:
        if os.path.isdir(filename):
            # trailing comma keeps the result on the same output line
            print filename + '/:',
            if recursive or toplevel:
                print 'recursing down:'
                import glob
                names = glob.glob(os.path.join(filename, '*'))
                testall(names, recursive, 0)
            else:
                print '*** directory (use -r) ***'
        else:
            print filename + ':',
            # flush so the name appears before a possibly slow what() call
            sys.stdout.flush()
            try:
                print what(filename)
            except IOError:
                print '*** not found ***'
| huran2014/huran.github.io | wot_gateway/usr/lib/python2.7/sndhdr.py | Python | gpl-2.0 | 5,973 |
import wx
from gooey.gui.util import wx_util
from gooey.gui.windows.advanced_config import ConfigPanel
from gooey.gui.windows.sidebar import Sidebar
# Fallback widget specification used when a program declares no argparse
# arguments: a single required free-form command field.
basic_config = {
  'widgets': [{
    'type': 'CommandField',
    'required': True,
    'data': {
      'display_name': 'Enter Commands',
      'help': 'Enter command line arguments',
      'nargs': '',
      'commands': '',
      'choices': [],
      'default': None,
    }
  }],
}

# Layout identifiers: FLAT renders one flat panel, COLUMN adds a sidebar.
FLAT = 'standard'
COLUMN = 'column'
class FlatLayout(wx.Panel):
    """Single-panel layout: one ConfigPanel filling the whole window."""

    def __init__(self, *args, **kwargs):
        super(FlatLayout, self).__init__(*args, **kwargs)
        self.SetDoubleBuffered(True)  # avoid flicker on repaint
        self.main_content = ConfigPanel(self, opt_cols=3)
        layout = wx.BoxSizer(wx.HORIZONTAL)
        layout.Add(self.main_content, 3, wx.EXPAND)
        self.SetSizer(layout)
class ColumnLayout(wx.Panel):
    """Two-column layout: navigation sidebar, rule, and the main ConfigPanel."""

    def __init__(self, *args, **kwargs):
        super(ColumnLayout, self).__init__(*args, **kwargs)
        self.SetDoubleBuffered(True)  # avoid flicker on repaint
        self.sidebar = Sidebar(self)
        self.main_content = ConfigPanel(self, opt_cols=2)
        layout = wx.BoxSizer(wx.HORIZONTAL)
        layout.Add(self.sidebar, 1, wx.EXPAND)
        layout.Add(wx_util.vertical_rule(self), 0, wx.EXPAND)
        layout.Add(self.main_content, 3, wx.EXPAND)
        self.SetSizer(layout)
| ME-ICA/me-ica | gooey/gui/windows/layouts.py | Python | lgpl-2.1 | 1,332 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._connected_kubernetes_client import ConnectedKubernetesClient
from ._version import VERSION
__version__ = VERSION
__all__ = ['ConnectedKubernetesClient']

# Apply hand-written customizations to the generated client when a local
# `_patch` module is present; its absence is expected and not an error.
try:
    from ._patch import patch_sdk  # type: ignore
    patch_sdk()
except ImportError:
    pass
| Azure/azure-sdk-for-python | sdk/hybridkubernetes/azure-mgmt-hybridkubernetes/azure/mgmt/hybridkubernetes/__init__.py | Python | mit | 730 |
# -*- coding: utf-8 -*-
"""Bio2BEL HMDB is a package which allows the user to work with a local sqlite version of the Human Metabolome
Database (HMDB).
Next to creating the local database there are also functions provided, which will enrich given Biological Expression
Language (BEL) graphs with information about metabolites, proteins and diseases, that is present in HMDB.
HMDB BEL namespaces for these BEL graphs can be written.
Installation
------------
Get the Latest
~~~~~~~~~~~~~~~
Download the most recent code from `GitHub <https://github.com/bio2bel/hmdb>`_ with:
.. code-block:: sh
$ python3 -m pip install git+https://github.com/bio2bel/hmdb.git
For Developers
~~~~~~~~~~~~~~
Clone the repository from `GitHub <https://github.com/bio2bel/hmdb>`_ and install in editable mode with:
.. code-block:: sh
$ git clone https://github.com/bio2bel/hmdb.git
$ cd hmdb
$ python3 -m pip install -e .
Setup
-----
1. Create a :class:`bio2bel_hmdb.Manager` object
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>>> from bio2bel_hmdb import Manager
>>> manager = Manager()
2. Create the tables in the database
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>>> manager.create_all()
3. Populate the database
~~~~~~~~~~~~~~~~~~~~~~~~
This step will take some time, since the HMDB XML data needs to be downloaded, parsed, and fed into the database
line by line.
>>> manager.populate()
"""
from .manager import Manager
# Distribution metadata, kept here so setup.py and the docs can import it.
__version__ = '0.1.1-dev'

__title__ = 'bio2bel_hmdb'
__description__ = "A package for converting the Human Metabolome Database (HMDB) to BEL."
__url__ = 'https://github.com/bio2bel/hmdb'

__author__ = 'Charles Tapley Hoyt and Colin Birkenbihl'
__email__ = 'charles.hoyt@scai.fraunhofer.de'

__license__ = 'MIT License'
__copyright__ = 'Copyright (c) 2017-2018 Charles Tapley Hoyt and Colin Birkenbihl'
| bio2bel/hmdb | src/bio2bel_hmdb/__init__.py | Python | mit | 1,832 |
# lists.py
# creation
>>> [] # empty list
[]
>>> list() # same as []
[]
>>> [1, 2, 3] # as with tuples, items are comma separated
[1, 2, 3]
>>> [x + 5 for x in [2, 3, 4]] # list comprehension: apply an expression to each item
[7, 8, 9]
>>> list((1, 3, 5, 7, 9)) # list from a tuple
[1, 3, 5, 7, 9]
>>> list('hello') # list from a string
['h', 'e', 'l', 'l', 'o']
# main methods
>>> a = [1, 2, 1, 3]
>>> a.append(13) # we can append anything at the end
>>> a
[1, 2, 1, 3, 13]
>>> a.count(1) # how many `1` are there in the list?
2
>>> a.extend([5, 7]) # extend the list by another (or sequence)
>>> a
[1, 2, 1, 3, 13, 5, 7]
>>> a.index(13) # position of `13` in the list (0-based indexing)
4
>>> a.insert(0, 17) # insert `17` at position 0
>>> a
[17, 1, 2, 1, 3, 13, 5, 7]
>>> a.pop() # pop (remove and return) last element
7
>>> a.pop(3) # pop element at position 3
1
>>> a
[17, 1, 2, 3, 13, 5]
>>> a.remove(17) # remove `17` from the list
>>> a
[1, 2, 3, 13, 5]
>>> a.reverse() # reverse the order of the elements in the list
>>> a
[5, 13, 3, 2, 1]
>>> a.sort() # sort the list
>>> a
[1, 2, 3, 5, 13]
>>> a.clear() # remove all elements from the list
>>> a
[]
# extending
>>> a = list('hello') # makes a list from a string
>>> a
['h', 'e', 'l', 'l', 'o']
>>> a.append(100) # append 100, heterogeneous type
>>> a
['h', 'e', 'l', 'l', 'o', 100]
>>> a.extend((1, 2, 3)) # extend using tuple
>>> a
['h', 'e', 'l', 'l', 'o', 100, 1, 2, 3]
>>> a.extend('...') # extend using string
>>> a
['h', 'e', 'l', 'l', 'o', 100, 1, 2, 3, '.', '.', '.']
# most common operations
>>> a = [1, 3, 5, 7]
>>> min(a) # minimum value in the list
1
>>> max(a) # maximum value in the list
7
>>> sum(a) # sum of all values in the list
16
>>> len(a) # number of elements in the list
4
>>> b = [6, 7, 8]
>>> a + b # `+` with list means concatenation
[1, 3, 5, 7, 6, 7, 8]
>>> a * 2 # `*` n concatenates n copies of the list
[1, 3, 5, 7, 1, 3, 5, 7]
# cool sorting
>>> from operator import itemgetter
>>> a = [(5, 3), (1, 3), (1, 2), (2, -1), (4, 9)]
>>> sorted(a)
[(1, 2), (1, 3), (2, -1), (4, 9), (5, 3)]
>>> sorted(a, key=itemgetter(0))
[(1, 3), (1, 2), (2, -1), (4, 9), (5, 3)]
>>> sorted(a, key=itemgetter(0, 1))
[(1, 2), (1, 3), (2, -1), (4, 9), (5, 3)]
>>> sorted(a, key=itemgetter(1))
[(2, -1), (1, 2), (5, 3), (1, 3), (4, 9)]
>>> sorted(a, key=itemgetter(1), reverse=True)
[(4, 9), (5, 3), (1, 3), (1, 2), (2, -1)]
| mkhuthir/learnPython | Book_learning-python-r1.1/ch2/lists.py | Python | mit | 2,386 |
# -*- coding: utf-8 -*-
import pytest
import random
import time
from cfme import test_requirements
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.configure.configuration import get_server_roles, set_server_roles, candu
from cfme.common.provider import BaseProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from fixtures.pytest_store import store
from fixtures.provider import setup_or_skip
from operator import attrgetter
from utils import testgen
from utils import conf
from utils.blockers import BZ
from utils.log import logger
from utils.version import current_version
def pytest_generate_tests(metafunc):
    """Parametrize every test in this module over the providers that declare a
    24x7 C&U VM (``cap_and_util.capandu_vm == 'cu-24x7'``) in their yaml."""
    provider_classes = [VMwareProvider, RHEVMProvider, EC2Provider,
                        OpenStackProvider, AzureProvider, GCEProvider]
    argnames, argvalues, idlist = testgen.providers_by_class(
        metafunc,
        provider_classes,
        required_fields=[(['cap_and_util', 'capandu_vm'], 'cu-24x7')])
    testgen.parametrize(metafunc, argnames, argvalues, ids=idlist, scope="module")
# Marks applied to every test in this module: tier 1, C&U test requirement.
pytestmark = [
    pytest.mark.tier(1),
    test_requirements.c_and_u
]
@pytest.yield_fixture(scope="module")
def enable_candu():
    """Module fixture: enable the C&U (metrics) server roles and collection,
    restoring the original roles and disabling collection on teardown.

    Fix: the original assigned ``original_roles`` *inside* the try block, so
    if ``get_server_roles()`` raised, the ``finally`` clause crashed with an
    UnboundLocalError that masked the real failure. Snapshot the roles first;
    if that call itself fails there is nothing to restore yet.
    """
    original_roles = get_server_roles()
    try:
        new_roles = original_roles.copy()
        # Turn on the metrics pipeline; automate/smartstate are disabled so
        # they do not interfere with collection during the test.
        new_roles.update({
            'ems_metrics_coordinator': True,
            'ems_metrics_collector': True,
            'ems_metrics_processor': True,
            'automate': False,
            'smartstate': False})
        set_server_roles(**new_roles)
        candu.enable_all()
        yield
    finally:
        candu.disable_all()
        set_server_roles(**original_roles)
@pytest.yield_fixture(scope="module")
def clean_setup_provider(request, provider):
    # Start from a clean slate: remove every provider, set up only the one
    # under test (skipping the test if setup fails), and clean up afterwards.
    BaseProvider.clear_providers()
    setup_or_skip(request, provider)
    yield
    BaseProvider.clear_providers()
@pytest.fixture(scope="module")
def metrics_collection(clean_setup_provider, provider, enable_candu):
    """Check the db is gathering collection data for the given provider.

    Polls the appliance's ``metrics`` table for up to 15 minutes and returns
    once the number of collected samples is observed to rise: VM samples only
    for cloud providers, both VM and host samples for infra providers.
    Raises an Exception on timeout.

    Metadata:
        test_flag: metrics_collection
    """
    metrics_tbl = store.current_appliance.db.client['metrics']
    mgmt_systems_tbl = store.current_appliance.db.client['ext_management_systems']
    logger.info("Fetching provider ID for %s", provider.key)
    # Resolve the EMS row id by the provider name configured in cfme_data.
    mgmt_system_id = store.current_appliance.db.client.session.query(mgmt_systems_tbl).filter(
        mgmt_systems_tbl.name == conf.cfme_data.get('management_systems', {})[provider.key]['name']
    ).first().id
    logger.info("ID fetched; testing metrics collection now")
    start_time = time.time()
    host_count = 0
    vm_count = 0
    host_rising = False
    vm_rising = False
    timeout = 900.0  # 15 min
    while time.time() < start_time + timeout:
        last_host_count = host_count
        last_vm_count = vm_count
        # NOTE(review): this logs the counts from the *previous* iteration,
        # since the fresh counts are queried just below.
        logger.info("name: %s, id: %s, vms: %s, hosts: %s",
                    provider.key, mgmt_system_id, vm_count, host_count)
        # Count host and vm metrics for the provider we're testing
        host_count = store.current_appliance.db.client.session.query(metrics_tbl).filter(
            metrics_tbl.parent_ems_id == mgmt_system_id).filter(
            metrics_tbl.resource_type == "Host"
        ).count()
        vm_count = store.current_appliance.db.client.session.query(metrics_tbl).filter(
            metrics_tbl.parent_ems_id == mgmt_system_id).filter(
            metrics_tbl.resource_type == "VmOrTemplate"
        ).count()
        # Latch the "rising" flags once growth is seen, so a later flat
        # reading does not reset them.
        if host_rising is not True:
            if host_count > last_host_count:
                host_rising = True
        if vm_rising is not True:
            if vm_count > last_vm_count:
                vm_rising = True
        # only vms are collected for cloud
        if provider.category == "cloud" and vm_rising:
            return
        # both vms and hosts must be collected for infra
        elif provider.category == "infra" and vm_rising and host_rising:
            return
        else:
            time.sleep(15)
    if time.time() > start_time + timeout:
        raise Exception("Timed out waiting for metrics to be collected")
def get_host_name(provider):
    """Return the name of a randomly chosen host from the provider's yaml data."""
    return random.choice(provider.data["hosts"]).name
def query_metric_db(appliance, provider, metric, vm_name=None, host_name=None):
    """Return a query over all ``metrics`` rows recorded for one resource of
    the given provider.

    Args:
        appliance: appliance whose database is queried.
        provider: provider the resource belongs to (matched by EMS name).
        metric: metric column name. NOTE(review): currently unused -- kept
            for interface compatibility; callers read the column themselves.
        vm_name: name of the VM to look up (takes precedence).
        host_name: name of the host to look up when no vm_name is given.
            NOTE: all callers in this module pass the resource name
            positionally, i.e. through ``vm_name``, even for hosts.

    Raises:
        ValueError: if neither name is supplied. (The original selection
        logic could leave ``object_name`` unbound and die with a NameError,
        and contained an unreachable ``elif vm_name is not None`` branch.)
    """
    metrics_tbl = appliance.db.client['metrics']
    ems = appliance.db.client['ext_management_systems']
    if vm_name is not None:
        object_name = vm_name
    elif host_name is not None:
        object_name = host_name
    else:
        raise ValueError('Either vm_name or host_name must be provided')
    # Subquery of metric row ids belonging to this resource on this provider.
    with appliance.db.client.transaction:
        provs = (
            appliance.db.client.session.query(metrics_tbl.id)
            .join(ems, metrics_tbl.parent_ems_id == ems.id)
            .filter(metrics_tbl.resource_name == object_name,
                    ems.name == provider.name)
        )
    return appliance.db.client.session.query(metrics_tbl).filter(
        metrics_tbl.id.in_(provs.subquery()))
# Tests to check that specific metrics are being collected
@pytest.mark.uncollectif(
    lambda provider: current_version() < "5.7" and provider.type == 'gce')
def test_raw_metric_vm_cpu(metrics_collection, appliance, provider):
    """The first collected CPU sample for the C&U VM must be non-zero."""
    vm_name = provider.data['cap_and_util']['capandu_vm']
    # Infra providers report MHz rate; cloud providers report a percentage.
    if provider.category == "infra":
        metric = 'cpu_usagemhz_rate_average'
    elif provider.category == "cloud":
        metric = 'cpu_usage_rate_average'
    reading = attrgetter(metric)
    query = query_metric_db(appliance, provider, metric, vm_name)
    sample = next((reading(rec) for rec in query if reading(rec) is not None), None)
    if sample is not None:
        assert sample > 0, 'Zero VM CPU Usage'
@pytest.mark.uncollectif(
    lambda provider: provider.type in ('ec2', 'gce'))
def test_raw_metric_vm_memory(metrics_collection, appliance, provider):
    """The first collected memory sample for the C&U VM must be non-zero."""
    vm_name = provider.data['cap_and_util']['capandu_vm']
    # Azure reports absolute memory usage percentage; others derived usage.
    if provider.type == 'azure':
        metric = 'mem_usage_absolute_average'
    else:
        metric = 'derived_memory_used'
    reading = attrgetter(metric)
    query = query_metric_db(appliance, provider, metric, vm_name)
    sample = next((reading(rec) for rec in query if reading(rec) is not None), None)
    if sample is not None:
        assert sample > 0, 'Zero VM Memory Usage'
@pytest.mark.uncollectif(
    lambda provider: current_version() < "5.7" and provider.type == 'gce')
@pytest.mark.meta(
    blockers=[BZ(1408963, forced_streams=["5.7", "5.8", "upstream"],
                 unblock=lambda provider: provider.type != 'rhevm')]
)
def test_raw_metric_vm_network(metrics_collection, appliance, provider):
    """The first collected network-rate sample for the C&U VM must be non-zero."""
    vm_name = provider.data['cap_and_util']['capandu_vm']
    query = query_metric_db(appliance, provider, 'net_usage_rate_average', vm_name)
    sample = next((rec.net_usage_rate_average for rec in query
                   if rec.net_usage_rate_average is not None), None)
    if sample is not None:
        assert sample > 0, 'Zero VM Network IO'
@pytest.mark.uncollectif(
    lambda provider: provider.type == 'ec2')
@pytest.mark.meta(
    blockers=[BZ(1322094, forced_streams=["5.6", "5.7"],
                 unblock=lambda provider: provider.type != 'rhevm')]
)
def test_raw_metric_vm_disk(metrics_collection, appliance, provider):
    """The first collected disk-rate sample for the C&U VM must be non-zero."""
    vm_name = provider.data['cap_and_util']['capandu_vm']
    query = query_metric_db(appliance, provider, 'disk_usage_rate_average', vm_name)
    sample = next((rec.disk_usage_rate_average for rec in query
                   if rec.disk_usage_rate_average is not None), None)
    if sample is not None:
        assert sample > 0, 'Zero VM Disk IO'
@pytest.mark.uncollectif(
    lambda provider: provider.category == 'cloud')
def test_raw_metric_host_cpu(metrics_collection, appliance, provider):
    """The first collected CPU sample for a random host must be non-zero."""
    host_name = get_host_name(provider)
    query = query_metric_db(appliance, provider, 'cpu_usagemhz_rate_average', host_name)
    sample = next((rec.cpu_usagemhz_rate_average for rec in query
                   if rec.cpu_usagemhz_rate_average is not None), None)
    if sample is not None:
        assert sample > 0, 'Zero Host CPU Usage'
@pytest.mark.uncollectif(
    lambda provider: provider.category == 'cloud')
def test_raw_metric_host_memory(metrics_collection, appliance, provider):
    """The first collected memory sample for a random host must be non-zero."""
    host_name = get_host_name(provider)
    query = query_metric_db(appliance, provider, 'derived_memory_used', host_name)
    sample = next((rec.derived_memory_used for rec in query
                   if rec.derived_memory_used is not None), None)
    if sample is not None:
        assert sample > 0, 'Zero Host Memory Usage'
@pytest.mark.uncollectif(
    lambda provider: provider.category == 'cloud')
def test_raw_metric_host_network(metrics_collection, appliance, provider):
    """The first collected network-rate sample for a random host must be non-zero."""
    host_name = get_host_name(provider)
    query = query_metric_db(appliance, provider, 'net_usage_rate_average', host_name)
    sample = next((rec.net_usage_rate_average for rec in query
                   if rec.net_usage_rate_average is not None), None)
    if sample is not None:
        assert sample > 0, 'Zero Host Network IO'
@pytest.mark.uncollectif(
    lambda provider: provider.category == 'cloud')
@pytest.mark.meta(
    blockers=[BZ(1424589, forced_streams=["5.7", "5.8", "upstream"],
                 unblock=lambda provider: provider.type != 'rhevm')]
)
def test_raw_metric_host_disk(metrics_collection, appliance, provider):
    """The first collected disk-rate sample for a random host must be non-zero."""
    host_name = get_host_name(provider)
    query = query_metric_db(appliance, provider, 'disk_usage_rate_average', host_name)
    sample = next((rec.disk_usage_rate_average for rec in query
                   if rec.disk_usage_rate_average is not None), None)
    if sample is not None:
        assert sample > 0, 'Zero Host Disk IO'
| jteehan/cfme_tests | cfme/tests/test_utilization_metrics.py | Python | gpl-2.0 | 9,996 |
"""Class that manages the Internet proxy configuration.
Copyright 2014 Dario B. darizotas at gmail dot com
This software is licensed under a new BSD License.
Unported License. http://opensource.org/licenses/BSD-3-Clause
"""
from wininet.winproxysettings import *
from lxml import etree
import sys, os
import argparse
class WindowsProxyManager:
    """Class that manages the changes to the Internet proxy settings.

    Python 2 / Windows only: wraps a WinProxySettings helper around the
    WinINet per-connection option structures (INTERNET_PER_CONN_*).
    See also:
    https://stackoverflow.com/questions/18117652/how-to-use-ctypes-windll-wininet-internetqueryoptionw-in-python
    """
    def __init__(self):
        """Initialises the Internet proxy settings.
        """
        self.settings = WinProxySettings()
    def disable(self):
        """Disables Internet proxy settings or access directly to Internet.

        Returns True on success, False otherwise.
        """
        print 'Disabling Internet Proxy...'
        # PROXY_TYPE_DIRECT = direct connection, no proxy.
        option = {INTERNET_PER_CONN_FLAGS: PROXY_TYPE_DIRECT}
        return self._change(option)
    def current(self, file):
        """Prints current Internet proxy settings.

        When `file` is given, also exports them as a small XML document
        (<settings><type/><url/><proxy/><bypass/></settings>).
        Returns True when the settings could be read, False otherwise.
        """
        # Query all five per-connection options at once.
        option = (INTERNET_PER_CONN_OPTION * 5)()
        option[0].dwOption = INTERNET_PER_CONN_FLAGS
        option[1].dwOption = INTERNET_PER_CONN_AUTOCONFIG_URL
        option[2].dwOption = INTERNET_PER_CONN_PROXY_SERVER
        option[3].dwOption = INTERNET_PER_CONN_PROXY_BYPASS
        option[4].dwOption = INTERNET_PER_CONN_AUTODISCOVERY_FLAGS
        if self.settings.current(option):
            print 'Current Internet Proxy options:'
            print 'Proxy type: %i' % option[0].Value.dwValue
            print 'Autoconfig URL: %s' % option[1].Value.pszValue
            print 'Static Proxy server: %s' % option[2].Value.pszValue
            print 'Proxy bypass URLs: %s' % option[3].Value.pszValue
            #print 'Autodetect: %i' % option[4].Value.dwValue
            if file:
                try:
                    f = open(file, 'w')
                    f.write('<settings>\n')
                    f.write(' <type>' + str(option[0].Value.dwValue) + '</type>\n')
                    f.write(' <url>' + ('' if option[1].Value.pszValue is None else option[1].Value.pszValue) + '</url>\n')
                    f.write(' <proxy>' + ('' if option[2].Value.pszValue is None else option[2].Value.pszValue) + '</proxy>\n')
                    # Parses whether the local addresses are also excluded.
                    bypass = str(option[3].Value.pszValue)
                    local = bypass.find('<local>')
                    f.write(' <bypass local="' + ('1' if local > -1 else '0') + '">' + \
                        (bypass[:local] if local > -1 else '') + '</bypass>\n')
                    f.write('</settings>')
                    f.close()
                except IOError as ex:
                    print '[Error]', ex
            # Frees memory
            # Windows 8 raises hangs the script while trying to free the memory from the strings.
            #windll.kernel32.GlobalFree(option[1].Value.pszValue)
            #windll.kernel32.GlobalFree(option[2].Value.pszValue)
            #windll.kernel32.GlobalFree(option[3].Value.pszValue)
            return True
        else:
            return False
    def change(self, file):
        """Changes the Internet proxy settings according to the given file.

        `file` is an XML document validated against proxy-settings.xsd
        (located next to this module). Returns True on success.
        """
        try:
            # Schema loading.
            print 'Loading Internet Options schema...'
            # NOTE(review): '\p' is a literal backslash + 'p' (no escape), so
            # this path only works with Windows separators -- intentional here.
            xsdFile = os.path.dirname(os.path.abspath(__file__)) + '\proxy-settings.xsd'
            xsdDoc = etree.parse(xsdFile)
            schema = etree.XMLSchema(xsdDoc)
            print '[Done]'
            # XML parsing and validation.
            config = etree.parse(file)
            print 'Validating Internet Proxy options at [%s]...' % file
            if schema.validate(config):
                print '[Done]'
                # Creates the Internet Options structure
                option = {}
                type = config.xpath('/settings/type')
                option[INTERNET_PER_CONN_FLAGS] = int(type[0].text)
                url = config.xpath('/settings/url')
                if len(url) > 0:
                    option[INTERNET_PER_CONN_AUTOCONFIG_URL] = '' if url[0].text is None else str(url[0].text)
                proxy = config.xpath('/settings/proxy')
                if len(proxy) > 0:
                    option[INTERNET_PER_CONN_PROXY_SERVER] = '' if proxy[0].text is None else str(proxy[0].text)
                bypass = config.xpath('/settings/bypass')
                if len(bypass) > 0:
                    bypassStr = '' if bypass[0].text is None else str(bypass[0].text) + ';'
                    # Local addresses to bypass.
                    bypassLocal = config.xpath('/settings/bypass/@local')
                    if int(bypassLocal[0]):
                        # '<local>' is WinINet's marker for bypassing the
                        # proxy on local addresses.
                        bypassStr += '<local>'
                    option[INTERNET_PER_CONN_PROXY_BYPASS] = str(bypassStr)
                # Apply changes.
                return self._change(option)
            else:
                print '[Error] The given Internet Proxy options does not comply with the expected schema.'
                print schema.error_log
                return False
        except IOError as ex:
            print '[Error]', ex
            return False
    def _change(self, setting):
        """Changes the Internet proxy settings according to the options given in a dict.
        The dict holds as keys the INTERNET_PER_CONN_* constants.

        Integer values go into dwValue, strings into pszValue.
        """
        # Number of settings
        num = len(setting)
        option = (INTERNET_PER_CONN_OPTION * num)()
        # For each setting, it populates the structure.
        for k, v in setting.iteritems():
            # Fill the array from the last slot down to 0.
            num -= 1
            option[num].dwOption = k
            if isinstance(v, int):
                option[num].Value.dwValue = v
            else:
                option[num].Value.pszValue = v
        return self.settings.change(option)
def current(args):
    """argparse handler: print (and optionally export via -e) the active proxy settings."""
    succeeded = WindowsProxyManager().current(args.export)
    if not succeeded:
        sys.exit(1)
def disable(args):
    """argparse handler: switch Internet access to direct (disable the proxy)."""
    succeeded = WindowsProxyManager().disable()
    if not succeeded:
        sys.exit(1)
def change(args):
    """argparse handler: apply the proxy settings from the file given via -f/--file."""
    succeeded = WindowsProxyManager().change(args.file)
    if not succeeded:
        sys.exit(1)
# Top-level argument parser
# NOTE(review): this runs at import time (no `if __name__ == '__main__':`
# guard), so importing this module parses sys.argv and exits -- confirm
# that is intended before reusing the module as a library.
parser = argparse.ArgumentParser(description='Manages the Internet Proxy settings')
subparser = parser.add_subparsers(title='sub-commands', help='Available sub-commands')
# Current sub-command
parserCmdCurrent = subparser.add_parser('current', help='Retrieves the current Internet Proxy settings')
parserCmdCurrent.add_argument('-e', '--export', help='File to export current Internet Proxy settings')
parserCmdCurrent.set_defaults(func=current)
# Disable sub-command
parserCmdDisable = subparser.add_parser('disable', help='Disables Internet Proxy settings')
parserCmdDisable.set_defaults(func=disable)
# Change sub-command
parserCmdChange = subparser.add_parser('change', help='Changes Internet Proxy settings')
parserCmdChange.add_argument('-f', '--file', required=True, help='Proxy settings file')
parserCmdChange.set_defaults(func=change)
# Dispatch to the handler selected by the sub-command (set via set_defaults).
args = parser.parse_args()
args.func(args)
sys.exit(0)
'''tzinfo timezone information for Africa/Harare.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Harare(DstTzInfo):
    '''Africa/Harare timezone definition. See datetime.tzinfo for details'''
    # NOTE: auto-generated pytz zone data -- do not edit values by hand.
    zone = 'Africa/Harare'
    # UTC instants at which the offset changes (epoch sentinel first).
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1903,2,28,21,55,48),
        ]
    # (utcoffset seconds, dst seconds, tzname) matching the transitions above:
    # local mean time (+2:04) until 1903, then CAT (+2:00), no DST.
    _transition_info = [
        i(7440,0,'LMT'),
        i(7200,0,'CAT'),
        ]
# Replace the class with its singleton instance, as pytz expects.
Harare = Harare()
| newvem/pytz | pytz/zoneinfo/Africa/Harare.py | Python | mit | 482 |
from django import forms
from .models import Post, Comment
class PostForm(forms.ModelForm):
    """ModelForm for creating/editing a Post; exposes only title and text."""
    class Meta:
        model = Post
        fields = ('title', 'text',)
class CommentForm(forms.ModelForm):
    """ModelForm for adding a Comment; exposes only author and text."""
    class Meta:
        model = Comment
        fields = ('author', 'text',)
| maciek263/django2 | blog/forms.py | Python | mit | 253 |
import System
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
def CollectByView(bic, view):
    """Return all elements of built-in category `bic` that are owned by `view`."""
    owner_filter = ElementOwnerViewFilter(view.Id)
    return FilteredElementCollector(doc).WherePasses(owner_filter).OfCategory(bic).ToElements()
def GetViewDependentElements(cat, views):
    """Collect view-dependent elements of `cat` for a single view or a list of views."""
    if not isinstance(views, list):
        return CollectByView(cat, UnwrapElement(views))
    return [CollectByView(cat, view) for view in UnwrapElement(views)]
# Dynamo supplies the active Revit document via the RevitServices DocumentManager.
doc = DocumentManager.Instance.CurrentDBDocument
cats = IN[0]   # category or list of categories (Dynamo input port 0)
views = IN[1]  # view or list of views (Dynamo input port 1)
# Mirror Dynamo's lacing: map over categories when a list is supplied.
if isinstance(IN[0], list): OUT = [GetViewDependentElements(x, views) for x in cats]
else: OUT = GetViewDependentElements(cats, views)
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from multitest.models import Test, Question, Answer
class MultitestViewsTests(TestCase):
    """View tests for the multitest app: guest access, access control,
    test listing/solving, scoring, login and registration.

    Fixes: ``self.failUnless(response.status_code, 302)`` passed 302 as the
    *message* argument, so it only checked that the status code was truthy
    (any 200 passed) -- the redirect was never actually asserted. Those calls
    now use ``assertEqual``; the deprecated failUnless/failUnlessEqual
    aliases are replaced by their modern equivalents throughout.
    """
    def setUp(self):
        # A regular active (non-staff) user with a logged-in client.
        self.user = User.objects.create(username='user', is_active=True, is_staff=False, is_superuser=False)
        self.user.set_password('user')
        self.user.save()
        self.c = Client()
        self.c.login(username='user', password='user')
        # One test with a single question and two answers; only 'Yes' is correct.
        self.stest = Test.objects.create(title='Test, test')
        self.squestion = Question.objects.create(question='Is that true?', test=self.stest)
        self.sanswer = Answer.objects.create(answer='Yes', is_correct=True, question=self.squestion)
        self.sanswer2 = Answer.objects.create(answer='No', is_correct=False, question=self.squestion)
    def test_views_guest_access(self):
        # Anonymous visitors may see the index, login and register pages.
        guest = Client()
        response = guest.get(reverse('index'))
        self.assertTemplateUsed(response, 'multitest/index.html')
        response = guest.get(reverse('login'))
        self.assertTemplateUsed(response, 'multitest/login.html')
        response = guest.get(reverse('register'))
        self.assertTemplateUsed(response, 'multitest/register.html')
    def test_only_users_access(self):
        # Anonymous visitors must not reach the test-solving page.
        guest = Client()
        response = guest.get(reverse('test', kwargs={'test_id': self.stest.id}))
        self.assertTemplateNotUsed(response, 'multitest/test.html')
    def test_list_all_tests(self):
        response = self.c.get(reverse('index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, self.stest.title)
    def test_display_test(self):
        response = self.c.get(reverse('test', kwargs={'test_id': self.stest.id}))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, self.squestion.question)
        self.assertContains(response, self.sanswer.answer)
        self.assertContains(response, self.sanswer2.answer)
    def test_display_correct_result(self):
        # The correct answer alone scores 1/1.
        response = self.c.post(reverse('test', kwargs={'test_id': self.stest.id}), {
            'question'+str(self.squestion.id)+'[]': self.sanswer.id,
        })
        self.assertEqual(response.context['points'], 1)
        self.assertEqual(response.context['max_points'], 1)
        # A wrong answer scores 0/1.
        response = self.c.post(reverse('test', kwargs={'test_id': self.stest.id}), {
            'question'+str(self.squestion.id)+'[]': self.sanswer2.id,
        })
        self.assertEqual(response.context['points'], 0)
        self.assertEqual(response.context['max_points'], 1)
        # Selecting every answer (including a wrong one) must not score.
        response = self.c.post(reverse('test', kwargs={'test_id': self.stest.id}), {
            'question'+str(self.squestion.id)+'[]': (self.sanswer2.id, self.sanswer.id),
        })
        self.assertEqual(response.context['points'], 0)
        self.assertEqual(response.context['max_points'], 1)
    def test_user_login(self):
        guest = Client()
        response = guest.post(reverse('login'), {'login': 'user', 'password': 'user'})
        # Assert the redirect for real (was failUnless with 302 as msg).
        self.assertEqual(response.status_code, 302)
        response = guest.get(reverse('index'))
        self.assertContains(response, 'Wyloguj')
    def test_user_register(self):
        users = User.objects.count()
        guest = Client()
        response = guest.post(reverse('register'),
            {'login': 'test2', 'email': 'test2@wp.pl', 'password': 'test2', 'verify': 'warszawa'})
        # Assert the redirect for real (was failUnless with 302 as msg).
        self.assertEqual(response.status_code, 302)
        self.assertEqual(User.objects.count(), users+1)
from rest_framework import viewsets, filters
from rest_framework.decorators import list_route, detail_route
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAdminUser
from django.utils import timezone
from django_filters.rest_framework import DjangoFilterBackend
from dateutil.parser import parse
import logging
from valhalla.proposals.models import Proposal, Semester, TimeAllocation
from valhalla.userrequests.models import UserRequest, Request, DraftUserRequest
from valhalla.userrequests.filters import UserRequestFilter, RequestFilter
from valhalla.userrequests.cadence import expand_cadence_request
from valhalla.userrequests.serializers import RequestSerializer, UserRequestSerializer
from valhalla.userrequests.serializers import DraftUserRequestSerializer, CadenceRequestSerializer
from valhalla.userrequests.duration_utils import (get_request_duration_dict, get_max_ipp_for_userrequest,
OVERHEAD_ALLOWANCE)
from valhalla.userrequests.state_changes import InvalidStateChange, TERMINAL_STATES
from valhalla.userrequests.request_utils import (get_airmasses_for_request_at_sites,
get_telescope_states_for_request)
logger = logging.getLogger(__name__)
class UserRequestViewSet(viewsets.ModelViewSet):
    """API endpoints for UserRequests: CRUD (create/read only via
    http_method_names), cancellation, validation, IPP calculation, cadence
    expansion and the scheduler feed. Visibility is scoped to the user's
    proposals (staff see everything, anonymous users see public proposals).
    """
    permission_classes = (IsAuthenticatedOrReadOnly,)
    # Deliberately no PUT/PATCH/DELETE: user requests are immutable once submitted.
    http_method_names = ['get', 'post', 'head', 'options']
    serializer_class = UserRequestSerializer
    filter_class = UserRequestFilter
    filter_backends = (
        filters.OrderingFilter,
        DjangoFilterBackend
    )
    ordering = ('-id',)
    def get_throttles(self):
        # Apply per-action throttle scopes (configured in settings) to the
        # expensive endpoints only.
        actions_to_throttle = ['cancel', 'validate', 'create']
        if self.action in actions_to_throttle:
            self.throttle_scope = 'userrequests.' + self.action
        return super().get_throttles()
    def get_queryset(self):
        # Staff: all; authenticated: own proposals; anonymous: public proposals.
        if self.request.user.is_staff:
            qs = UserRequest.objects.all()
        elif self.request.user.is_authenticated:
            qs = UserRequest.objects.filter(
                proposal__in=self.request.user.proposal_set.all()
            )
        else:
            qs = UserRequest.objects.filter(proposal__in=Proposal.objects.filter(public=True))
        # Prefetch the full request tree to avoid N+1 queries on serialization.
        return qs.prefetch_related(
            'requests', 'requests__windows', 'requests__molecules', 'requests__constraints',
            'requests__target', 'requests__location'
        )
    def perform_create(self, serializer):
        # Stamp the authenticated user as the submitter.
        serializer.save(submitter=self.request.user)
    @list_route(methods=['get'], permission_classes=(IsAdminUser,))
    def schedulable_requests(self, request):
        '''
        Gets the set of schedulable User requests for the scheduler, should be called right after isDirty finishes
        Needs a start and end time specified as the range of time to get requests in. Usually this is the entire
        semester for a scheduling run.
        '''
        current_semester = Semester.current_semesters().first()
        # Default the window to the current semester; force UTC on parsed values.
        start = parse(request.query_params.get('start', str(current_semester.start))).replace(tzinfo=timezone.utc)
        end = parse(request.query_params.get('end', str(current_semester.end))).replace(tzinfo=timezone.utc)
        # Schedulable requests are not in a terminal state, are part of an active proposal,
        # and have a window within this semester
        queryset = UserRequest.objects.exclude(state__in=TERMINAL_STATES).filter(
            requests__windows__start__lte=end, requests__windows__start__gte=start,
            proposal__active=True).prefetch_related('requests', 'requests__windows', 'requests__target', 'proposal',
                                                    'proposal__timeallocation_set', 'requests__molecules', 'submitter',
                                                    'requests__location', 'requests__constraints').distinct()
        # queryset now contains all the schedulable URs and their associated requests and data
        # Check that each request time available in its proposal still
        ur_data = []
        tas = {}  # memoized TimeAllocations keyed on (time-allocation key, proposal id)
        for ur in queryset.all():
            total_duration_dict = ur.total_duration
            for tak, duration in total_duration_dict.items():
                if (tak, ur.proposal.id) in tas:
                    time_allocation = tas[(tak, ur.proposal.id)]
                else:
                    time_allocation = TimeAllocation.objects.get(
                        semester=tak.semester,
                        instrument_name=tak.instrument_name,
                        telescope_class=tak.telescope_class,
                        proposal=ur.proposal.id,
                    )
                    tas[(tak, ur.proposal.id)] = time_allocation
                # NORMAL observations draw from the standard allocation,
                # everything else from the target-of-opportunity allocation.
                if ur.observation_type == UserRequest.NORMAL:
                    time_left = time_allocation.std_allocation - time_allocation.std_time_used
                else:
                    time_left = time_allocation.too_allocation - time_allocation.too_time_used
                # duration is in seconds; allocations in hours. A UR is
                # included as soon as one of its allocations can cover it.
                if time_left * OVERHEAD_ALLOWANCE >= (duration / 3600.0):
                    ur_data.append(ur.as_dict)
                    break
                else:
                    logger.warning(
                        'not enough time left {0} in proposal {1} for ur {2} of duration {3}, skipping'.format(
                            time_left, ur.proposal.id, ur.id, (duration / 3600.0)
                        )
                    )
        return Response(ur_data)
    @detail_route(methods=['post'])
    def cancel(self, request, pk=None):
        # Move the UR to CANCELED; InvalidStateChange maps to HTTP 400.
        ur = self.get_object()
        try:
            ur.state = 'CANCELED'
            ur.save()
        except InvalidStateChange as exc:
            return Response({'errors': [str(exc)]}, status=400)
        return Response(UserRequestSerializer(ur).data)
    @list_route(methods=['post'])
    def validate(self, request):
        # Dry-run validation: returns per-request durations when valid,
        # serializer errors otherwise (always HTTP 200).
        serializer = UserRequestSerializer(data=request.data, context={'request': request})
        req_durations = {}
        if serializer.is_valid():
            req_durations = get_request_duration_dict(serializer.validated_data['requests'])
            errors = {}
        else:
            errors = serializer.errors
        return Response({'request_durations': req_durations,
                         'errors': errors})
    @list_route(methods=['post'])
    def max_allowable_ipp(self, request):
        # change requested ipp to 1 because we want it to always pass the serializers ipp check
        request.data['ipp_value'] = 1.0
        serializer = UserRequestSerializer(data=request.data, context={'request': request})
        if serializer.is_valid():
            ipp_dict = get_max_ipp_for_userrequest(serializer.validated_data)
            return Response(ipp_dict)
        else:
            return Response({'errors': serializer.errors})
    @list_route(methods=['post'])
    def cadence(self, request):
        # Expand any request carrying a 'cadence' block into concrete
        # requests; non-cadence requests pass through unchanged.
        expanded_requests = []
        for req in request.data.get('requests', []):
            if isinstance(req, dict) and req.get('cadence'):
                cadence_request_serializer = CadenceRequestSerializer(data=req)
                if cadence_request_serializer.is_valid():
                    expanded_requests.extend(expand_cadence_request(cadence_request_serializer.validated_data))
                else:
                    return Response(cadence_request_serializer.errors, status=400)
            else:
                expanded_requests.append(req)
        # if we couldn't find any valid cadence requests, return that as an error
        if not expanded_requests:
            return Response({'errors': 'No visible requests within cadence window parameters'}, status=400)
        # now replace the originally sent requests with the cadence requests and send it back
        ret_data = request.data.copy()
        ret_data['requests'] = expanded_requests
        if(len(ret_data['requests']) > 1):
            ret_data['operator'] = 'MANY'
        # Final whole-UR validation of the expanded payload.
        ur_serializer = UserRequestSerializer(data=ret_data, context={'request': request})
        if not ur_serializer.is_valid():
            return Response(ur_serializer.errors, status=400)
        return Response(ret_data)
class RequestViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoints for individual Requests, plus airmass,
    telescope-state and observation-block detail routes. Visibility follows
    the same proposal scoping as UserRequestViewSet.
    """
    permission_classes = (IsAuthenticatedOrReadOnly,)
    serializer_class = RequestSerializer
    filter_class = RequestFilter
    filter_backends = (
        filters.OrderingFilter,
        DjangoFilterBackend
    )
    ordering = ('-id',)
    ordering_fields = ('id', 'state', 'fail_count')
    def get_queryset(self):
        # Staff: all; authenticated: own proposals; anonymous: public proposals.
        if self.request.user.is_staff:
            return Request.objects.all()
        elif self.request.user.is_authenticated:
            return Request.objects.filter(
                user_request__proposal__in=self.request.user.proposal_set.all()
            )
        else:
            return Request.objects.filter(user_request__proposal__in=Proposal.objects.filter(public=True))
    @detail_route()
    def airmass(self, request, pk=None):
        # Airmass curves for this request at each candidate site.
        return Response(get_airmasses_for_request_at_sites(self.get_object().as_dict))
    @detail_route()
    def telescope_states(self, request, pk=None):
        # Keys are stringified so the structure is JSON-serializable.
        telescope_states = get_telescope_states_for_request(self.get_object())
        str_telescope_states = {str(k): v for k, v in telescope_states.items()}
        return Response(str_telescope_states)
    @detail_route()
    def blocks(self, request, pk=None):
        # With ?canceled=<truthy>, canceled blocks are filtered *out*.
        blocks = self.get_object().blocks
        if request.GET.get('canceled'):
            return Response([b for b in blocks if not b['canceled']])
        return Response(blocks)
class DraftUserRequestViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for draft user requests; a user only sees drafts
    belonging to their own proposals."""
    serializer_class = DraftUserRequestSerializer
    ordering = ('-modified',)
    def perform_create(self, serializer):
        """Stamp the authenticated user as the draft's author on creation."""
        serializer.save(author=self.request.user)
    def get_queryset(self):
        """Return drafts on the user's proposals; nothing for anonymous users."""
        user = self.request.user
        if not user.is_authenticated:
            return DraftUserRequest.objects.none()
        return DraftUserRequest.objects.filter(proposal__in=user.proposal_set.all())
| LCOGT/valhalla | valhalla/userrequests/viewsets.py | Python | gpl-3.0 | 10,206 |
# coding: utf-8
#
# drums-backend a simple interactive audio sampler that plays vorbis samples
# Copyright (C) 2009 C.D. Immanuel Albrecht
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from math import *
import random
import copy
class pgrammar(object):
    """a p-grammar is a grammar that works in a probabilistic way"""
    def __init__(self, rules):
        """ initialize the pgrammar object, where rules is a dictionary
        mapping non-terminals to lists of (omega, p) tuples """
        self.rules = rules
        self.nonterminals = self.rules.keys()
        self.p_sums = {}        # total rule weight per non-terminal
        self.lower_bounds = {}  # cumulative lower bounds of each rule's probability interval
        self.random_maps = {}   # A -> function mapping a uniform [0,1) draw to a chosen rule's omega
        for A in self.nonterminals:
            total = sum( map( lambda x: x[1], self.rules[A] ) )
            self.p_sums[A] = total
            # Guard against division by zero when all weights are <= 0.
            if total <= 0.0:
                total = 1.0
            self.lower_bounds[A] = []
            bound = 0.0
            # Build, as source text, a nested conditional expression that maps
            # x in [0,1) to the index of the rule whose interval contains x.
            boundmapstring = "0"
            count = 0
            for r in self.rules[A]:
                self.lower_bounds[A].append(bound)
                boundmapstring = repr(count) + " if (x>=" + repr(bound/total) \
                    + ") else " + boundmapstring
                bound += r[1]
                count += 1
            boundmapstring = "lambda x: " + boundmapstring
            #print A,':',boundmapstring
            # eval() compiles the selector once per non-terminal; A and fn are
            # bound as default arguments to freeze them at definition time.
            self.random_maps[A] = lambda x,A=A,fn=eval(boundmapstring):\
                self.rules[A][fn(x)][0]
    def __call__(self, word):
        # One rewrite step: every distinct non-terminal token in the word is
        # expanded once (same token -> same expansion throughout the word).
        tokens_list = filter(lambda x: x[1], word)
        tokens = {}
        for t in tokens_list:
            # repr() of the (symbol, variables) tuple is used as a hashable key.
            T = repr(t)
            if not T in tokens:
                def rulemap(x):
                    # Non-terminal children extend the parent's variable list;
                    # terminal children get an empty one.
                    if x[1]:
                        return (x[0],t[1]+[x[1]])
                    else:
                        return (x[0],[])
                tokens[T] = map(rulemap, \
                    self.random_maps[t[0]](random.uniform(0.0,1.0)))
        w = []
        for x in word:
            X = repr(x)
            if X in tokens:
                w.extend(tokens[X])
            else: # make unknown non-terminals terminals
                w.append((x[0],[]))
        return w
def projectTerminals(word):
    """Return the symbols of all terminal tokens (empty variable list) in the word."""
    return [token[0] for token in word if not token[1]]
def hasNonTerminals(word):
    """Return True iff the word still contains a non-terminal token
    (one with a non-empty variable list)."""
    return any(token[1] for token in word)
def definiteTerminals(word):
    """Return the symbols of the leading terminal tokens, stopping at the
    first non-terminal."""
    prefix = []
    for token in word:
        if token[1]:
            break
        prefix.append(token[0])
    return prefix
def countDefiniteTerminals(word):
    """Return the number of leading terminal tokens before the first
    non-terminal."""
    count = 0
    for token in word:
        if token[1]:
            break
        count += 1
    return count
def makeRule(code):
    """ make a rule data structure from code:
        NONTERMINAL: *TERMINAL(NUMBER) ... *TERMINAL(NUMBER): PVALUE
        where NUMBER=0 means terminal, and all nonterminals with
        the same NUMBER refer to the same instances of nonterminals. """
    # Fix: use list comprehensions instead of map()/filter(). The original
    # relied on Python 2 returning lists; under Python 3, len(c) and
    # c.append() crash on a map object. Behaviour on Python 2 is unchanged.
    c = [part.strip() for part in code.split(':')]
    if len(c) == 2:
        c.append("1.0")  # probability weight defaults to 1.0 when omitted
    if not len(c) == 3:
        raise Exception("Wrong rule format: "+str(code))
    lefthand = c[0].strip()
    pvalue = float(c[2])
    termnums = [chunk for chunk in c[1].split(")") if chunk]
    def int0(x):
        # int() that maps unparsable text to 0 (i.e. "terminal")
        try:
            return int(x)
        except ValueError:
            return 0
    def mk_sym(x):
        # 'NAME(NUM' -> (NAME, NUM)
        s = x.split("(")
        if not len(s) == 2:
            raise Exception("Wrong bracket format: " + str(code))
        return (s[0].strip(), int0(s[1]))
    symbols = [mk_sym(chunk) for chunk in termnums]
    righthand = [(symbols, pvalue)]
    return {lefthand: righthand}
def makeWord(code):
    """Parse a word literal 'NAME(NUM) NAME(NUM) ...' into a list of
    (symbol, variables) tokens; NUM == 0 (or unparsable) yields a terminal
    with an empty variable list, any other NUM a one-element list."""
    chunks = [chunk for chunk in code.split(")") if chunk]
    def int0(text):
        try:
            return int(text)
        except ValueError:
            return 0
    def lint0(text):
        value = int0(text)
        if value:
            return [value]
        return []
    def mk_sym(chunk):
        pieces = chunk.split("(")
        if not len(pieces) == 2:
            raise Exception("Wrong bracket format: " + str(code))
        return (pieces[0].strip(), lint0(pieces[1]))
    return [mk_sym(chunk) for chunk in chunks]
def simplifyWord(word):
    """Canonically renumber the variables of a word: equal variable lists map
    to the same (shared) renamed list, numbered 1, 2, ... in order of first
    appearance; empty variable lists stay empty."""
    canonical = {(): []}
    next_var = 1
    renamed = []
    for token in word:
        key = tuple(token[1])
        if key not in canonical:
            canonical[key] = [next_var]
            next_var += 1
        renamed.append((token[0], canonical[key]))
    return renamed
def addRules(x, add):
    """Merge the productions of rule-dict `add` into rule-dict `x` in place."""
    for key, productions in add.items():
        x.setdefault(key, []).extend(productions)
def makeRules(rules, default=None):
    """Build a rule dictionary from a newline-separated block of rule
    definitions (one makeRule()-style line each).  When *default* is
    given, start from a deep copy of it instead of an empty dict."""
    result = copy.deepcopy(default) if default else {}
    for raw_line in rules.split("\n"):
        line = raw_line.strip()
        if line:
            addRules(result, makeRule(line))
    return result
if __name__ == '__main__':
    # Ad-hoc smoke test: build a tiny probabilistic grammar.
    # NOTE(review): pgrammar is defined earlier in this file; `g` and
    # `initial` are presumably consumed by code following this chunk.
    rules = {}
    rules['A'] = [([('A',1),('B',1),('A',1),('A',2)], 0.5),\
                  ([('C',1)], 1.1)]
    rules['B'] = [([('!',0)],0)]
    rules['C'] = [([('_',0)],0)]
    g = pgrammar(rules)
    initial = [('A',[1]),('A',[])]
class neuron(object):
    """A neuron has an inner activation level, an accumulated input and
    output synapses."""
    def __init__(self, level=None, synapses=None, delta=None):
        """ initialize a neuron with its level and output synapses-weighting

        level    -- initial activation level (default 0.5)
        synapses -- dict mapping transfer functions to sets of target
                    neurons (default: empty dict)
        delta    -- activation function applied to the accumulated input
                    (default: input + level, clamped to [0.0, 1.0])
        """
        # Explicit `is None` checks: the original truthiness tests
        # silently replaced valid falsy arguments (level=0.0,
        # synapses={}) with the defaults.
        if level is None:
            self.level = 0.5
        else:
            self.level = level
        if synapses is None:
            self.synapses = {}
        else:
            self.synapses = synapses
        #
        # self.synapses maps function objects on sets of neurons
        #
        self.input = 0
        if delta is None:
            self.delta = lambda x: min(1.0,max(0.0,(x+self.level)))
        else:
            self.delta = delta
    def react(self):
        """Consume the accumulated input: update the level through the
        activation function and reset the input to zero."""
        self.level = self.delta(self.input)
        self.input = 0
    def feed(self):
        """Propagate this neuron's level through all output synapses,
        adding the weighted value to each target neuron's input."""
        for fn in self.synapses:
            y = fn(self.level)
            for n in self.synapses[fn]:
                n.input += y
class cluster(object):
    """a cluster is a set of neurons that may have synapse connections"""
    def __init__(self, count=32, renormalization=None):
        """ initialize a neuron cluster with count neurons

        renormalization -- if set, the sum of all levels is rescaled to
        this value after every feedback() step.
        """
        self.neurons = [neuron() for i in range(count)]
        self.renormalization = renormalization
    def feedback(self):
        """Run one network step: every neuron feeds its synapses, then
        every neuron reacts to its accumulated input; optionally
        renormalize the levels afterwards."""
        for n in self.neurons:
            n.feed()
        for n in self.neurons:
            n.react()
        if self.renormalization:
            factor = self.renormalization / sum(n.level for n in self.neurons)
            for n in self.neurons:
                n.level *= factor
    def shock(self, energy):
        """Add *energy* directly to every neuron's level."""
        for n in self.neurons:
            n.level += energy
    def jolt(self, energy):
        """Add *energy* to every neuron's pending input."""
        for n in self.neurons:
            n.input += energy
    def connect(self, i, fn, j):
        """Connect neuron i to neuron j through transfer function fn."""
        self.neurons[i].synapses.setdefault(fn, set()).add(self.neurons[j])
    def levels(self):
        """Return the levels of all neurons as a list.

        A list comprehension (not map()) so the result is a concrete
        list under Python 3 as well; on Python 2 map() already returned
        a list."""
        return [n.level for n in self.neurons]
def trigger_fn(limit, high, low=0.0):
    """Build a threshold (step) function: it returns *high* for inputs
    at or above *limit* and *low* otherwise.  The parameters are bound
    as defaults of the inner function, as in the original."""
    def fn(x, limit=limit, high=high, low=low):
        return high if x >= limit else low
    return fn
| immo/pyTOM | df/df_experimental.py | Python | gpl-3.0 | 7,868 |
"""OAuth 2.0 WSGI server middleware providing MyProxy certificates as access tokens
"""
__author__ = "R B Wilkinson"
__date__ = "12/12/11"
__copyright__ = "(C) 2011 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = "$Id$"
import httplib
import logging
from webob import Request
from ndg.oauth.server.lib.register.client import ClientRegister
from ndg.oauth.server.lib.register.client_authorization import (
ClientAuthorization, ClientAuthorizationRegister)
from ndg.oauth.server.lib.render.configuration import RenderingConfiguration
from ndg.oauth.server.lib.render.factory import callModuleObject
from ndg.oauth.server.lib.render.renderer_interface import RendererInterface
log = logging.getLogger(__name__)
class Oauth2AuthorizationMiddleware(object):
    """Middleware to handle user/resource owner authorization of clients within
    a session.
    On each invocation, sets the current authorizations in the WSGI environ.
    At a specific URL, provides a simple form for a user to set an authorization
    decision.
    """
    # Beaker session key holding the user's ClientAuthorizationRegister.
    CLIENT_AUTHORIZATIONS_SESSION_KEY = 'oauth2_client_authorizations'
    # Session key holding the pending request context between the
    # authorize() form and the client_auth() form submission.
    SESSION_CALL_CONTEXT_KEY = 'oauth2_client_authorizations_context'
    PARAM_PREFIX = 'oauth2authorization.'
    LAYOUT_PREFIX = 'layout.'
    # Configuration options
    BASE_URL_PATH_OPTION = 'base_url_path'
    CLIENT_AUTHORIZATION_FORM_OPTION = 'client_authorization_form'
    CLIENT_AUTHORIZATIONS_KEY_OPTION = 'client_authorizations_key'
    CLIENT_REGISTER_OPTION = 'client_register'
    RENDERER_CLASS_OPTION = 'renderer_class'
    SESSION_KEY_OPTION = 'session_key_name'
    USER_IDENTIFIER_KEY_OPTION = 'user_identifier_key'
    # Dispatch table: URL sub-path (relative to base_path) -> handler
    # method name; used by __call__.
    method = {
        '/authorize': 'authorize',
        '/client_auth': 'client_auth'
    }
    # Configuration option defaults
    PROPERTY_DEFAULTS = {
        BASE_URL_PATH_OPTION: 'client_authorization',
        RENDERER_CLASS_OPTION: \
            'ndg.oauth.server.lib.render.genshi_renderer.GenshiRenderer',
        SESSION_KEY_OPTION: 'beaker.session.oauth2authorization',
        CLIENT_AUTHORIZATIONS_KEY_OPTION: 'client_authorizations',
        USER_IDENTIFIER_KEY_OPTION: 'REMOTE_USER'
    }
    # Template layout parameters that may be set via configuration
    # (collected by RenderingConfiguration under the layout. prefix).
    LAYOUT_PARAMETERS = ['heading',
                         'title',
                         'message',
                         'leftLogo',
                         'leftAlt',
                         'leftImage',
                         'leftLink',
                         'rightAlt',
                         'rightImage',
                         'rightLink',
                         'footerText',
                         'helpIcon']
    def __init__(self, app, app_conf, prefix=PARAM_PREFIX, **local_conf):
        """
        Sets up the server depending on the configuration.
        @type app: WSGI application
        @param app: wrapped application/middleware
        @type app_conf: dict
        @param app_conf: application configuration settings - ignored - this
        method includes this arg to fit Paste middleware / app function
        signature
        @type prefix: str
        @param prefix: optional prefix for parameter names included in the
        local_conf dict - enables these parameters to be filtered from others
        which don't apply to this middleware
        @type local_conf: dict
        @param local_conf: attribute settings to apply
        """
        self._app = app
        self._renderingConfiguration = RenderingConfiguration(
                                                    self.LAYOUT_PARAMETERS,
                                                    prefix + self.LAYOUT_PREFIX,
                                                    local_conf)
        self._set_configuration(prefix, local_conf)
        # Client register is loaded once at start-up from the configured
        # file; changes to the file require a restart to take effect.
        self.client_register = ClientRegister(self.client_register_file)
        self.renderer = callModuleObject(self.renderer_class,
                                         objectName=None, moduleFilePath=None,
                                         objectType=RendererInterface,
                                         objectArgs=None, objectProperties=None)
    def __call__(self, environ, start_response):
        """
        @type environ: dict
        @param environ: WSGI environment
        @type start_response:
        @param start_response: WSGI start response function
        @rtype: iterable
        @return: WSGI response
        """
        log.debug("Oauth2AuthorizationMiddleware.__call__ ...")
        req = Request(environ)
        # Get session.
        session = environ.get(self.session_env_key)
        if session is None:
            raise Exception(
                'Oauth2AuthorizationMiddleware.__call__: No beaker session key '
                '"%s" found in environ' % self.session_env_key)
        # Determine what operation the URL specifies.
        actionPath = None
        log.debug("Request path_info: %s", req.path_info)
        if req.path_info.startswith(self.base_path):
            actionPath = req.path_info[len(self.base_path):]
        # Look up the handler method for the sub-path; '' when the URL is
        # not one this middleware serves.
        methodName = self.__class__.method.get(actionPath, '')
        if methodName:
            log.debug("Method: %s" % methodName)
            action = getattr(self, methodName)
            return action(req, session, start_response)
        elif self._app is not None:
            # Not our URL: expose current authorizations and delegate.
            log.debug("Delegating to lower filter/application.")
            self._set_client_authorizations_in_environ(session, environ)
            return self._app(environ, start_response)
        else:
            response = "OAuth 2.0 Authorization Filter - Invalid URL"
            start_response(self._get_http_status_string(httplib.NOT_FOUND),
                           [('Content-type', 'text/plain'),
                            ('Content-length', str(len(response)))
                            ])
            return [response]
    def _set_client_authorizations_in_environ(self, session, environ):
        """
        Sets the current authorizations currently granted by the user in
        environ,
        @type session: Beaker SessionObject
        @param session: session data
        @type environ: dict
        @param environ: WSGI environment
        """
        client_authorizations = session.get(self.CLIENT_AUTHORIZATIONS_SESSION_KEY)
        if client_authorizations:
            log.debug("_set_client_authorizations_in_environ %r",
                      client_authorizations)
            environ[self.client_authorizations_env_key] = client_authorizations
        else:
            log.debug("%s not found in session",
                      self.CLIENT_AUTHORIZATIONS_SESSION_KEY)
    def authorize(self, req, session, start_response):
        """
        Checks whether the user has already authorized the client and if not
        displays the authorization form.
        @type req: webob.Request
        @param req: HTTP request object
        @type session: Beaker SessionObject
        @param session: session data
        @type start_response:
        @param start_response: WSGI start response function
        @rtype: iterable
        @return: WSGI response
        """
        client_id = req.params.get('client_id')
        scope = req.params.get('scope')
        user = req.params.get('user')
        original_url = req.params.get('original_url')
        log.debug("Client authorization request for client_id: %s scope: %s "
                  "user: %s", client_id, scope, user)
        client_authorizations = session.get(
            self.CLIENT_AUTHORIZATIONS_SESSION_KEY)
        # client_authorized is tri-state: None (no decision yet),
        # True (granted) or False (denied).
        client_authorized = None
        if client_authorizations:
            client_authorized = \
                client_authorizations.is_client_authorized_by_user(user,
                                                                   client_id,
                                                                   scope)
        if client_authorized is None:
            # No existing decision - let user decide.
            # Stash the request parameters so client_auth() can recover
            # them after the form round-trip.
            session[self.SESSION_CALL_CONTEXT_KEY] = {
                'original_url': original_url,
                'client_id': client_id,
                'scope': scope,
                'user': user
            }
            session.save()
            log.debug("Client not authorized by user - returning authorization "
                      "form.")
            return self._client_auth_form(client_id, scope, req, start_response)
        else:
            log.debug("Client already %s authorization by user.",
                      ("granted" if client_authorized else "denied"))
            log.debug("Redirecting to %s", original_url)
            return self._redirect(original_url, start_response)
    def _client_auth_form(self, client_id, scope, req, start_response):
        """
        Returns a form for the user to enter an authorization decision.
        @type client_id: str
        @param client_id: client identifier as set in the client register
        @type scope: str
        @param scope: authorization scope
        @type req: webob.Request
        @param req: HTTP request object
        @type start_response:
        @param start_response: WSGI start response function
        @rtype: iterable
        @return: WSGI response
        """
        client = self.client_register.register.get(client_id)
        if client is None:
            # Client ID is not registered.
            log.error("OAuth client of ID %s is not registered with the server",
                      client_id)
            response = (
                "OAuth client of ID %s is not registered with the server" %
                client_id)
        else:
            submit_url = req.application_url + self.base_path + '/client_auth'
            c = {'client_name': client.name,
                 'client_id': client_id,
                 'scope': scope,
                 'submit_url': submit_url,
                 'baseURL': req.application_url}
            response = self.renderer.render(self.client_authorization_form,
                            self._renderingConfiguration.merged_parameters(c))
        # NOTE(review): both the error text and the rendered form are
        # returned with a 200 status here.
        start_response(self._get_http_status_string(httplib.OK),
                       [('Content-type', 'text/html'),
                        ('Content-length', str(len(response)))
                        ])
        return [response]
    def client_auth(self, req, session, start_response):
        """
        @type req: webob.Request
        @param req: HTTP request object
        @type session: Beaker SessionObject
        @param session: session data
        @type start_response:
        @param start_response: WSGI start response function
        @rtype: iterable
        @return: WSGI response
        """
        # Handles the authorization form submission: records the user's
        # grant/deny decision and redirects back to the original URL.
        call_context = session.get(self.SESSION_CALL_CONTEXT_KEY)
        if not call_context:
            log.error("No session context.")
            response = 'Internal server error'
            status_str = self._get_http_status_string(
                httplib.INTERNAL_SERVER_ERROR)
            start_response(status_str,
                           [('Content-type', 'text/html'),
                            ('Content-length', str(len(response)))
                            ])
            return [response]
        # The form's submit button grants; anything else (cancel) denies.
        if 'submit' in req.params and 'cancel' not in req.params:
            log.debug("User authorized client.")
            granted = True
        else:
            log.debug("User declined authorization for client.")
            granted = False
        # Add authorization to those for the user.
        client_authorizations = session.setdefault(
            self.CLIENT_AUTHORIZATIONS_SESSION_KEY,
            ClientAuthorizationRegister())
        client_id = call_context['client_id']
        scope = call_context['scope']
        user = call_context['user']
        log.debug("Adding client authorization for client_id: %s scope: %s "
                  "user: %s", client_id, scope, user)
        client_authorization = ClientAuthorization(user, client_id, scope,
                                                   granted)
        client_authorizations.add_client_authorization(client_authorization)
        session[self.CLIENT_AUTHORIZATIONS_SESSION_KEY] = client_authorizations
        log.debug("### client_auth: %r", client_authorizations)
        session.save()
        original_url = call_context['original_url']
        log.debug("Redirecting to %s", original_url)
        return self._redirect(original_url, start_response)
    def _set_configuration(self, prefix, local_conf):
        """Sets the configuration values.
        @type prefix: str
        @param prefix: optional prefix for parameter names included in the
        local_conf dict - enables these parameters to be filtered from others
        which don't apply to this middleware
        @type local_conf: dict
        @param local_conf: attribute settings to apply
        """
        cls = self.__class__
        self.base_path = cls._get_config_option(
            prefix,
            local_conf,
            cls.BASE_URL_PATH_OPTION)
        self.client_register_file = cls._get_config_option(
            prefix,
            local_conf,
            cls.CLIENT_REGISTER_OPTION)
        self.renderer_class = cls._get_config_option(
            prefix,
            local_conf,
            cls.RENDERER_CLASS_OPTION)
        self.session_env_key = cls._get_config_option(
            prefix, local_conf,
            cls.SESSION_KEY_OPTION)
        self.client_authorization_form = cls._get_config_option(
            prefix,
            local_conf,
            cls.CLIENT_AUTHORIZATION_FORM_OPTION)
        self.client_authorizations_env_key = cls._get_config_option(prefix,
                                        local_conf,
                                        cls.CLIENT_AUTHORIZATIONS_KEY_OPTION)
        self.user_identifier_env_key = cls._get_config_option(
            prefix,
            local_conf,
            cls.USER_IDENTIFIER_KEY_OPTION)
    @staticmethod
    def _get_http_status_string(status):
        # e.g. 404 -> "404 Not Found", as required by WSGI start_response.
        return ("%d %s" % (status, httplib.responses[status]))
    def _redirect(self, url, start_response):
        # 302 redirect; the Location value is coerced to ASCII with
        # non-encodable characters dropped.
        log.debug("Redirecting to %s", url)
        start_response(self._get_http_status_string(httplib.FOUND),
                       [('Location', url.encode('ascii', 'ignore'))])
        return []
    @classmethod
    def _get_config_option(cls, prefix, local_conf, key):
        # Prefixed local_conf value, falling back to PROPERTY_DEFAULTS.
        value = local_conf.get(prefix + key, cls.PROPERTY_DEFAULTS.get(key, None))
        log.debug("Oauth2AuthorizationMiddleware configuration %s=%s", key, value)
        return value
    @classmethod
    def filter_app_factory(cls, app, app_conf, **local_conf):
        # Paste filter_app_factory entry point.
        return cls(app, app_conf, **local_conf)
| TheLanguageArchive/ndg_oauth | ndg_oauth_server/ndg/oauth/server/wsgi/authorization_filter.py | Python | bsd-3-clause | 15,524 |
"""Consolation: Repuge-NG's display frontend library.
Includes the XML-RPC code for multiple interfaces.
This may be useful even if Ludicrous is not used.
"""
__copying__="""
Written by Thomas Hori
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/."""
| thomas-hori/Repuge-NG | consolation/__init__.py | Python | mpl-2.0 | 409 |
# $Id$
#
# Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the signatures
"""
from __future__ import print_function
import unittest
import os
from rdkit.six import next
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem.Pharm2D import Gobbi_Pharm2D, Generate
class TestCase(unittest.TestCase):
  # Unit tests for the Gobbi 2D pharmacophore fingerprints
  # (Gobbi_Pharm2D factory + Generate.Gen2DFingerprint).

  def setUp(self):
    self.factory = Gobbi_Pharm2D.factory

  def test1Sigs(self):
    # Each probe maps a SMILES string to the expected result per feature
    # family key: (shouldMatch, tuple-of-atom-id-tuples or None).
    probes = [
      ('OCCC=O', {
        'HA': (1, ((0, ), (4, ))),
        'HD': (1, ((0, ), )),
        'LH': (0, None),
        'AR': (0, None),
        'RR': (0, None),
        'X': (0, None),
        'BG': (0, None),
        'AG': (0, None),
      }),
      ('OCCC(=O)O', {
        'HA': (1, ((0, ), (4, ))),
        'HD': (1, ((0, ), (5, ))),
        'LH': (0, None),
        'AR': (0, None),
        'RR': (0, None),
        'X': (0, None),
        'BG': (0, None),
        'AG': (1, ((3, ), )),
      }),
      ('CCCN', {
        'HA': (1, ((3, ), )),
        'HD': (1, ((3, ), )),
        'LH': (0, None),
        'AR': (0, None),
        'RR': (0, None),
        'X': (0, None),
        'BG': (1, ((3, ), )),
        'AG': (0, None),
      }),
      ('CCCCC', {
        'HA': (0, None),
        'HD': (0, None),
        'LH': (1, ((1, ), (3, ))),
        'AR': (0, None),
        'RR': (0, None),
        'X': (0, None),
        'BG': (0, None),
        'AG': (0, None),
      }),
      ('CC1CCC1', {
        'HA': (0, None),
        'HD': (0, None),
        'LH': (1, ((1, ), (3, ))),
        'AR': (0, None),
        'RR': (1, ((1, ), )),
        'X': (0, None),
        'BG': (0, None),
        'AG': (0, None),
      }),
      ('[SiH3]C1CCC1', {
        'HA': (0, None),
        'HD': (0, None),
        'LH': (1, ((1, ), )),
        'AR': (0, None),
        'RR': (1, ((1, ), )),
        'X': (1, ((0, ), )),
        'BG': (0, None),
        'AG': (0, None),
      }),
      ('[SiH3]c1ccccc1', {
        'HA': (0, None),
        'HD': (0, None),
        'LH': (0, None),
        'AR': (1, ((1, ), )),
        'RR': (0, None),
        'X': (1, ((0, ), )),
        'BG': (0, None),
        'AG': (0, None),
      }),
    ]
    for smi, d in probes:
      mol = Chem.MolFromSmiles(smi)
      feats = self.factory.featFactory.GetFeaturesForMol(mol)
      for k in d.keys():
        shouldMatch, mapList = d[k]
        # Re-query restricted to the single feature family k.
        feats = self.factory.featFactory.GetFeaturesForMol(mol, includeOnly=k)
        if shouldMatch:
          self.assertTrue(feats)
          self.assertEqual(len(feats), len(mapList))
          aids = [(x.GetAtomIds()[0], ) for x in feats]
          aids.sort()
          self.assertEqual(tuple(aids), mapList)

  def test2Sigs(self):
    # (SMILES, expected on-bits of the 39972-bit fingerprint).
    probes = [('O=CCC=O', (149, )),
              ('OCCC=O', (149, 156)),
              ('OCCC(=O)O', (22, 29, 149, 154, 156, 184, 28822, 30134)), ]
    for smi, tgt in probes:
      sig = Generate.Gen2DFingerprint(Chem.MolFromSmiles(smi), self.factory)
      self.assertEqual(len(sig), 39972)
      bs = tuple(sig.GetOnBits())
      self.assertEqual(len(bs), len(tgt))
      self.assertEqual(bs, tgt)

  def testOrderBug(self):
    # Regression: two depictions of the same molecule must give the same
    # fingerprint regardless of atom ordering in the input file.
    sdFile = os.path.join(RDConfig.RDCodeDir, 'Chem', 'Pharm2D', 'test_data', 'orderBug.sdf')
    suppl = Chem.SDMolSupplier(sdFile)
    m1 = next(suppl)
    m2 = next(suppl)
    sig1 = Generate.Gen2DFingerprint(m1, self.factory)
    sig2 = Generate.Gen2DFingerprint(m2, self.factory)
    # NOTE(review): ob1/ob2 are computed but never used; the actual
    # check is the assertEqual on the full signatures below.
    ob1 = set(sig1.GetOnBits())
    ob2 = set(sig2.GetOnBits())
    self.assertEqual(sig1, sig2)

  def testOrderBug2(self):
    from rdkit.Chem import Randomize
    from rdkit import DataStructs
    probes = ['Oc1nc(Oc2ncccc2)ccc1']
    for smi in probes:
      m1 = Chem.MolFromSmiles(smi)
      #m1.Debug()
      sig1 = Generate.Gen2DFingerprint(m1, self.factory)
      csmi = Chem.MolToSmiles(m1)
      m2 = Chem.MolFromSmiles(csmi)
      #m2.Debug()
      sig2 = Generate.Gen2DFingerprint(m2, self.factory)
      self.assertTrue(list(sig1.GetOnBits()) == list(sig2.GetOnBits()), '%s %s' % (smi, csmi))
      self.assertEqual(DataStructs.DiceSimilarity(sig1, sig2), 1.0)
      self.assertEqual(sig1, sig2)
      # Randomized atom orderings must still give the same fingerprint;
      # on failure, re-run with verbose output for diagnosis.
      for i in range(10):
        m2 = Randomize.RandomizeMol(m1)
        sig2 = Generate.Gen2DFingerprint(m2, self.factory)
        if sig2 != sig1:
          Generate._verbose = True
          print('----------------')
          sig1 = Generate.Gen2DFingerprint(m1, self.factory)
          print('----------------')
          sig2 = Generate.Gen2DFingerprint(m2, self.factory)
          print('----------------')
          print(Chem.MolToMolBlock(m1))
          print('----------------')
          print(Chem.MolToMolBlock(m2))
          print('----------------')
          s1 = set(sig1.GetOnBits())
          s2 = set(sig2.GetOnBits())
          print(s1.difference(s2))
        self.assertEqual(sig1, sig2)

  def testBitInfo(self):
    # bitInfo maps each on-bit to the atom-id combinations that set it.
    m = Chem.MolFromSmiles('OCC=CC(=O)O')
    bi = {}
    sig = Generate.Gen2DFingerprint(m, Gobbi_Pharm2D.factory, bitInfo=bi)
    self.assertEqual(sig.GetNumOnBits(), len(bi))
    self.assertEqual(list(sig.GetOnBits()), sorted(bi.keys()))
    self.assertEqual(sorted(bi.keys()), [23, 30, 150, 154, 157, 185, 28878, 30184])
    self.assertEqual(sorted(bi[28878]), [[(0, ), (5, ), (6, )]])
    self.assertEqual(sorted(bi[157]), [[(0, ), (6, )], [(5, ), (0, )]])
if __name__ == '__main__':
  # Allow running this test module directly, outside a test runner.
  unittest.main()
| jandom/rdkit | rdkit/Chem/Pharm2D/UnitTestGobbi.py | Python | bsd-3-clause | 5,595 |
# -*- coding: utf-8 -*-
@bot.message_handler(commands=['date', 'time', 'Date', 'Time'])
def time_message(message):
    """Reply with the current local time of the city given as argument.

    Resolves the city to coordinates and a timezone via the Google
    geocoding/timezone APIs, then fetches a formatted time string from a
    Google Apps Script endpoint and sends it back to the chat.
    """
    userlang = redisserver.get("settings:user:language:" + str(message.from_user.id))
    userid = message.from_user.id
    # Silently ignore messages from banned users.
    banlist = redisserver.sismember('zigzag_banlist', '{}'.format(userid))
    if banlist:
        return
    # Require a city argument, e.g. "/time Berlin".
    if len(message.text.split()) < 2:
        bot.reply_to(message, language[userlang]["TIME_NEA_MSG"], parse_mode="Markdown")
        return
    city = message.text.split()[1]
    try:
        tzd = json.load(urllib.urlopen("https://maps.googleapis.com/maps/api/geocode/json?address={}".format(city)))
        if str(tzd["status"]) == "OK":
            latlng = tzd["results"][0]["geometry"]["location"]
            lat = str(latlng["lat"])
            lng = str(latlng["lng"])
            # Fixed query string: '&timestamp=' (the source contained the
            # mangled '&times;tamp' HTML-entity rendering, which produced
            # an invalid request parameter).
            tzl = json.load(urllib.urlopen("https://maps.googleapis.com/maps/api/timezone/json?location={}&timestamp=1331161200".format(lat + "," + lng)))
            timezone = tzl["timeZoneId"]
        else:
            bot.reply_to(message, "Timezone not found.")
            return
    except Exception:
        # except Exception (not a bare except) so SystemExit and
        # KeyboardInterrupt still propagate.
        print("[Time] Exception occured")
        return
    time = json.load(urllib.urlopen("https://script.google.com/macros/s/AKfycbyd5AcbAnWi2Yn0xhFRbyzS4qMq1VucMVgVvhul5XqS9HkAyJY/exec?tz={}".format(timezone)))
    bot.send_message(message.chat.id, "Current time in *" + timezone + "*: \n" + time["fulldate"], parse_mode="Markdown")
| WebShark025/TheZigZagProject | plugins/time.py | Python | mit | 1,429 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To keep backwards compatibility
from traceml.visualization.run_plot import MultiRunPlot, RunPlot
| polyaxon/polyaxon | core/polyaxon/polyplot/__init__.py | Python | apache-2.0 | 705 |
from flask_restful import Resource
from oeda.controller.experiment_results import get_all_stage_data
import matplotlib.pyplot as plt
import traceback
import json
import numpy as np
from io import BytesIO
import statsmodels.api as sm
import base64
from oeda.databases import db
# https://www.pythonanywhere.com/forums/topic/5017/
# https://stackoverflow.com/questions/38061267/matplotlib-graphic-image-to-base64
class QQPlotController(Resource):
    """REST resource producing a base64-encoded PNG of a Q-Q plot of the
    'overhead' values of an experiment (all stages or one stage) against
    a theoretical distribution."""

    # Supported scalings of the sample values before plotting.
    availableScales = ["normal", "log"]

    def get(self, experiment_id, stage_no, distribution, scale):
        """Render the Q-Q plot.

        @param experiment_id: experiment to fetch data points for
        @param stage_no: stage number; -1 selects all stages
        @param distribution: theoretical distribution name for sm.qqplot
        @param scale: 'normal' or 'log' (log-transforms the sample)
        @return: base64-encoded PNG bytes, or an error dict with an
            HTTP status code
        """
        try:
            if str(scale).lower() not in self.availableScales:
                return {"error": "Provided scale is not supported"}, 404
            pts = []
            # this case corresponds to all stage data
            if int(stage_no) == -1:
                stages_and_data_points = get_all_stage_data(experiment_id=experiment_id)
                if stages_and_data_points is None:
                    return {"error": "Data points cannot be retrieved for given experiment and/or stage"}, 404
                for entity in stages_and_data_points:
                    entity = json.loads(entity)
                    # An empty 'values' list simply contributes nothing
                    # (the original's `if len(...) == 0: pass` was a no-op).
                    for data_point in entity['values']:
                        pts.append(data_point["payload"]["overhead"])
            else:
                data_points = db().get_data_points(experiment_id=experiment_id, stage_no=stage_no)
                if data_points is None:
                    return {"error": "Data points cannot be retrieved for given experiment and/or stage"}, 404
                for data_point in data_points:
                    pts.append(data_point["payload"]["overhead"])
            # create the qq plot based on the retrieved data against the
            # requested theoretical distribution
            array = np.asarray(pts)
            sorted_array = np.sort(array)
            if str(scale).lower() == "log":
                sorted_array = np.log(sorted_array)
            fig1 = sm.qqplot(sorted_array, dist=str(distribution).lower(), line='45', fit=True)
            buf1 = BytesIO()
            fig1.savefig(buf1, format='png')
            buf1.seek(0)
            figure_data_png = base64.b64encode(buf1.getvalue())
            buf1.close()
            # Free the figure explicitly; matplotlib keeps figures alive
            # otherwise, which leaks memory in a long-running server.
            fig1.clf()
            del fig1
            plt.close('all')
            return figure_data_png
        except Exception as e:
            tb = traceback.format_exc()
            print(tb)
            # 500 Internal Server Error (the original returned the invalid
            # status code 40); str(e) replaces the long-deprecated
            # e.message attribute, which no longer exists in Python 3.
            return {"message": str(e)}, 500
| Starofall/OEDA | Backend/oeda/controller/plotting.py | Python | mit | 2,553 |
#!/usr/bin/python3
#
# Copyright (c) 2013 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import sys
import setproctitle
def set_procname(name: str = os.path.basename(sys.argv[0])) -> None:
    """Attempts to set the current process-name to the given name.

    Delegates to the third-party ``setproctitle`` package.  Note that
    the default value (the basename of the running script) is evaluated
    once, at module import time, not per call.
    """
    setproctitle.setproctitle(name)
| MikkelSchubert/paleomix | paleomix/common/system.py | Python | mit | 1,350 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def levelOrder(self, root: "TreeNode") -> "List[List[int]]":
        """Return the node values of a binary tree grouped by level,
        top to bottom (breadth-first traversal).

        The annotations are quoted (forward references) because neither
        TreeNode nor List is defined in this file; unquoted, they would
        raise NameError when the method is defined.
        """
        result = []
        if root is None:
            return result
        current_level = [root]
        while current_level:
            level_vals = []
            next_level = []
            for node in current_level:
                level_vals.append(node.val)
                if node.left is not None:
                    next_level.append(node.left)
                if node.right is not None:
                    next_level.append(node.right)
            result.append(level_vals)
            current_level = next_level
        return result
| jeremykid/FunAlgorithm | python_practice/leetCode/leetcode-102.py | Python | mit | 819 |
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.utils import net
from oslo_config import cfg
from oslo_service import wsgi
from neutron._i18n import _
from neutron.common import constants
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.PortOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions. "
"Note that this can be a colon-separated list of paths. "
"For example: api_extensions_path = "
"extensions:/path/to/more/exts:/even/more/exts. "
"The __path__ of neutron.extensions is appended to "
"this, so if your extensions are in there you don't "
"need to specify them here.")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Neutron will use")),
cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Neutron will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Neutron will use for VIFs. "
"The first 3 octets will remain unchanged. If the 4th "
"octet is not 00, it will also be used. The others "
"will be randomly generated.")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.BoolOpt('allow_pagination', default=True,
deprecated_for_removal=True,
help=_("Allow the usage of the pagination. This option has "
"been deprecated and will now be enabled "
"unconditionally.")),
cfg.BoolOpt('allow_sorting', default=True,
deprecated_for_removal=True,
help=_("Allow the usage of the sorting. This option has been "
"deprecated and will now be enabled unconditionally.")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.ListOpt('default_availability_zones', default=[],
help=_("Default value of availability zone hints. The "
"availability zone aware schedulers use this when "
"the resources availability_zone_hints is empty. "
"Multiple availability zones can be specified by a "
"comma separated string. This value can be empty. "
"In this case, even if availability_zone_hints for "
"a resource is empty, availability zone is "
"considered for high availability while scheduling "
"the resource.")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers per subnet")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.IntOpt('max_fixed_ips_per_port', default=5,
deprecated_for_removal=True,
help=_("Maximum number of fixed ips per port. This option "
"is deprecated and will be removed in the Ocata "
"release.")),
cfg.BoolOpt('ipv6_pd_enabled', default=False,
help=_("Enables IPv6 Prefix Delegation for automatic subnet "
"CIDR allocation. "
"Set to True to enable IPv6 Prefix Delegation for "
"subnet allocation in a PD-capable environment. Users "
"making subnet creation requests for IPv6 subnets "
"without providing a CIDR or subnetpool ID will be "
"given a CIDR via the Prefix Delegation mechanism. "
"Note that enabling PD will override the behavior of "
"the default IPv6 subnetpool.")),
cfg.IntOpt('dhcp_lease_duration', default=86400,
help=_("DHCP lease duration (in seconds). Use -1 to tell "
"dnsmasq to use infinite lease times.")),
cfg.StrOpt('dns_domain',
default='openstacklocal',
help=_('Domain to use for building the hostnames')),
cfg.StrOpt('external_dns_driver',
help=_('Driver for external DNS integration.')),
cfg.BoolOpt('dhcp_agent_notification', default=True,
help=_("Allow sending resource operation"
" notification to DHCP agent")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Neutron. "
"Attention: the following parameter MUST be set to "
"False if Neutron is being used in conjunction with "
"Nova security groups.")),
cfg.StrOpt('host', default=net.get_hostname(),
sample_default='example.domain',
help=_("Hostname to be used by the Neutron server, agents and "
"services running on this machine. All the agents and "
"services running on this machine must use the same "
"host value.")),
cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
help=_("Send notification to nova when port status changes")),
cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
help=_("Send notification to nova when port data (fixed_ips/"
"floatingip) changes so nova can update its cache.")),
cfg.IntOpt('send_events_interval', default=2,
help=_('Number of seconds between sending events to nova if '
'there are any events to send.')),
cfg.BoolOpt('advertise_mtu', default=True,
deprecated_for_removal=True,
help=_('If True, advertise network MTU values if core plugin '
'calculates them. MTU is advertised to running '
'instances via DHCP and RA MTU options.')),
cfg.StrOpt('ipam_driver', default='internal',
help=_("Neutron IPAM (IP address management) driver to use. "
"By default, the reference implementation of the "
"Neutron IPAM driver is used.")),
cfg.BoolOpt('vlan_transparent', default=False,
help=_('If True, then allow plugins that support it to '
'create VLAN transparent networks.')),
cfg.StrOpt('web_framework', default='legacy',
choices=('legacy', 'pecan'),
help=_("This will choose the web framework in which to run "
"the Neutron API server. 'pecan' is a new experimental "
"rewrite of the API server.")),
cfg.IntOpt('global_physnet_mtu', default=constants.DEFAULT_NETWORK_MTU,
deprecated_name='segment_mtu', deprecated_group='ml2',
help=_('MTU of the underlying physical network. Neutron uses '
'this value to calculate MTU for all virtual network '
'components. For flat and VLAN networks, neutron uses '
'this value without modification. For overlay networks '
'such as VXLAN, neutron automatically subtracts the '
'overlay protocol overhead from this value. Defaults '
'to 1500, the standard value for Ethernet.'))
]
core_cli_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
def register_core_common_config_opts(cfg=cfg.CONF):
cfg.register_opts(core_opts)
cfg.register_cli_opts(core_cli_opts)
wsgi.register_opts(cfg)
NOVA_CONF_SECTION = 'nova'
nova_opts = [
cfg.StrOpt('region_name',
help=_('Name of nova region to use. Useful if keystone manages'
' more than one region.')),
cfg.StrOpt('endpoint_type',
default='public',
choices=['public', 'admin', 'internal'],
help=_('Type of the nova endpoint to use. This endpoint will'
' be looked up in the keystone catalog and should be'
' one of public, internal or admin.')),
]
def register_nova_opts(cfg=cfg.CONF):
    """Register the nova options on *cfg* under the [nova] section."""
    cfg.register_opts(nova_opts, group=NOVA_CONF_SECTION)
| cloudbase/neutron | neutron/conf/common.py | Python | apache-2.0 | 9,557 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from mock import call
from osc_lib.cli import format_columns
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
from openstackclient.tests.unit import utils as tests_utils
from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
from openstackclient.volume.v2 import volume_type
class TestType(volume_fakes.TestVolume):
    """Base class for volume type command tests.

    Wires up fresh mocks for the volume type, type access, encryption
    type and identity project managers before each test.
    """
    def setUp(self):
        super(TestType, self).setUp()
        # Shortcut mocks for the managers the commands under test use.
        self.types_mock = self.app.client_manager.volume.volume_types
        self.types_mock.reset_mock()
        self.types_access_mock = (
            self.app.client_manager.volume.volume_type_access)
        self.types_access_mock.reset_mock()
        self.encryption_types_mock = (
            self.app.client_manager.volume.volume_encryption_types)
        self.encryption_types_mock.reset_mock()
        self.projects_mock = self.app.client_manager.identity.projects
        self.projects_mock.reset_mock()
class TestTypeCreate(TestType):
    """Tests for the "volume type create" command."""
    project = identity_fakes.FakeProject.create_one_project()
    # Column headers the command is expected to emit.
    columns = (
        'description',
        'id',
        'is_public',
        'name',
    )
    def setUp(self):
        super(TestTypeCreate, self).setUp()
        self.new_volume_type = volume_fakes.FakeType.create_one_type()
        self.data = (
            self.new_volume_type.description,
            self.new_volume_type.id,
            True,
            self.new_volume_type.name,
        )
        self.types_mock.create.return_value = self.new_volume_type
        self.projects_mock.get.return_value = self.project
        # Get the command object to test
        self.cmd = volume_type.CreateVolumeType(self.app, None)
    def test_type_create_public(self):
        arglist = [
            "--description", self.new_volume_type.description,
            "--public",
            self.new_volume_type.name,
        ]
        verifylist = [
            ("description", self.new_volume_type.description),
            ("public", True),
            ("private", False),
            ("name", self.new_volume_type.name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.types_mock.create.assert_called_with(
            self.new_volume_type.name,
            description=self.new_volume_type.description,
            is_public=True,
        )
        self.assertEqual(self.columns, columns)
        self.assertItemEqual(self.data, data)
    def test_type_create_private(self):
        arglist = [
            "--description", self.new_volume_type.description,
            "--private",
            "--project", self.project.id,
            self.new_volume_type.name,
        ]
        verifylist = [
            ("description", self.new_volume_type.description),
            ("public", False),
            ("private", True),
            ("project", self.project.id),
            ("name", self.new_volume_type.name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.types_mock.create.assert_called_with(
            self.new_volume_type.name,
            description=self.new_volume_type.description,
            is_public=False,
        )
        self.assertEqual(self.columns, columns)
        self.assertItemEqual(self.data, data)
    def test_public_type_create_with_project(self):
        # --project is only meaningful for private types, so the command
        # is expected to reject this combination.
        arglist = [
            '--project', self.project.id,
            self.new_volume_type.name,
        ]
        verifylist = [
            ('project', self.project.id),
            ('name', self.new_volume_type.name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.assertRaises(exceptions.CommandError,
                          self.cmd.take_action,
                          parsed_args)
    def test_type_create_with_encryption(self):
        encryption_info = {
            'provider': 'LuksEncryptor',
            'cipher': 'aes-xts-plain64',
            'key_size': '128',
            'control_location': 'front-end',
        }
        encryption_type = volume_fakes.FakeType.create_one_encryption_type(
            attrs=encryption_info
        )
        self.new_volume_type = volume_fakes.FakeType.create_one_type(
            attrs={'encryption': encryption_info})
        self.types_mock.create.return_value = self.new_volume_type
        self.encryption_types_mock.create.return_value = encryption_type
        encryption_columns = (
            'description',
            'encryption',
            'id',
            'is_public',
            'name',
        )
        encryption_data = (
            self.new_volume_type.description,
            format_columns.DictColumn(encryption_info),
            self.new_volume_type.id,
            True,
            self.new_volume_type.name,
        )
        arglist = [
            '--encryption-provider', 'LuksEncryptor',
            '--encryption-cipher', 'aes-xts-plain64',
            '--encryption-key-size', '128',
            '--encryption-control-location', 'front-end',
            self.new_volume_type.name,
        ]
        verifylist = [
            ('encryption_provider', 'LuksEncryptor'),
            ('encryption_cipher', 'aes-xts-plain64'),
            ('encryption_key_size', 128),
            ('encryption_control_location', 'front-end'),
            ('name', self.new_volume_type.name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.types_mock.create.assert_called_with(
            self.new_volume_type.name,
            description=None,
        )
        # key_size is parsed to int before being sent to the API.
        body = {
            'provider': 'LuksEncryptor',
            'cipher': 'aes-xts-plain64',
            'key_size': 128,
            'control_location': 'front-end',
        }
        self.encryption_types_mock.create.assert_called_with(
            self.new_volume_type,
            body,
        )
        self.assertEqual(encryption_columns, columns)
        self.assertItemEqual(encryption_data, data)
class TestTypeDelete(TestType):
    """Tests for the "volume type delete" command."""
    volume_types = volume_fakes.FakeType.create_types(count=2)
    def setUp(self):
        super(TestTypeDelete, self).setUp()
        self.types_mock.get = volume_fakes.FakeType.get_types(
            self.volume_types)
        self.types_mock.delete.return_value = None
        # Get the command object to mock
        self.cmd = volume_type.DeleteVolumeType(self.app, None)
    def test_type_delete(self):
        arglist = [
            self.volume_types[0].id
        ]
        verifylist = [
            ("volume_types", [self.volume_types[0].id])
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.types_mock.delete.assert_called_with(self.volume_types[0])
        self.assertIsNone(result)
    def test_delete_multiple_types(self):
        arglist = []
        for t in self.volume_types:
            arglist.append(t.id)
        verifylist = [
            ('volume_types', arglist),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        calls = []
        for t in self.volume_types:
            calls.append(call(t))
        self.types_mock.delete.assert_has_calls(calls)
        self.assertIsNone(result)
    def test_delete_multiple_types_with_exception(self):
        # One resolvable type plus one unknown name: the command should
        # delete the first and report the failure count for the second.
        arglist = [
            self.volume_types[0].id,
            'unexist_type',
        ]
        verifylist = [
            ('volume_types', arglist),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        find_mock_result = [self.volume_types[0], exceptions.CommandError]
        with mock.patch.object(utils, 'find_resource',
                               side_effect=find_mock_result) as find_mock:
            try:
                self.cmd.take_action(parsed_args)
                self.fail('CommandError should be raised.')
            except exceptions.CommandError as e:
                self.assertEqual('1 of 2 volume types failed to delete.',
                                 str(e))
            find_mock.assert_any_call(
                self.types_mock, self.volume_types[0].id)
            find_mock.assert_any_call(self.types_mock, 'unexist_type')
            self.assertEqual(2, find_mock.call_count)
            self.types_mock.delete.assert_called_once_with(
                self.volume_types[0]
            )
class TestTypeList(TestType):
    """Tests for the "volume type list" command."""
    volume_types = volume_fakes.FakeType.create_types()
    columns = [
        "ID",
        "Name",
        "Is Public",
    ]
    columns_long = columns + [
        "Description",
        "Properties"
    ]
    # Expected rows, built once at class-definition time from the fakes.
    data_with_default_type = [(
        volume_types[0].id,
        volume_types[0].name,
        True
    )]
    data = []
    for t in volume_types:
        data.append((
            t.id,
            t.name,
            t.is_public,
        ))
    data_long = []
    for t in volume_types:
        data_long.append((
            t.id,
            t.name,
            t.is_public,
            t.description,
            format_columns.DictColumn(t.extra_specs),
        ))
    def setUp(self):
        super(TestTypeList, self).setUp()
        self.types_mock.list.return_value = self.volume_types
        self.types_mock.default.return_value = self.volume_types[0]
        # get the command to test
        self.cmd = volume_type.ListVolumeType(self.app, None)
    def test_type_list_without_options(self):
        arglist = []
        verifylist = [
            ("long", False),
            ("private", False),
            ("public", False),
            ("default", False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.types_mock.list.assert_called_once_with(is_public=None)
        self.assertEqual(self.columns, columns)
        self.assertListItemEqual(self.data, list(data))
    def test_type_list_with_options(self):
        arglist = [
            "--long",
            "--public",
        ]
        verifylist = [
            ("long", True),
            ("private", False),
            ("public", True),
            ("default", False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.types_mock.list.assert_called_once_with(is_public=True)
        self.assertEqual(self.columns_long, columns)
        self.assertListItemEqual(self.data_long, list(data))
    def test_type_list_with_private_option(self):
        arglist = [
            "--private",
        ]
        verifylist = [
            ("long", False),
            ("private", True),
            ("public", False),
            ("default", False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.types_mock.list.assert_called_once_with(is_public=False)
        self.assertEqual(self.columns, columns)
        self.assertListItemEqual(self.data, list(data))
    def test_type_list_with_default_option(self):
        arglist = [
            "--default",
        ]
        verifylist = [
            ("encryption_type", False),
            ("long", False),
            ("private", False),
            ("public", False),
            ("default", True),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.types_mock.default.assert_called_once_with()
        self.assertEqual(self.columns, columns)
        self.assertListItemEqual(self.data_with_default_type, list(data))
    def test_type_list_with_encryption(self):
        # Only the first type has encryption; the second should render the
        # "no info" form of EncryptionInfoColumn.
        encryption_type = volume_fakes.FakeType.create_one_encryption_type(
            attrs={'volume_type_id': self.volume_types[0].id})
        encryption_info = {
            'provider': 'LuksEncryptor',
            'cipher': None,
            'key_size': None,
            'control_location': 'front-end',
        }
        encryption_columns = self.columns + [
            "Encryption",
        ]
        encryption_data = []
        encryption_data.append((
            self.volume_types[0].id,
            self.volume_types[0].name,
            self.volume_types[0].is_public,
            volume_type.EncryptionInfoColumn(
                self.volume_types[0].id,
                {self.volume_types[0].id: encryption_info}),
        ))
        encryption_data.append((
            self.volume_types[1].id,
            self.volume_types[1].name,
            self.volume_types[1].is_public,
            volume_type.EncryptionInfoColumn(
                self.volume_types[1].id, {}),
        ))
        self.encryption_types_mock.list.return_value = [encryption_type]
        arglist = [
            "--encryption-type",
        ]
        verifylist = [
            ("encryption_type", True),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.encryption_types_mock.list.assert_called_once_with()
        self.types_mock.list.assert_called_once_with(is_public=None)
        self.assertEqual(encryption_columns, columns)
        self.assertListItemEqual(encryption_data, list(data))
class TestTypeSet(TestType):
    """Tests for the "volume type set" command."""
    project = identity_fakes.FakeProject.create_one_project()
    volume_type = volume_fakes.FakeType.create_one_type(
        methods={'set_keys': None})
    def setUp(self):
        super(TestTypeSet, self).setUp()
        self.types_mock.get.return_value = self.volume_type
        # Return a project
        self.projects_mock.get.return_value = self.project
        self.encryption_types_mock.create.return_value = None
        self.encryption_types_mock.update.return_value = None
        # Get the command object to test
        self.cmd = volume_type.SetVolumeType(self.app, None)
    def test_type_set_name(self):
        new_name = 'new_name'
        arglist = [
            '--name', new_name,
            self.volume_type.id,
        ]
        verifylist = [
            ('name', new_name),
            ('description', None),
            ('property', None),
            ('volume_type', self.volume_type.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        # Set expected values
        kwargs = {
            'name': new_name,
        }
        self.types_mock.update.assert_called_with(
            self.volume_type.id,
            **kwargs
        )
        self.assertIsNone(result)
    def test_type_set_description(self):
        new_desc = 'new_desc'
        arglist = [
            '--description', new_desc,
            self.volume_type.id,
        ]
        verifylist = [
            ('name', None),
            ('description', new_desc),
            ('property', None),
            ('volume_type', self.volume_type.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        # Set expected values
        kwargs = {
            'description': new_desc,
        }
        self.types_mock.update.assert_called_with(
            self.volume_type.id,
            **kwargs
        )
        self.assertIsNone(result)
    def test_type_set_property(self):
        arglist = [
            '--property', 'myprop=myvalue',
            self.volume_type.id,
        ]
        verifylist = [
            ('name', None),
            ('description', None),
            ('property', {'myprop': 'myvalue'}),
            ('volume_type', self.volume_type.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.volume_type.set_keys.assert_called_once_with(
            {'myprop': 'myvalue'})
        self.assertIsNone(result)
    def test_type_set_not_called_without_project_argument(self):
        arglist = [
            '--project', '',
            self.volume_type.id,
        ]
        verifylist = [
            ('project', ''),
            ('volume_type', self.volume_type.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.assertIsNone(result)
        self.assertFalse(self.types_access_mock.add_project_access.called)
    def test_type_set_failed_with_missing_volume_type_argument(self):
        arglist = [
            '--project', 'identity_fakes.project_id',
        ]
        verifylist = [
            ('project', 'identity_fakes.project_id'),
        ]
        self.assertRaises(tests_utils.ParserException,
                          self.check_parser,
                          self.cmd,
                          arglist,
                          verifylist)
    def test_type_set_project_access(self):
        arglist = [
            '--project', self.project.id,
            self.volume_type.id,
        ]
        verifylist = [
            ('project', self.project.id),
            ('volume_type', self.volume_type.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.assertIsNone(result)
        self.types_access_mock.add_project_access.assert_called_with(
            self.volume_type.id,
            self.project.id,
        )
    def test_type_set_new_encryption(self):
        # update() raising NotFound means no encryption type exists yet,
        # so the command is expected to fall back to create().
        self.encryption_types_mock.update.side_effect = (
            exceptions.NotFound('NotFound'))
        arglist = [
            '--encryption-provider', 'LuksEncryptor',
            '--encryption-cipher', 'aes-xts-plain64',
            '--encryption-key-size', '128',
            '--encryption-control-location', 'front-end',
            self.volume_type.id,
        ]
        verifylist = [
            ('encryption_provider', 'LuksEncryptor'),
            ('encryption_cipher', 'aes-xts-plain64'),
            ('encryption_key_size', 128),
            ('encryption_control_location', 'front-end'),
            ('volume_type', self.volume_type.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        body = {
            'provider': 'LuksEncryptor',
            'cipher': 'aes-xts-plain64',
            'key_size': 128,
            'control_location': 'front-end',
        }
        self.encryption_types_mock.update.assert_called_with(
            self.volume_type,
            body,
        )
        self.encryption_types_mock.create.assert_called_with(
            self.volume_type,
            body,
        )
        self.assertIsNone(result)
    @mock.patch.object(utils, 'find_resource')
    def test_type_set_existing_encryption(self, mock_find):
        # find_resource resolves the volume type first, then an existing
        # encryption type, so only update() should be called.
        mock_find.side_effect = [self.volume_type,
                                 "existing_encryption_type"]
        arglist = [
            '--encryption-provider', 'LuksEncryptor',
            '--encryption-cipher', 'aes-xts-plain64',
            '--encryption-control-location', 'front-end',
            self.volume_type.id,
        ]
        verifylist = [
            ('encryption_provider', 'LuksEncryptor'),
            ('encryption_cipher', 'aes-xts-plain64'),
            ('encryption_control_location', 'front-end'),
            ('volume_type', self.volume_type.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        body = {
            'provider': 'LuksEncryptor',
            'cipher': 'aes-xts-plain64',
            'control_location': 'front-end',
        }
        self.encryption_types_mock.update.assert_called_with(
            self.volume_type,
            body,
        )
        self.encryption_types_mock.create.assert_not_called()
        self.assertIsNone(result)
    def test_type_set_new_encryption_without_provider(self):
        # Creating a brand-new encryption type requires a provider, so the
        # command must fail after update() reports NotFound.
        self.encryption_types_mock.update.side_effect = (
            exceptions.NotFound('NotFound'))
        arglist = [
            '--encryption-cipher', 'aes-xts-plain64',
            '--encryption-key-size', '128',
            '--encryption-control-location', 'front-end',
            self.volume_type.id,
        ]
        verifylist = [
            ('encryption_cipher', 'aes-xts-plain64'),
            ('encryption_key_size', 128),
            ('encryption_control_location', 'front-end'),
            ('volume_type', self.volume_type.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        try:
            self.cmd.take_action(parsed_args)
            self.fail('CommandError should be raised.')
        except exceptions.CommandError as e:
            self.assertEqual("Command Failed: One or more of"
                             " the operations failed",
                             str(e))
        body = {
            'cipher': 'aes-xts-plain64',
            'key_size': 128,
            'control_location': 'front-end',
        }
        self.encryption_types_mock.update.assert_called_with(
            self.volume_type,
            body,
        )
        self.encryption_types_mock.create.assert_not_called()
class TestTypeShow(TestType):
    """Tests for the "volume type show" command."""
    columns = (
        'access_project_ids',
        'description',
        'id',
        'is_public',
        'name',
        'properties',
    )
    def setUp(self):
        super(TestTypeShow, self).setUp()
        self.volume_type = volume_fakes.FakeType.create_one_type()
        # access_project_ids is None for a public type.
        self.data = (
            None,
            self.volume_type.description,
            self.volume_type.id,
            True,
            self.volume_type.name,
            format_columns.DictColumn(self.volume_type.extra_specs)
        )
        self.types_mock.get.return_value = self.volume_type
        # Get the command object to test
        self.cmd = volume_type.ShowVolumeType(self.app, None)
    def test_type_show(self):
        arglist = [
            self.volume_type.id
        ]
        verifylist = [
            ("encryption_type", False),
            ("volume_type", self.volume_type.id)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.types_mock.get.assert_called_with(self.volume_type.id)
        self.assertEqual(self.columns, columns)
        self.assertItemEqual(self.data, data)
    def test_type_show_with_access(self):
        # A private type should have its access list fetched and shown.
        arglist = [
            self.volume_type.id
        ]
        verifylist = [
            ("volume_type", self.volume_type.id)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        private_type = volume_fakes.FakeType.create_one_type(
            attrs={'is_public': False})
        type_access_list = volume_fakes.FakeTypeAccess.create_one_type_access()
        with mock.patch.object(self.types_mock, 'get',
                               return_value=private_type):
            with mock.patch.object(self.types_access_mock, 'list',
                                   return_value=[type_access_list]):
                columns, data = self.cmd.take_action(parsed_args)
                self.types_mock.get.assert_called_once_with(
                    self.volume_type.id)
                self.types_access_mock.list.assert_called_once_with(
                    private_type.id)
        self.assertEqual(self.columns, columns)
        private_type_data = (
            format_columns.ListColumn([type_access_list.project_id]),
            private_type.description,
            private_type.id,
            private_type.is_public,
            private_type.name,
            format_columns.DictColumn(private_type.extra_specs)
        )
        self.assertItemEqual(private_type_data, data)
    def test_type_show_with_list_access_exec(self):
        # If fetching the access list fails, the command should still show
        # the type, with access_project_ids left as None.
        arglist = [
            self.volume_type.id
        ]
        verifylist = [
            ("volume_type", self.volume_type.id)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        private_type = volume_fakes.FakeType.create_one_type(
            attrs={'is_public': False})
        with mock.patch.object(self.types_mock, 'get',
                               return_value=private_type):
            with mock.patch.object(self.types_access_mock, 'list',
                                   side_effect=Exception()):
                columns, data = self.cmd.take_action(parsed_args)
                self.types_mock.get.assert_called_once_with(
                    self.volume_type.id)
                self.types_access_mock.list.assert_called_once_with(
                    private_type.id)
        self.assertEqual(self.columns, columns)
        private_type_data = (
            None,
            private_type.description,
            private_type.id,
            private_type.is_public,
            private_type.name,
            format_columns.DictColumn(private_type.extra_specs)
        )
        self.assertItemEqual(private_type_data, data)
    def test_type_show_with_encryption(self):
        encryption_type = volume_fakes.FakeType.create_one_encryption_type()
        encryption_info = {
            'provider': 'LuksEncryptor',
            'cipher': None,
            'key_size': None,
            'control_location': 'front-end',
        }
        self.volume_type = volume_fakes.FakeType.create_one_type(
            attrs={'encryption': encryption_info})
        self.types_mock.get.return_value = self.volume_type
        self.encryption_types_mock.get.return_value = encryption_type
        encryption_columns = (
            'access_project_ids',
            'description',
            'encryption',
            'id',
            'is_public',
            'name',
            'properties',
        )
        encryption_data = (
            None,
            self.volume_type.description,
            format_columns.DictColumn(encryption_info),
            self.volume_type.id,
            True,
            self.volume_type.name,
            format_columns.DictColumn(self.volume_type.extra_specs)
        )
        arglist = [
            '--encryption-type',
            self.volume_type.id
        ]
        verifylist = [
            ('encryption_type', True),
            ("volume_type", self.volume_type.id)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.types_mock.get.assert_called_with(self.volume_type.id)
        self.encryption_types_mock.get.assert_called_with(self.volume_type.id)
        self.assertEqual(encryption_columns, columns)
        self.assertItemEqual(encryption_data, data)
class TestTypeUnset(TestType):
    """Tests for the "volume type unset" command."""
    project = identity_fakes.FakeProject.create_one_project()
    volume_type = volume_fakes.FakeType.create_one_type(
        methods={'unset_keys': None})
    def setUp(self):
        super(TestTypeUnset, self).setUp()
        self.types_mock.get.return_value = self.volume_type
        # Return a project
        self.projects_mock.get.return_value = self.project
        # Get the command object to test
        self.cmd = volume_type.UnsetVolumeType(self.app, None)
    def test_type_unset(self):
        arglist = [
            '--property', 'property',
            '--property', 'multi_property',
            self.volume_type.id,
        ]
        verifylist = [
            ('property', ['property', 'multi_property']),
            ('volume_type', self.volume_type.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.volume_type.unset_keys.assert_called_once_with(
            ['property', 'multi_property'])
        self.assertIsNone(result)
    def test_type_unset_project_access(self):
        arglist = [
            '--project', self.project.id,
            self.volume_type.id,
        ]
        verifylist = [
            ('project', self.project.id),
            ('volume_type', self.volume_type.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.assertIsNone(result)
        self.types_access_mock.remove_project_access.assert_called_with(
            self.volume_type.id,
            self.project.id,
        )
    def test_type_unset_not_called_without_project_argument(self):
        arglist = [
            '--project', '',
            self.volume_type.id,
        ]
        verifylist = [
            ('encryption_type', False),
            ('project', ''),
            ('volume_type', self.volume_type.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.assertIsNone(result)
        self.encryption_types_mock.delete.assert_not_called()
        self.assertFalse(self.types_access_mock.remove_project_access.called)
    def test_type_unset_failed_with_missing_volume_type_argument(self):
        arglist = [
            '--project', 'identity_fakes.project_id',
        ]
        verifylist = [
            ('project', 'identity_fakes.project_id'),
        ]
        self.assertRaises(tests_utils.ParserException,
                          self.check_parser,
                          self.cmd,
                          arglist,
                          verifylist)
    def test_type_unset_encryption_type(self):
        arglist = [
            '--encryption-type',
            self.volume_type.id,
        ]
        verifylist = [
            ('encryption_type', True),
            ('volume_type', self.volume_type.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.encryption_types_mock.delete.assert_called_with(self.volume_type)
        self.assertIsNone(result)
class TestColumns(TestType):
    """Tests for the EncryptionInfoColumn formattable column."""
    def test_encryption_info_column_with_info(self):
        vol_type = volume_fakes.FakeType.create_one_type()
        info = {
            'provider': 'LuksEncryptor',
            'cipher': None,
            'key_size': None,
            'control_location': 'front-end',
        }
        # Column keyed on this type's id, with info present for it.
        column = volume_type.EncryptionInfoColumn(
            vol_type.id, {vol_type.id: info})
        self.assertEqual(utils.format_dict(info), column.human_readable())
        self.assertEqual(info, column.machine_readable())
    def test_encryption_info_column_without_info(self):
        vol_type = volume_fakes.FakeType.create_one_type()
        # No encryption info for this type: the column renders a dash and
        # exposes None to machine consumers.
        column = volume_type.EncryptionInfoColumn(vol_type.id, {})
        self.assertEqual('-', column.human_readable())
        self.assertIsNone(column.machine_readable())
| dtroyer/python-openstackclient | openstackclient/tests/unit/volume/v2/test_type.py | Python | apache-2.0 | 31,954 |
# --- import --------------------------------------------------------------------------------------
from hardware.opas.TOPAS.TOPAS import Driver as BaseDriver
from hardware.opas.TOPAS.TOPAS import GUI as BaseGUI
# --- driver --------------------------------------------------------------------------------------
class Driver(BaseDriver):
    """TOPAS-C OPA driver.

    Declares the model-specific motor layout and curve indices, then
    hands off to the shared TOPAS base driver for initialization.
    """
    def __init__(self, *args, **kwargs):
        # Two crystal/delay stage pairs plus three mixers.
        self.motor_names = ["Crystal_1", "Delay_1", "Crystal_2", "Delay_2",
                            "Mixer_1", "Mixer_2", "Mixer_3"]
        self.curve_indices = dict(Base=1, Mixer_1=2, Mixer_2=3, Mixer_3=4)
        self.kind = "TOPAS-C"
        BaseDriver.__init__(self, *args, **kwargs)
# --- gui -----------------------------------------------------------------------------------------
class GUI(BaseGUI):
    """TOPAS-C GUI: the shared TOPAS GUI needs no model-specific changes."""
    pass
| wright-group/PyCMDS | pycmds/hardware/opas/TOPAS/TOPAS-C.py | Python | mit | 886 |
from flask import Flask, url_for, redirect, render_template, request
try:
from mongoengine import *
except ImportError:
exit('You must have mongoengine installed. Install it with the command:\n\t$> easy_install mongoengine')
from flask.ext import superadmin, login, wtf
from flask.ext.superadmin.contrib import mongoenginemodel
from wtforms.fields import TextField, PasswordField
from wtforms.validators import Required, ValidationError
# Create application
app = Flask(__name__)
# Create dummy secret key so we can use sessions
app.config['SECRET_KEY'] = '123456790'
# Database name for Mongo
app.config['DATABASE'] = 'dummy_db'
# Create user model. For simplicity, it will store passwords in plain text.
# Obviously that's not right thing to do in real world application.
class User(Document):
    """MongoDB user document.

    Stores the password in plain text for demo simplicity (never do this
    in a real application) and implements the methods flask-login needs.
    """
    id = StringField(primary_key=True)
    login = StringField(max_length=80, unique=True)
    email = EmailField(max_length=120)
    password = StringField(max_length=64)
    # Flask-Login integration
    def is_authenticated(self):
        return True
    def is_active(self):
        return True
    def is_anonymous(self):
        return False
    def get_id(self):
        return self.id
    # Required for administrative interface
    def __unicode__(self):
        return self.login
# Define login and registration forms (for flask-login)
class LoginForm(wtf.Form):
    """Login form used by flask-login.

    Validation looks the user up by the submitted login name and checks
    the stored password (plain-text comparison, matching the demo User
    model).
    """
    login = TextField(validators=[Required()])
    password = PasswordField(validators=[Required()])
    def validate_login(self, field):
        user = self.get_user()
        if user is None:
            raise ValidationError('Invalid user')
        if user.password != self.password.data:
            raise ValidationError('Invalid password')
    def get_user(self):
        # Query with the submitted value (self.login.data), not the field
        # object itself, and use first() so a missing user yields None --
        # objects.get() would raise DoesNotExist and bypass the
        # 'Invalid user' check in validate_login().
        return User.objects(login=self.login.data).first()
class RegistrationForm(wtf.Form):
    """Registration form; rejects duplicate login names."""
    login = TextField(validators=[Required()])
    email = TextField()
    password = PasswordField(validators=[Required()])
    def validate_login(self, field):
        # count() asks the server for the number of matches instead of
        # materializing every matching document just to call len() on it.
        if User.objects(login=self.login.data).count() > 0:
            raise ValidationError('Duplicate username')
# Initialize flask-login
def init_login():
    """Attach a flask-login LoginManager to the app and register the
    user loader callback that resolves session ids to User documents."""
    login_manager = login.LoginManager()
    login_manager.setup_app(app)
    # Create user loader function
    @login_manager.user_loader
    def load_user(user_id):
        return User.objects.get(id=user_id)
# Create customized model view class
class MyModelView(mongoenginemodel.ModelView):
    """Admin model view restricted to authenticated users."""
    def is_accessible(self):
        return login.current_user.is_authenticated()
# Create customized index view class
class MyAdminIndexView(superadmin.AdminIndexView):
    """Admin index view restricted to authenticated users."""
    def is_accessible(self):
        return login.current_user.is_authenticated()
# Flask views
@app.route('/')
def index():
    """Render the landing page with the current (possibly anonymous) user."""
    return render_template('index.html', user=login.current_user)
@app.route('/login/', methods=('GET', 'POST'))
def login_view():
    """Show the login form; on valid POST, log the user in and redirect."""
    form = LoginForm(request.form)
    # GET request or failed validation: just (re)render the form.
    if not form.validate_on_submit():
        return render_template('form.html', form=form)
    login.login_user(form.get_user())
    return redirect(url_for('index'))
@app.route('/register/', methods=('GET', 'POST'))
def register_view():
    """Show the registration form; on valid POST, create the user, log
    them in and redirect to the index."""
    form = RegistrationForm(request.form)
    # GET request or failed validation: just (re)render the form.
    if not form.validate_on_submit():
        return render_template('form.html', form=form)
    new_user = User()
    form.populate_obj(new_user)
    new_user.id = new_user.login
    new_user.save()
    login.login_user(new_user)
    return redirect(url_for('index'))
@app.route('/logout/')
def logout_view():
    """Log the current user out and redirect to the index page."""
    login.logout_user()
    return redirect(url_for('index'))
if __name__ == '__main__':
    # Initialize flask-login
    init_login()
    # Mongoengine connection
    connect(app.config['DATABASE'])
    # Create the admin interface and register the User model view.
    admin = superadmin.Admin(app, 'Auth', index_view=MyAdminIndexView())
    # Add view
    admin.add_view(MyModelView(User))
    # Start the development server with debugging enabled.
    app.debug = True
    app.run()
| syrusakbary/Flask-SuperAdmin | examples/auth/mongoauth.py | Python | bsd-3-clause | 3,951 |
# -*- coding: utf-8 -*-
import unittest
# noinspection PyUnresolvedReferences
from .._tins import IPv4Address
ip_string = "192.168.0.225"
class IPv4AddressTest(unittest.TestCase):
    """Tests for the IPv4Address wrapper (string/int conversion,
    comparison, and address-class predicates).

    Uses assertEqual/assertNotEqual: the assertEquals/assertNotEquals
    spellings are deprecated aliases and were removed in Python 3.12.
    """
    def test_constructor(self):
        addr1 = IPv4Address(ip_string)
        addr2 = IPv4Address(ip_string)
        self.assertEqual(str(addr1), ip_string)
        self.assertEqual(str(addr2), ip_string)
        self.assertNotEqual(addr1, "192.168.0.254")
    def test_convert_integer(self):
        # Round-trip: address -> int -> address must be lossless.
        addr1 = IPv4Address(ip_string)
        as_int = int(addr1)
        addr2 = IPv4Address(as_int)
        self.assertEqual(addr1, addr2)
        as_int2 = int(addr2)
        self.assertEqual(as_int, as_int2)
    def test_convert_string(self):
        addr1 = IPv4Address(ip_string)
        self.assertEqual(str(addr1), ip_string)
    def test_equality(self):
        addr1 = IPv4Address(ip_string)
        addr2 = IPv4Address(ip_string)
        self.assertEqual(addr1, addr2)
        self.assertNotEqual(addr1, "127.0.0.1")
    def test_less_than(self):
        addr1 = IPv4Address(ip_string)
        addr2 = IPv4Address(ip_string)
        self.assertFalse(addr1 < addr2)
        self.assertFalse(addr1 > addr2)
        self.assertLess(addr1, "192.168.1.2")
        self.assertLess(addr1, "192.168.0.226")
        self.assertLess(addr1, "193.0.0.0")
        self.assertGreater("193.0.0.0", addr1)
    def test_private(self):
        # (expected, address) pairs covering the RFC 1918 range edges.
        l = [
            (True, "192.168.0.1"), (True, "192.168.133.7"), (True, "192.168.255.254"), (False, "192.169.0.1"),
            (False, "192.167.255.254"), (True, "10.0.0.1"), (True, "10.5.1.2"), (True, "10.255.255.254"),
            (False, "11.0.0.1"), (False, "9.255.255.254"), (True, "172.16.0.1"), (True, "172.31.255.254"),
            (True, "172.20.13.75"), (False, "172.15.0.1"), (False, "172.32.0.1"), (False, "100.100.100.100"),
            (False, "199.199.29.10")
        ]
        for res, addr in l:
            self.assertEqual(IPv4Address(addr).is_private(), res)
    def test_multicast(self):
        self.assertTrue(IPv4Address("224.0.0.1").is_multicast())
        self.assertTrue(IPv4Address("226.3.54.132").is_multicast())
        self.assertTrue(IPv4Address("239.255.255.255").is_multicast())
        self.assertFalse(IPv4Address("223.255.255.255").is_multicast())
        self.assertFalse(IPv4Address("240.0.0.0").is_multicast())
    def test_broadcast(self):
        self.assertTrue(IPv4Address("255.255.255.255").is_broadcast())
        self.assertFalse(IPv4Address("226.3.54.132").is_broadcast())
        self.assertFalse(IPv4Address("127.0.0.1").is_broadcast())
    def test_unicast(self):
        self.assertFalse(IPv4Address("255.255.255.255").is_unicast())
        self.assertFalse(IPv4Address("224.0.0.1").is_unicast())
        self.assertTrue(IPv4Address("240.0.0.0").is_unicast())
        self.assertTrue(IPv4Address("127.0.0.1").is_unicast())
| stephane-martin/cycapture | cycapture/libtins/tests/test_ipv4_address.py | Python | lgpl-3.0 | 2,910 |
# BlackJack game - one player vs automated dealer
# The player can stand or hit and are able to pick betting amount
# Player's total amount of money should be tracked
# Alerts about losses, wins, busts, etc ...
import random
import sys
class Player(object):
    """A blackjack participant tracking name, bankroll and hand score."""
    def __init__(self, name, score,money=100):
        self.name = name
        self.money = money
        self.score = score
    def add_money(self,amount):
        # Credit winnings to the bankroll.
        self.money += amount
    def take_money(self,amount):
        # Deduct a bet from the bankroll.
        self.money -= amount
    def add_score(self,points):
        self.score += points
def draw(gamer):
    """Deal one pseudo-card to *gamer*, announce it and the new score.

    A uniform random value in 2..11 stands in for a card: 10 is reported
    as 'J', and 11 is an ace worth 11 unless that would push the score
    past 21, in which case it counts as 1.

    NOTE(review): randint(2, 11) does not model a real deck -- all ten
    values are equally likely and 10/J/Q/K are collapsed together.
    """
    card = random.randint(2, 11)
    # The original bound the add_score() return (always None) to unused
    # locals J/A/A1 and repeated the score print in every branch; both
    # removed -- the printed output is unchanged.
    if card == 10:
        print(gamer.name + ' get J')
        gamer.add_score(10)
    elif card == 11:
        print(gamer.name + ' get A')
        # Ace is soft: worth 11 only while it cannot bust the hand.
        if (gamer.score + 11) <= 21:
            gamer.add_score(11)
        else:
            gamer.add_score(1)
    else:
        print(gamer.name + ' get ' + str(card))
        gamer.add_score(card)
    print(gamer.name + ' score is ' + str(gamer.score))
# Player class shows the amount of money player have and the score of player
sam = Player('You', 0, money=100)
dealer = Player('Dealer', 0, 0)
# Dealer get his card
draw(dealer)
# Player choose the amount for the bet.
# NOTE(review): the bet is not validated -- a non-integer input raises
# ValueError and a bet larger than sam.money drives the bankroll negative.
bet = int(input("Your bet > "))
sam.take_money(bet)
print("You have " + str(sam.money) + " money")
# Player gets two cards
draw(sam)
draw(sam)
# Hit/stand loop: ends when the player stands ('n') or the score reaches
# 21 or more (the while condition is re-checked after every hit).
while sam.score < 21:
    # Player choose to stand or hit another card
    hit = input("Want to draw a card? Y/N ")
    if hit.lower() == 'y':
        print("You have " + str(sam.money) + " money left")
        draw(sam)
        if sam.score > 21:
            print("You got busted!")
    elif hit.lower() == 'n':
        print("Your score is " + str(sam.score))
        break
    else:
        # Unrecognised input: prompt again.
        print("Wrong command!")
        continue
def dealer_ai():
    """Dealer draws until reaching at least 17 (standard house rule),
    then announces a bust if the score exceeds 21.

    Reads the module-level ``dealer`` Player directly.
    """
    while dealer.score < 17:
        draw(dealer)
    if dealer.score > 21:
        print("Dealer got busted. You won!")
# Player could win if he gets 21 or BJ with first two cards and dealer doesn't
# NOTE(review): winnings are never paid back -- add_money() is not called
# on any outcome, so the bet is always lost; presumably a bug to confirm.
if 21 >= sam.score > dealer.score or 21 > dealer.score == sam.score:
    dealer_ai()
if sam.score == dealer.score <= 21:
    print("It's a tie!")
elif 21 >= sam.score > dealer.score:
    print("Congratulations! You Won!")
elif sam.score < dealer.score <= 21:
    # NOTE(review): "loose" is a typo for "lose" in this user-facing string.
    print("You loose. Too bad.")
| WaR1o/SimpleScripts | blackjack.py | Python | gpl-3.0 | 2,423 |
# -*- coding: utf-8 -*-
# Copyright (C) Aneesh Dogra <lionaneesh@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# You should have received a copy of the GNU General Public License
# along with this library; if not, write to the Free Software
# Foundation, 51 Franklin Street, Suite 500 Boston, MA 02110-1335 USA
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import GdkPixbuf
import os
import cPickle
import pickle
import logging
from Editable_Textbox import Editable_Textbox
from infoslicer.processing.Article_Data import *
from infoslicer.processing.Article import Article
import book
# from infoslicer.processing import Journal_Getter as journal
logger = logging.getLogger('infoslicer')
# For journal images
IMG_WIDTH = 400
IMG_HEIGHT = 300
class Journal_Gallery_View( Gtk.HBox ):
    """
    Created by Aneesh Dogra
    Journal Gallery View
    The journal gallery view displays the journal images.
    Drag-and-drop methods have been added to set up the images as a drag
    source.
    The data returned by drag-data-get will be a list containing
    an Image_Data object and a Sentence_Data object.
    These correspond to the image
    and caption respectively.
    """
    def __init__(self):
        # (image_path, caption) tuples for every journal image added.
        self.image_list = []
        GObject.GObject.__init__(self)
        # Index of the image on display; -1 means nothing shown yet.
        self.current_index = -1
        self.source_article_id = -1
        left_button = Gtk.Button(label="\n\n << \n\n")
        right_button = Gtk.Button(label="\n\n >> \n\n")
        self.imagenumberlabel = Gtk.Label()
        self.image = Gtk.Image()
        # Wrap the image in an EventBox so it can act as a drag source.
        self.imagebox = Gtk.EventBox()
        self.imagebox.add(self.image)
        self.imagebox.drag_source_set(Gdk.ModifierType.BUTTON1_MASK,
                                      [],
                                      Gdk.DragAction.COPY)
        self.imagebox.drag_source_add_image_targets()
        self.imagebox.connect("drag-begin", self.drag_begin_event, None)
        logging.debug('##################### Galler_View.connect')
        self.imagebox.connect("drag-data-get", self.drag_data_get_event, None)
        self.caption = Gtk.Label(label="")
        self.caption.set_line_wrap(True)
        # Vertical stack: "(i / n)" counter, the image, then its caption.
        self.image_drag_container = Gtk.VBox()
        self.image_drag_container.pack_start(self.imagenumberlabel, expand=False,
                                             fill=False, padding=0)
        self.image_drag_container.pack_start(self.imagebox, False, True, 0)
        self.image_drag_container.pack_start(self.caption, False, True, 0)
        # Blank labels above and below act as flexible spacers that keep
        # the image vertically centred.
        image_container = Gtk.VBox()
        image_container.pack_start(Gtk.Label(" "), True, True, 0)
        image_container.pack_start(self.image_drag_container, False, True, 0)
        image_container.pack_start(Gtk.Label(" "), True, True, 0)
        left_button_container = Gtk.VBox()
        left_button_container.pack_start(Gtk.Label(" "), True, True, 0)
        left_button_container.pack_start(left_button, False, True, 0)
        left_button_container.pack_start(Gtk.Label(" "), True, True, 0)
        right_button_container = Gtk.VBox()
        right_button_container.pack_start(Gtk.Label(" "), True, True, 0)
        right_button_container.pack_start(right_button, False, True, 0)
        right_button_container.pack_start(Gtk.Label(" "), True, True, 0)
        self.pack_start(left_button_container, False, True, 0)
        self.pack_start(image_container, True, True, 0)
        self.pack_start(right_button_container, False, True, 0)
        self.show_all()
        right_button.connect("clicked", self.get_next_item, None)
        left_button.connect("clicked", self.get_prev_item, None)
        # Show the first image (or the "no images" message) immediately.
        self.get_next_item(right_button, None)
    def get_next_item(self, button, param):
        """Advance to the next image, wrapping to the first after the last."""
        if self.image_list == []:
            self.caption.set_text("No images were found in the journal.")
            self.image.clear()
            return
        self.current_index += 1
        if self.current_index == len(self.image_list):
            self.current_index = 0
        self.imagebuf = GdkPixbuf.Pixbuf.new_from_file(self.image_list[self.current_index][0])
        self.imagebuf = self.imagebuf.scale_simple(IMG_WIDTH, IMG_HEIGHT,
                                                   GdkPixbuf.InterpType.BILINEAR)
        self.image.set_from_pixbuf(self.imagebuf)
        self.caption.set_text("\n" + self.image_list[self.current_index][1])
        self.imagenumberlabel.set_text("(%d / %d)\n" % (self.current_index+1, len(self.image_list)))
    def get_prev_item(self, button, param):
        """Step back to the previous image, wrapping to the last from the first."""
        if self.image_list == []:
            self.caption.set_text("No images were found in the journal.")
            self.image.clear()
            return
        if self.current_index == 0:
            self.current_index = len(self.image_list)
        self.current_index -= 1
        self.imagebuf = GdkPixbuf.Pixbuf.new_from_file(self.image_list[self.current_index][0])
        self.imagebuf = self.imagebuf.scale_simple(IMG_WIDTH, IMG_HEIGHT,
                                                   GdkPixbuf.InterpType.BILINEAR)
        self.image.set_from_pixbuf(self.imagebuf)
        self.caption.set_text("\n" + self.image_list[self.current_index][1])
        self.imagenumberlabel.set_text("(%d / %d)\n" % (self.current_index+1, len(self.image_list)))
    def get_first_item(self):
        """Jump to the first image (used after the gallery is (re)populated)."""
        if self.image_list == []:
            self.caption.set_text("No images were found in the journal.")
            self.image.clear()
            return
        self.current_index = 0
        self.imagebuf = GdkPixbuf.Pixbuf.new_from_file(self.image_list[self.current_index][0])
        self.imagebuf = self.imagebuf.scale_simple(IMG_WIDTH, IMG_HEIGHT,
                                                   GdkPixbuf.InterpType.BILINEAR)
        self.image.set_from_pixbuf(self.imagebuf)
        self.caption.set_text("\n" + self.image_list[self.current_index][1])
        logger.debug("setting text to:")
        logger.debug("(%d / %d)\n" %
                     (self.current_index+1, len(self.image_list)))
        self.imagenumberlabel.set_text("(%d / %d)\n" % (self.current_index+1, len(self.image_list)))
    def drag_begin_event(self, widget, context, data):
        """Use the currently displayed pixbuf as the drag icon."""
        logging.debug('########### Journal_Journal_Gallery_View.drag_begin_event called')
        self.imagebox.drag_source_set_icon_pixbuf(self.imagebuf)
    def drag_data_get_event(self, widget, context, selection_data, info, timestamp, data):
        """Serialise the current image + caption as pickled Section_Data
        so the drop target can insert them into an article."""
        logger.debug('############# Journal_Journal_Gallery_View.drag_data_get_event')
        atom = Gdk.atom_intern("section", only_if_exists=False)
        imagedata = Picture_Data(self.source_article_id,
                                 self.image_list[self.current_index][0])
        captiondata = Sentence_Data(0, self.source_article_id, 0, 0, 0, self.image_list[self.current_index][1])
        paragraph1data = Paragraph_Data(0, self.source_article_id, 0, 0, [imagedata])
        paragraph2data = Paragraph_Data(0, self.source_article_id, 0, 0, [captiondata])
        sectionsdata = [Section_Data(0, self.source_article_id, 0, [paragraph1data, paragraph2data])]
        string = cPickle.dumps(sectionsdata)
        selection_data.set(atom, 8, string)
    def add_image(self, image_path, title):
        """Append an (image_path, title) pair and reset the view to it."""
        logger.debug('############# Journal_Journal_Gallery_View.add_image')
        self.image_list.append((image_path, title))
        logger.debug(self.image_list)
        self.get_first_item()
| walterbender/infoslicer | infoslicer/widgets/Journal_Gallery_View.py | Python | gpl-2.0 | 7,706 |
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""The function in this module sends a mail to the user (and admins if
required) saying that a record has been deleted from the repository.
"""
__revision__ = "$Id$"
import os
from invenio.errorlib import register_exception
from invenio.webuser import email_valid_p
from invenio.config import CFG_SITE_SUPPORT_EMAIL, CFG_SITE_NAME
from invenio.websubmit_config import CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN
from invenio.mailutils import send_email
## Template for the deletion-notification mail.  %-substitution keys:
## site-name, report-number, deleter (filled in by Send_Delete_Mail).
## NOTE(review): "Thankyou" is a typo in the user-visible mail text;
## fixing it would change the sent message, so it is only flagged here.
CFG_MAIL_BODY = """
**This is an automated mail from %(site-name)s**
The following record was deleted from %(site-name)s:
Report number: %(report-number)s
It was deleted by %(deleter)s.
Please note that there may be a short delay before the record
disappears from its collection. It should be gone by tomorrow morning
at the latest.
Thankyou."""
def Send_Delete_Mail(parameters, curdir, form, user_info=None):
    """
    In the event of a record having been deleted, this function is used
    to mail the submitter (and possibly the record "managers")
    informing them about the record's deletion.
    @parameters:
       + edsrn: The name of the file in the current submission's
                working directory, in which the record's report number
                is stored.
       + record_managers: A comma-separated string of the email
                          addresses of the record's managers. If given,
                          they will be (blind*) copied into the mail.
                           * At this time, they are only blind copied
                             because of send_email's behaviour of
                             blind copying everyone if "To" contains
                             multiple addresses. Anyway, blind was
                             wanted . . .
    @return: empty string.
    @Exceptions raised: None.
    """
    ## Get report number from the file named by the "edsrn" parameter in
    ## the submission's working directory:
    report_number_file = parameters["edsrn"]
    report_number = Send_Delete_Mail_read_file(
        "%s/%s" % (curdir, report_number_file)).strip()
    ########
    ## Get the "record_managers" parameter AND WASH THE EMAIL ADDRESSES
    ## TO BE SURE THAT THEY'RE VALID:
    raw_record_managers = parameters["record_managers"]
    try:
        ## We assume that the email addresses of item managers are
        ## separated by commas.
        manager_addresses = [manager.strip() for manager in
                             raw_record_managers.split(",")]
        ## Keep only the syntactically valid addresses; join() produces
        ## the comma-separated list without a trailing comma directly
        ## (the original built the string with += and then stripped it).
        record_managers = ",".join(address for address in manager_addresses
                                   if email_valid_p(address))
    except AttributeError:
        ## record_managers doesn't seem to be a string? Treat it as
        ## though it were empty:
        record_managers = ""
    ##
    ########
    ## User email address:
    user_email = user_info["email"]
    ## Concatenate the user's email address with the managers' addresses.
    ## Note: What we want to do here is send the mail to the user as "To"
    ## and to the managers as "bcc". At the time of writing though,
    ## send_email doesn't appear to allow email headers. It does have a
    ## strange behaviour though: If "To" contains more than one address,
    ## comma separated, ALL addresses will be put in "bcc" and the mail
    ## will appear to be sent to "undisclosed recipients".
    if record_managers != "":
        if user_email != "guest":
            email_recipients = "%s,%s" % (user_email, record_managers)
        else:
            ## Can't send mails to "guest"! Send only to managers.
            email_recipients = record_managers
    elif user_email == "guest":
        ## The user is a guest and there are no managers to send the mail
        ## to. Drop out quietly.
        return ""
    else:
        ## No managers to send the mail to. Send it only to the user.
        email_recipients = user_email
    mail_subj = "Document %s deleted from %s" \
                % (report_number, CFG_SITE_NAME)
    mail_body = CFG_MAIL_BODY % \
                { 'report-number' : report_number,
                  'deleter' : user_email,
                  'site-name' : CFG_SITE_NAME,
                }
    send_email(CFG_SITE_SUPPORT_EMAIL,
               email_recipients,
               mail_subj,
               mail_body,
               copy_to_admin=CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN)
    ##
    return ""
def Send_Delete_Mail_read_file(filename):
    """Read a file from a path and return it as a string.
    @param filename: (string) - the full path to the file to be read.
    @return: (string) - the file's contents; empty string when the file
        is missing, unreadable, or an IOError occurs while reading.
    """
    file_contents = ""
    if os.access("%s" % filename, os.R_OK):
        try:
            ## Use a context manager so the file handle is always closed
            ## (the original called open(...).read() and leaked the handle).
            with open("%s" % filename, "r") as fh:
                file_contents = fh.read()
        except IOError:
            ## There was a problem reading the file. Register the exception
            ## so that the admin is informed.
            err_msg = """Error in a WebSubmit function. An unexpected """ \
                      """error was encountered when trying to read from """ \
                      """the file [%s].""" % filename
            register_exception(prefix=err_msg)
    return file_contents
| pombredanne/invenio | modules/websubmit/lib/functions/Send_Delete_Mail.py | Python | gpl-2.0 | 6,455 |
# This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
from fedmsg_meta_fedora_infrastructure import BaseProcessor
from fedmsg_meta_fedora_infrastructure.fasshim import \
avatar_url_from_email, avatar_url_from_openid, email2fas
from fedmsg_meta_fedora_infrastructure.conglomerators.mailman import \
mailman as mailman_conglomerator
import email.utils
import warnings
import fedmsg.config
config = fedmsg.config.load_config()
def _full_email_to_email(full_from):
return email.utils.parseaddr(full_from)[1]
def _email_to_username(email):
    # Resolve an email address to a FAS username via the module-level
    # fedmsg config.  Returns whatever email2fas yields (callers treat a
    # value containing '@' as "lookup failed").
    # NOTE(review): the parameter shadows the stdlib ``email`` module
    # imported above; harmless inside this function but worth renaming.
    return email2fas(email, **config)
class MailmanProcessor(BaseProcessor):
    """fedmsg metadata processor for mailman3 list-traffic messages."""
    __name__ = "Mailman"
    __description__ = "mailing list messages"
    __link__ = "https://lists.fedoraproject.org"
    __docs__ = ("https://fedoraproject.org/wiki/"
                "Communicating_and_getting_help#Mailing_Lists")
    __obj__ = "Mailing List Messages"
    __icon__ = "http://cloud.ohloh.net/attachments/37686/mailman_med.png"
    conglomerators = [
        mailman_conglomerator.ByMessageId,
    ]
    def subtitle(self, msg, **config):
        """Return a one-line human-readable summary of the message."""
        if 'receive' in msg['topic']:
            lst = msg['msg']['mlist']['list_name']
            subject = msg['msg']['msg']['subject'].replace('\n', ' ')
            full_from = msg['msg']['msg']['from']
            user = _email_to_username(_full_email_to_email(full_from))
            if not user:
                user = "someone"
            d = msg['msg']['msg']
            # A reply carries References and/or In-Reply-To headers.
            if d['references'] or d['in-reply-to']:
                tmpl = self._(
                    "On the {lst} list, {user} replied to '{subject}'")
            else:
                tmpl = self._("{user} wrote '{subject}' to the {lst} list")
            return tmpl.format(lst=lst, user=user, subject=subject)
        else:
            # Only 'receive' messages are expected; returns None otherwise.
            warnings.warn("mailman3 message *must* have 'receive' in topic")
    def secondary_icon(self, msg, **config):
        """Return an avatar URL for the sender (FAS openid or libravatar)."""
        full_from = msg['msg']['msg']['from']
        email = _full_email_to_email(full_from)
        # Can we find this person in FAS?
        username = email2fas(email, **config)
        if '@' in username:
            # No? Then use their email for libravatar
            return avatar_url_from_email(email)
        else:
            # Yes? Then use their openid like everywhere else.
            return avatar_url_from_openid(username)
    def link(self, msg, **config):
        """Return the archive URL for this message, or None if unarchived."""
        base_url = 'https://lists.fedoraproject.org/archives'
        archived_at = msg['msg']['msg']['archived-at']
        # Strip the angle brackets some MTAs wrap around the header value.
        if archived_at and archived_at.startswith('<'):
            archived_at = archived_at[1:]
        if archived_at and archived_at.endswith('>'):
            archived_at = archived_at[:-1]
        if archived_at and archived_at.startswith('http'):
            return archived_at
        elif archived_at:
            return base_url + archived_at
        else:
            return None
    def usernames(self, msg, **config):
        """Return the set of FAS usernames involved (at most the sender)."""
        full_from = msg['msg']['msg']['from']
        user = _email_to_username(_full_email_to_email(full_from))
        # A value containing '@' means the FAS lookup failed.
        if user and '@' not in user:
            return set([user])
        else:
            return set()
    def objects(self, msg, **config):
        """Return a one-element set identifying the thread this message
        belongs to: '<list>/<refs.../>message-id/message'."""
        # Build a repr of all the messages in this thread
        references = msg['msg']['msg']['references']
        # Fall back to this header if there's nothing in the first.
        if not references:
            references = msg['msg']['msg']['in-reply-to']
        references = references and references.split() or []
        # Strip the surrounding angle brackets from each message id.
        references = [r[1:-1] for r in references]
        message_id = msg['msg']['msg']['message-id'][1:-1]
        if references:
            tokens = ['/'.join(references), message_id, 'message']
        else:
            tokens = [message_id, 'message']
        lst = msg['msg']['mlist']['list_name']
        tokens = [lst] + tokens
        return set(['/'.join(tokens)])
| msimacek/fedmsg_meta_fedora_infrastructure | fedmsg_meta_fedora_infrastructure/mailman3.py | Python | lgpl-2.1 | 4,694 |
import sys
import os
from mock import patch
import pytest
sys.path.insert(0, '..')
import fix_rpath_osx
amberhome = os.getenv("AMBERHOME")
@pytest.fixture
def amber_src():
    # The AmberTools install prefix taken from $AMBERHOME (None if unset,
    # in which case these integration-style tests cannot run).
    return amberhome
@pytest.fixture
def libcpptraj():
    # Absolute path of the libcpptraj shared library inside $AMBERHOME.
    return os.path.join(amberhome, 'lib', 'libcpptraj.dylib')
def effect(cmd):
    """Side-effect stand-in for subprocess.check_call: echo the command."""
    print(cmd)
def test_is_object_file(libcpptraj):
    # A real .dylib is detected as an object file; this Python source is not.
    assert fix_rpath_osx.is_object_file(libcpptraj)
    assert not fix_rpath_osx.is_object_file(__file__)
@patch('os.walk')
@patch('fix_rpath_osx.is_object_file')
def test_get_file_object_from_prefix(mock_obj_file, mock_walk):
    # Treat every walked file as an object file; feed one fake walk per
    # scanned subtree (decorators apply bottom-up: os.walk -> mock_walk).
    mock_obj_file.return_value = True
    mock_walk.side_effect = [[('1/bin', '', ['cpptraj'])], [('4', '5', ['6.dylib'])]]
    files = list(fix_rpath_osx.get_file_object_from_prefix(amberhome))
    assert files == [os.path.join('1/bin', 'cpptraj'),
                     os.path.join('4', '6.dylib')]
def test_add_loader_path(libcpptraj):
    # add_loader_path must compute an @loader_path rpath whose '..' depth
    # matches how deep the binary sits below the install prefix.
    with patch('subprocess.check_call') as mock_call:
        # lib/libcpptraj.dylib is one level down -> '../lib'.
        fix_rpath_osx.add_loader_path(libcpptraj, amberhome, 'lib')
        mock_call.assert_called_with([
            'install_name_tool', '-add_rpath', '@loader_path/../lib',
            libcpptraj
        ])
        mock_call.reset_mock()
        # The pysander extension is four levels down -> '../../../../lib'.
        pysander_so = os.path.join(
            amberhome,
            'lib/python3.6/site-packages/sander/pysander.cpython-36m-darwin.so'
        )
        fix_rpath_osx.add_loader_path(pysander_so, amberhome, 'lib')
        mock_call.assert_called_with([
            'install_name_tool', '-add_rpath', '@loader_path/../../../../lib',
            pysander_so
        ])
def test_add_id(libcpptraj):
    # add_id rewrites the install name to '@rpath/<basename>'.
    with patch('subprocess.check_call') as mock_call:
        fix_rpath_osx.add_id(libcpptraj)
        mock_call.assert_called_with([
            'install_name_tool', '-id', '@rpath/libcpptraj.dylib', libcpptraj
        ])
def test_get_dylibs(libcpptraj):
    # Integration test (no mocking): requires a real macOS AmberTools
    # build with exactly these link-time dependencies on libcpptraj.
    expected_libs = sorted([
        '{}/lib/libfftw3.3.dylib'.format(amberhome),
        '{}/lib/libsander.dylib'.format(amberhome),
        '/usr/local/gfortran/lib/libgfortran.3.dylib',
        '/usr/local/gfortran/lib/libstdc++.6.dylib',
        '/usr/local/gfortran/lib/libgcc_s.1.dylib',
        '/usr/lib/libz.1.dylib',
        '/usr/lib/libbz2.1.0.dylib',
        '/usr/lib/libSystem.B.dylib',
    ])
    assert sorted(fix_rpath_osx.get_dylibs(libcpptraj)) == expected_libs
@patch('subprocess.check_call')
def test_get_libs(mock_call, libcpptraj):
    # Smoke test: fix_linking_libs runs over the Amber-local dependencies
    # with check_call replaced by a printing stub (no assertions made).
    mock_call.side_effect = effect
    libs = fix_rpath_osx.get_dylibs(libcpptraj)
    amber_libs = [lib for lib in libs if amberhome in lib]
    fix_rpath_osx.fix_linking_libs(libcpptraj, amber_libs)
@patch('fix_rpath_osx.add_loader_path')
@patch('fix_rpath_osx.add_id')
@patch('fix_rpath_osx.get_file_object_from_prefix')
@patch('subprocess.check_call')
def test_main(
        mock_call,
        mock_get_files,
        mock_get_id,
        mock_add_loader_path,
        amber_src, libcpptraj):
    # Decorators apply bottom-up, so the first parameter is the lowest
    # patch (subprocess.check_call) and so on upwards.
    # os.path.join with an absolute second argument just yields it, so
    # main() sees exactly [libcpptraj].
    mock_get_files.return_value = [os.path.join(amber_src, libcpptraj)]
    fix_rpath_osx.main([amber_src])
    mock_get_id.assert_called_with(libcpptraj)
    mock_add_loader_path.assert_called_with(libcpptraj, amberhome, 'lib')
@patch('fix_rpath_osx.get_file_object_from_prefix')
@patch('subprocess.check_call')
def test_main_with_call(
        mock_call, mock_get_files,
        amber_src, libcpptraj):
    # Run main() end-to-end with install_name_tool calls replaced by the
    # printing stub; exercises the --prefix rewriting path.
    mock_call.side_effect = effect
    mock_get_files.return_value = [os.path.join(amber_src, 'lib', 'libcpptraj.dylib')]
    origin_prefix = '/Users/travis/amber16'
    fix_rpath_osx.main([amber_src, '--prefix', origin_prefix])
| Amber-MD/ambertools-conda-build | conda_tools/test/test_fix_rpath_osx.py | Python | mit | 3,587 |
# import_global_osm.py:
#
# This script is used to import boundaries from OpenStreetMap into
# MaPit.
#
# It takes KML data generated either by
# get-boundaries-by-admin-level.py, so you need to have run that
# script first.
#
# This script was originally based on import_norway_osm.py by Matthew
# Somerville.
#
# Copyright (c) 2011, 2012 UK Citizens Online Democracy. All rights reserved.
# Email: mark@mysociety.org; WWW: http://www.mysociety.org
import os
import re
import xml.sax
from optparse import make_option
from django.core.management.base import LabelCommand
# Not using LayerMapping as want more control, but what it does is what this does
#from django.contrib.gis.utils import LayerMapping
from django.contrib.gis.gdal import *
from django.contrib.gis.geos import MultiPolygon
from mapit.models import Area, Generation, Country, Type, Code, CodeType, NameType
from mapit.management.command_utils import save_polygons, KML
from mapit.management.command_utils import fix_invalid_geos_polygon, fix_invalid_geos_multipolygon
from glob import glob
import urllib2
from BeautifulSoup import BeautifulSoup
from collections import namedtuple
import json
def make_missing_none(s):
    """If s is empty (considering Unicode spaces) return None, else s"""
    # Raw string: the original non-raw '\s' is an invalid escape sequence
    # (a DeprecationWarning on modern Pythons).  (?u) keeps the Unicode
    # sense of whitespace; (?is) are preserved from the original pattern.
    if re.search(r'(?uis)^\s*$', s):
        return None
    else:
        return s
# Row type for the scraped ISO 639-2 table: the three-letter code, the
# optional two-letter (ISO 639-1) code, and the language's English and
# French names.  Missing cells are None (see make_missing_none above).
LanguageCodes = namedtuple('LanguageCodes',
                           ['three_letter',
                            'two_letter',
                            'english_name',
                            'french_name'])
def get_iso639_2_table():
    """Scrape and return the table of ISO639-2 and ISO639-1 language codes
    The OSM tags of the form "name:en", "name:fr", etc. refer to
    ISO639-1 two-letter codes, or ISO639-2 three-letter codes. This
    function parses the Library of Congress table of these values, and
    returns them as a list of LanguageCodes

    Requires network access; uses Python 2's urllib2 and BeautifulSoup 3.
    """
    result = []
    url = "http://www.loc.gov/standards/iso639-2/php/code_list.php"
    f = urllib2.urlopen(url)
    data = f.read()
    f.close()
    soup = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    # The code table is the only table on the page with border="1".
    table = soup.find('table', {'border': '1'})
    for row in table.findAll('tr', recursive=False):
        tds = row.findAll('td', recursive=False)
        # Skip header/spacer rows; data rows have exactly four cells.
        if len(tds) != 4:
            continue
        strings = ["".join(td.findAll(text=True)).strip() for td in tds]
        # Empty cells become None so callers can test them directly.
        result_row = LanguageCodes._make(make_missing_none(s) for s in strings)
        result.append(result_row)
    return result
class Command(LabelCommand):
    """Django management command: import OSM admin boundaries into MapIt
    from per-type directories of KML files (Python 2 / old Django)."""
    help = 'Import OSM boundary data from KML files'
    args = '<KML-DIRECTORY>'
    option_list = LabelCommand.option_list + (
        make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
    )
    def handle_label(self, directory_name, **options):
        """Walk <KML-DIRECTORY>/<TYPE>/*.kml and create or carry forward
        MapIt Areas in the new generation.  Without --commit this is a
        dry run that only prints what it would do."""
        current_generation = Generation.objects.current()
        new_generation = Generation.objects.new()
        if not new_generation:
            raise Exception, "No new generation to be used for import!"
        if not os.path.isdir(directory_name):
            raise Exception, "'%s' is not a directory" % (directory_name,)
        os.chdir(directory_name)
        # Type subdirectories are named by three-character MapIt type codes.
        mapit_type_glob = "[A-Z0-9][A-Z0-9][A-Z0-9]"
        if not glob(mapit_type_glob):
            raise Exception, "'%s' did not contain any directories that look like MapIt types (e.g. O11, OWA, etc.)" % (directory_name,)
        def verbose(s):
            # Print progress only at verbosity > 1.
            if int(options['verbosity']) > 1:
                print s.encode('utf-8')
        verbose("Loading any admin boundaries from " + directory_name)
        verbose("Finding language codes...")
        # Map every ISO 639-1/2 code to its English language name, so that
        # OSM "name:<code>" tags can be turned into NameType descriptions.
        language_code_to_name = {}
        code_keys = ('two_letter', 'three_letter')
        for row in get_iso639_2_table():
            english_name = getattr(row, 'english_name')
            for k in code_keys:
                code = getattr(row, k)
                if not code:
                    continue
                # Some of the language codes have a bibliographic or
                # terminology code, so strip those out:
                codes = re.findall(r'(\w+) \([BT]\)', code)
                if not codes:
                    codes = [code]
                for c in codes:
                    language_code_to_name[c] = english_name
        global_country = Country.objects.get(code='G')
        # print json.dumps(language_code_to_name, sort_keys=True, indent=4)
        # Optional resume point: set to a filename fragment to skip every
        # file before the first match (useful after an aborted run).
        skip_up_to = None
        # skip_up_to = 'relation-80370'
        skipping = bool(skip_up_to)
        for type_directory in sorted(glob(mapit_type_glob)):
            verbose("Loading type " + type_directory)
            if not os.path.exists(type_directory):
                verbose("Skipping the non-existent " + type_directory)
                continue
            verbose("Loading all KML in " + type_directory)
            files = sorted(os.listdir(type_directory))
            total_files = len(files)
            for i, e in enumerate(files):
                progress = "[%d%% complete] " % ((i * 100) / total_files,)
                if skipping:
                    if skip_up_to in e:
                        skipping = False
                    else:
                        continue
                if not e.endswith('.kml'):
                    verbose("Ignoring non-KML file: " + e)
                    continue
                # Filenames are '<way|relation>-<osm id>-...kml'.
                m = re.search(r'^(way|relation)-(\d+)-', e)
                if not m:
                    raise Exception, u"Couldn't extract OSM element type and ID from: " + e
                osm_type, osm_id = m.groups()
                kml_filename = os.path.join(type_directory, e)
                verbose(progress + "Loading " + unicode(os.path.realpath(kml_filename), 'utf-8'))
                # Need to parse the KML manually to get the ExtendedData
                kml_data = KML()
                xml.sax.parse(kml_filename, kml_data)
                useful_names = [n for n in kml_data.data.keys() if not n.startswith('Boundaries for')]
                if len(useful_names) == 0:
                    raise Exception, "No useful names found in KML data"
                elif len(useful_names) > 1:
                    raise Exception, "Multiple useful names found in KML data"
                name = useful_names[0]
                print " ", name.encode('utf-8')
                if osm_type == 'relation':
                    code_type_osm = CodeType.objects.get(code='osm_rel')
                elif osm_type == 'way':
                    code_type_osm = CodeType.objects.get(code='osm_way')
                else:
                    raise Exception, "Unknown OSM element type:", osm_type
                ds = DataSource(kml_filename)
                layer = ds[0]
                if len(layer) != 1:
                    raise Exception, "We only expect one feature in each layer"
                feat = layer[0]
                # Reproject to WGS84 (EPSG:4326) without mutating the source.
                g = feat.geom.transform(4326, clone=True)
                if g.geom_count == 0:
                    # Just ignore any KML files that have no polygons in them:
                    verbose('    Ignoring that file - it contained no polygons')
                    continue
                # Nowadays, in generating the data we should have
                # excluded any "polygons" with less than four points
                # (the final one being the same as the first), but
                # just in case:
                polygons_too_small = 0
                for polygon in g:
                    if polygon.num_points < 4:
                        polygons_too_small += 1
                if polygons_too_small:
                    message = "%d out of %d polygon(s) were too small" % (polygons_too_small, g.geom_count)
                    verbose('    Skipping, since ' + message)
                    continue
                g_geos = g.geos
                if not g_geos.valid:
                    verbose("    Invalid KML:" + unicode(kml_filename, 'utf-8'))
                    fixed_multipolygon = fix_invalid_geos_multipolygon(g_geos)
                    if len(fixed_multipolygon) == 0:
                        verbose("    Invalid polygons couldn't be fixed")
                        continue
                    g = fixed_multipolygon.ogr
                area_type = Type.objects.get(code=type_directory)
                # Look for an area with this OSM id in the current generation:
                try:
                    osm_code = Code.objects.get(type=code_type_osm,
                                                code=osm_id,
                                                area__generation_high__lte=current_generation,
                                                area__generation_high__gte=current_generation)
                except Code.DoesNotExist:
                    verbose('    No area existed in the current generation with that OSM element type and ID')
                    osm_code = None
                was_the_same_in_current = False
                if osm_code:
                    m = osm_code.area
                    # First, we need to check if the polygons are
                    # still the same as in the previous generation:
                    previous_geos_geometry = m.polygons.collect()
                    if previous_geos_geometry is None:
                        verbose('    In the current generation, that area was empty - skipping')
                    else:
                        # Simplify it to make sure the polygons are valid:
                        previous_geos_geometry = previous_geos_geometry.simplify(tolerance=0)
                        new_geos_geometry = g.geos.simplify(tolerance=0)
                        if previous_geos_geometry.equals(new_geos_geometry):
                            was_the_same_in_current = True
                        else:
                            verbose('    In the current generation, the boundary was different')
                if was_the_same_in_current:
                    # Extend the high generation to the new one:
                    verbose('    The boundary was identical in the previous generation; raising generation_high')
                    m.generation_high = new_generation
                else:
                    # Otherwise, create a completely new area:
                    m = Area(
                        name = name,
                        type = area_type,
                        country = global_country,
                        parent_area = None,
                        generation_low = new_generation,
                        generation_high = new_generation,
                    )
                poly = [ g ]
                if options['commit']:
                    m.save()
                    verbose('    Area ID: ' + str(m.id))
                    if name not in kml_data.data:
                        print json.dumps(kml_data.data, sort_keys=True, indent=4)
                        raise Exception, u"Will fail to find '%s' in the dictionary" % (name,)
                    # Sync translated names: anything left in old_lang_codes
                    # afterwards existed before but is gone from OSM now.
                    old_lang_codes = set(unicode(n.type.code) for n in m.names.all())
                    for k, translated_name in kml_data.data[name].items():
                        language_name = None
                        if k == 'name':
                            lang = 'default'
                            language_name = "OSM Default"
                        else:
                            name_match = re.search(r'^name:(.+)$', k)
                            if name_match:
                                lang = name_match.group(1)
                                if lang in language_code_to_name:
                                    language_name = language_code_to_name[lang]
                        if not language_name:
                            continue
                        old_lang_codes.discard(unicode(lang))
                        # Otherwise, make sure that a NameType for this language exists:
                        NameType.objects.update_or_create({'code': lang},
                                                          {'code': lang,
                                                           'description': language_name})
                        name_type = NameType.objects.get(code=lang)
                        m.names.update_or_create({ 'type': name_type }, { 'name': translated_name })
                    if old_lang_codes:
                        verbose('Removing deleted languages codes: ' + ' '.join(old_lang_codes))
                        m.names.filter(type__code__in=old_lang_codes).delete()
                    # If the boundary was the same, the old Code
                    # object will still be pointing to the same Area,
                    # which just had its generation_high incremented.
                    # In every other case, there's a new area object,
                    # so create a new Code and save it:
                    if not was_the_same_in_current:
                        new_code = Code(area=m, type=code_type_osm, code=osm_id)
                        new_code.save()
                    # NOTE(review): 'code' here is whatever was last bound in
                    # the loops above (an ISO language code string), which
                    # looks like a leftover -- presumably the key should be a
                    # stable identifier such as osm_id.  TODO confirm against
                    # save_polygons in mapit.management.command_utils.
                    save_polygons({ code : (m, poly) })
| New-Bamboo/mapit | mapit/management/commands/mapit_global_import.py | Python | agpl-3.0 | 13,126 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.