| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses, 1 value | stringclasses, 15 values | int32, 2–1.05M |
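The table above is the column schema of this dump: each row pairs a `code` sample with its `repo_name`, `path`, `language`, `license`, and `size`. As a minimal sketch of how a dump with these six columns could be loaded and filtered, assuming it is published as a Hugging Face `datasets`-compatible dataset (the dataset identifier below is a hypothetical placeholder, not the real name of this dump):

```python
from datasets import load_dataset

# Hypothetical identifier -- substitute the actual name/path of this dump.
ds = load_dataset("your-namespace/python-code-dump", split="train")

# Every record exposes the six columns from the schema table.
sample = ds[0]
print(sample["repo_name"], sample["path"], sample["license"], sample["size"])

# Example: keep only the smaller AGPL-3.0 licensed files.
small_agpl = ds.filter(lambda r: r["license"] == "agpl-3.0" and r["size"] < 20000)
print(len(small_agpl), "matching files")
```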
# Skip test if cuda_ndarray is not available.
from __future__ import absolute_import, print_function, division
import itertools

from nose.plugins.skip import SkipTest
import numpy as np
from six.moves import xrange

from theano import tensor as T
import theano
from theano.tensor.extra_ops import cumsum, CumOp
from theano.tests import unittest_tools as utt

import theano.sandbox.cuda as cuda_ndarray
if cuda_ndarray.cuda_available:
    import theano.tensor.tests.test_extra_ops
    from theano.sandbox.cuda.extra_ops import GpuCumsum
else:
    raise SkipTest('Optional package cuda disabled')

if theano.config.mode == 'FAST_COMPILE':
    mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
else:
    mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')


class TestGpuCumsum(theano.tensor.tests.test_extra_ops.TestCumOp):
    mode = mode_with_gpu

    def setUp(self):
        super(TestGpuCumsum, self).setUp()

        # Fetch some useful properties on the device
        cuda = theano.sandbox.cuda
        device_id = cuda.use.device_number
        if device_id is None:
            cuda.use("gpu",
                     force=False,
                     default_to_move_computation_to_gpu=False,
                     move_shared_float32_to_gpu=False,
                     enable_cuda=False,
                     test_driver=True)
            device_id = cuda.use.device_number
        cuda_ndarray = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray
        prop = cuda_ndarray.device_properties(device_id)
        self.max_threads_dim0 = prop['maxThreadsDim0']
        self.max_grid_size1 = prop['maxGridSize1']

    def test_Strides1D(self):
        x = T.fvector('x')

        for axis in [0, None, -1]:
            a = np.random.random((42,)).astype("float32")
            cumsum_function = theano.function([x], cumsum(x, axis=axis),
                                              mode=self.mode)

            slicings = [slice(None, None, None),  # Normal strides
                        slice(None, None, 2),     # Stepped strides
                        slice(None, None, -1),    # Negative strides
                        ]

            # Cartesian product of all slicings to test.
            for slicing in itertools.product(slicings, repeat=x.ndim):
                f = theano.function([x], cumsum(x[slicing], axis=axis),
                                    mode=self.mode)
                assert [n for n in f.maker.fgraph.toposort()
                        if isinstance(n.op, GpuCumsum)]
                utt.assert_allclose(np.cumsum(a[slicing], axis=axis), f(a))
                utt.assert_allclose(np.cumsum(a[slicing], axis=axis),
                                    cumsum_function(a[slicing]))

    def test_Strides2D(self):
        x = T.fmatrix('x')

        for axis in [0, 1, None, -1, -2]:
            a = np.random.random((42, 30)).astype("float32")
            cumsum_function = theano.function([x], cumsum(x, axis=axis),
                                              mode=self.mode)

            slicings = [slice(None, None, None),  # Normal strides
                        slice(None, None, 2),     # Stepped strides
                        slice(None, None, -1),    # Negative strides
                        ]

            # Cartesian product of all slicings to test.
            for slicing in itertools.product(slicings, repeat=x.ndim):
                f = theano.function([x], cumsum(x[slicing], axis=axis),
                                    mode=self.mode)
                assert [n for n in f.maker.fgraph.toposort()
                        if isinstance(n.op, GpuCumsum)]
                utt.assert_allclose(np.cumsum(a[slicing], axis=axis), f(a))
                utt.assert_allclose(np.cumsum(a[slicing], axis=axis),
                                    cumsum_function(a[slicing]))

    def test_Strides3D(self):
        x = T.ftensor3('x')

        for axis in [0, 1, 2, None, -1, -2, -3]:
            a = np.random.random((42, 30, 25)).astype("float32")
            cumsum_function = theano.function([x], cumsum(x, axis=axis),
                                              mode=self.mode)

            slicings = [slice(None, None, None),  # Normal strides
                        slice(None, None, 2),     # Stepped strides
                        slice(None, None, -1),    # Negative strides
                        ]

            # Cartesian product of all slicings to test.
            for slicing in itertools.product(slicings, repeat=x.ndim):
                f = theano.function([x], cumsum(x[slicing], axis=axis),
                                    mode=self.mode)
                assert [n for n in f.maker.fgraph.toposort()
                        if isinstance(n.op, GpuCumsum)]
                utt.assert_allclose(np.cumsum(a[slicing], axis=axis), f(a))
                utt.assert_allclose(np.cumsum(a[slicing], axis=axis),
                                    cumsum_function(a[slicing]))

    def test_GpuCumsum1D(self):
        block_max_size = self.max_threads_dim0 * 2

        x = T.fvector('x')
        f = theano.function([x], cumsum(x), mode=self.mode)
        assert [n for n in f.maker.fgraph.toposort()
                if isinstance(n.op, GpuCumsum)]

        # Extensive testing for the first 1025 sizes
        a = np.random.random(1025).astype("float32")
        for i in xrange(a.shape[0]):
            utt.assert_allclose(np.cumsum(a[:i]), f(a[:i]))

        # Use multiple GPU threadblocks
        a = np.random.random((block_max_size + 2,)).astype("float32")
        utt.assert_allclose(np.cumsum(a), f(a))

        # Use recursive cumsum
        a = np.ones((block_max_size * (block_max_size + 1) + 2,),
                    dtype="float32")
        utt.assert_allclose(np.cumsum(a), f(a))

    def test_GpuCumsum2D(self):
        block_max_size = self.max_threads_dim0 * 2

        x = T.fmatrix('x')
        for shape_axis, axis in zip([0, 1, 0, 1, 0], [0, 1, None, -1, -2]):
            f = theano.function([x], cumsum(x, axis=axis), mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumsum)]

            # Extensive testing for the first 1025 sizes
            a_shape = [5, 5]
            a_shape[shape_axis] = 1025
            a = np.random.random(a_shape).astype("float32")
            slices = [slice(None), slice(None)]
            for i in xrange(a.shape[shape_axis]):
                slices[shape_axis] = slice(i)
                fa = f(a[slices])
                npa = np.cumsum(a[slices], axis=axis)
                utt.assert_allclose(npa, fa)

            # Use multiple GPU threadblocks
            a_shape = [5, 5]
            a_shape[shape_axis] = block_max_size + 2
            a = np.random.random(a_shape).astype("float32")
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a))

            # Use multiple GPU gridblocks
            a_shape = [4, 4]
            a_shape[1 - shape_axis] = self.max_grid_size1 + 1
            a = np.random.random(a_shape).astype("float32")
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a), rtol=5e-5)

            # Use recursive cumsum
            a_shape = [3, 3]
            a_shape[shape_axis] = block_max_size * (
                block_max_size + 1) + 2
            a = np.random.random(a_shape).astype("float32")
            a = np.sign(a - 0.5).astype("float32")  # Avoid floating point error
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a))

    def test_GpuCumsum3D(self):
        block_max_size = self.max_threads_dim0 * 2

        x = T.ftensor3('x')
        for shape_axis, axis in zip([0, 1, 2, 0, 2, 1, 0], [0, 1, 2, None, -1, -2, -3]):
            f = theano.function([x], cumsum(x, axis=axis), mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumsum)]

            # Extensive testing for the first 1025 sizes
            a_shape = [5, 5, 5]
            a_shape[shape_axis] = 1025
            a = np.random.rand(*a_shape).astype("float32")
            slices = [slice(None), slice(None), slice(None)]
            for i in xrange(a.shape[shape_axis]):
                slices[shape_axis] = slice(i)
                fa = f(a[slices])
                npa = np.cumsum(a[slices], axis=axis)
                utt.assert_allclose(npa, fa)

            # Use multiple GPU threadblocks (along accumulation axis)
            a_shape = [2, 2, 2]
            a_shape[shape_axis] = block_max_size + 2
            a = np.random.random(a_shape).astype("float32")
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a))

            # Use multiple GPU gridblocks (not along accumulation axis)
            a_shape = [5, 5, 5]
            a_shape[(shape_axis + 1) % 3] = self.max_grid_size1 + 1
            a = np.random.random(a_shape).astype("float32")
            if axis is None:
                # Avoid floating point error
                a = np.sign(a - 0.5).astype("float32")
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a))

            a_shape = [5, 5, 5]
            a_shape[(shape_axis + 2) % 3] = self.max_grid_size1 + 1
            a = np.random.random(a_shape).astype("float32")
            if axis is None:
                # Avoid floating point error
                a = np.sign(a - 0.5).astype("float32")
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a))

            # Use recursive cumsum (along accumulation axis)
            a_shape = [3, 3, 3]
            a_shape[shape_axis] = block_max_size * (
                block_max_size + 1) + 2
            a = np.random.random(a_shape).astype("float32")
            a = np.sign(a - 0.5).astype(
                "float32")  # Avoid floating point error
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a))

    def test_GpuCumsum4D(self):
        # Should not use the GPU version.
        x = T.ftensor4('x')
        f = theano.function([x], cumsum(x, axis=1), mode=self.mode)
        assert [n for n in f.maker.fgraph.toposort()
                if isinstance(n.op, CumOp)]
| Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/theano/sandbox/cuda/tests/test_extra_ops.py | Python | agpl-3.0 | 10,133 |
# Copyright 2011-2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Code for handling bug notification recipients in bug mail."""

__metaclass__ = type
__all__ = [
    'BugNotificationRecipients',
    ]

from zope.interface import implements

from lp.services.mail.interfaces import INotificationRecipientSet
from lp.services.mail.notificationrecipientset import NotificationRecipientSet


class BugNotificationRecipients(NotificationRecipientSet):
    """A set of emails and rationales notified for a bug change.

    Each email address registered in a BugNotificationRecipients is
    associated to a string and a header that explain why the address is
    being emailed. For instance, if the email address is that of a
    distribution bug supervisor for a bug, the string and header will make
    that fact clear.

    The string is meant to be rendered in the email footer. The header
    is meant to be used in an X-Launchpad-Message-Rationale header.

    The first rationale registered for an email address is the one
    which will be used, regardless of other rationales being added
    for it later. This gives us a predictable policy of preserving
    the first reason added to the registry; the callsite should
    ensure that the manipulation of the BugNotificationRecipients
    instance is done in preferential order.

    Instances of this class are meant to be returned by
    IBug.getBugNotificationRecipients().
    """
    implements(INotificationRecipientSet)

    def __init__(self, duplicateof=None):
        """Constructs a new BugNotificationRecipients instance.

        If this bug is a duplicate, duplicateof should be used to
        specify which bug ID it is a duplicate of.

        Note that there are two duplicate situations that are
        important:

          - One is when this bug is a duplicate of another bug:
            the subscribers to the main bug get notified of our
            changes.

          - Another is when the bug we are changing has
            duplicates; in that case, direct subscribers of
            duplicate bugs get notified of our changes.

        These two situations are catered respectively by the
        duplicateof parameter above and the addDupeSubscriber method.
        Don't confuse them!
        """
        super(BugNotificationRecipients, self).__init__()
        self.duplicateof = duplicateof
        self.subscription_filters = set()

    def _addReason(self, person, reason, header):
        """Adds a reason (text and header) for a person.

        It takes care of modifying the message when the person is notified
        via a duplicate.
        """
        if self.duplicateof is not None:
            reason = reason + " (via bug %s)" % self.duplicateof.id
            header = header + " via Bug %s" % self.duplicateof.id
        reason = "You received this bug notification because you %s." % reason
        self.add(person, reason, header)

    def addDupeSubscriber(self, person, duplicate_bug=None):
        """Registers a subscriber of a duplicate of this bug."""
        reason = "Subscriber of Duplicate"
        if person.is_team:
            text = ("are a member of %s, which is subscribed "
                    "to a duplicate bug report" % person.displayname)
            reason += " @%s" % person.name
        else:
            text = "are subscribed to a\nduplicate bug report"
        if duplicate_bug is not None:
            text += " (%s)" % duplicate_bug.id
        self._addReason(person, text, reason)

    def addDirectSubscriber(self, person):
        """Registers a direct subscriber of this bug."""
        reason = "Subscriber"
        if person.is_team:
            text = ("are a member of %s, which is subscribed "
                    "to the bug report" % person.displayname)
            reason += " @%s" % person.name
        else:
            text = "are subscribed to the bug report"
        self._addReason(person, text, reason)

    def addAssignee(self, person):
        """Registers an assignee of a bugtask of this bug."""
        reason = "Assignee"
        if person.is_team:
            text = ("are a member of %s, which is a bug assignee"
                    % person.displayname)
            reason += " @%s" % person.name
        else:
            text = "are a bug assignee"
        self._addReason(person, text, reason)

    def addStructuralSubscriber(self, person, target):
        """Registers a structural subscriber to this bug's target."""
        reason = "Subscriber (%s)" % target.displayname
        if person.is_team:
            text = ("are a member of %s, which is subscribed to %s" %
                    (person.displayname, target.displayname))
            reason += " @%s" % person.name
        else:
            text = "are subscribed to %s" % target.displayname
        self._addReason(person, text, reason)

    def update(self, recipient_set):
        """See `INotificationRecipientSet`."""
        super(BugNotificationRecipients, self).update(recipient_set)
        self.subscription_filters.update(
            recipient_set.subscription_filters)

    def addFilter(self, subscription_filter):
        if subscription_filter is not None:
            self.subscription_filters.add(subscription_filter)
| abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/bugs/mail/bugnotificationrecipients.py | Python | agpl-3.0 | 5,342 |
# -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import tapplicant
| nishad-jobsglobal/odoo-marriot | openerp/addons/tapplicant_webcam/__init__.py | Python | agpl-3.0 | 842 |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1102
from datetime import datetime
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.db.models.signals import post_save, post_delete
from django.contrib.auth.models import User
from djangoratings.fields import RatingField
from voting.models import Vote
#XXX: poster deberia ser on_delete null en vez de cascade
# Thumbnails de la API
from sorl.thumbnail import get_thumbnail
from api_v2.utils import get_urlprefix
from django.core.urlresolvers import reverse
from django.db.models import Sum
from generic_aggregation import generic_annotate
from core.lib.strip_accents import strip_accents
class Serie(models.Model):
name = models.CharField(max_length=255)
slug_name = models.SlugField(unique=True, help_text=_('nombre en la URL'))
network = models.ForeignKey("Network", related_name="series")
genres = models.ManyToManyField("Genre", related_name="series")
runtime = models.IntegerField(
name=_('duracion de los episodios'),
blank=True,
null=True,
help_text=_( 'duracion del episodio en minutos' )
)
actors = models.ManyToManyField(
"Actor",
through='Role',
blank=True,
null=True,
editable=False,
help_text=_('actores que trabajaron en la serie'))
description = models.TextField()
finished = models.BooleanField(
default=False,
help_text=_('la serie ha finalizado?')
)
rating = RatingField(
range=5,
can_change_vote=True,
allow_delete=True,
help_text=_('puntuacion de estrellas')
)
poster = models.OneToOneField(
'ImageSerie',
related_name='poster_of',
null=True,
blank=True
)
def ascii_name(self):
return strip_accents(self.name_es)
def __unicode__(self):
return self.name
def save(self, force_insert=False, force_update=False, using=None):
''' When is saved, the title is converted to slug - aka URL'''
self.slug_name = slugify(self.name)
super(Serie, self).save(force_insert, force_update, using)
def url(self):
''' Devuelve la URL para la API (version 2) '''
return get_urlprefix() + reverse('API_v2_serie_detail',
kwargs={'serie_id': self.pk})
def get_absolute_full_url(self):
return get_urlprefix() + reverse('serie.views.get_serie',
kwargs={'serie_slug': self.slug_name, })
@models.permalink
def get_absolute_url(self):
return ('serie.views.get_serie', (),
{'serie_slug': self.slug_name, })
class Role(models.Model):
serie = models.ForeignKey("Serie")
actor = models.ForeignKey("Actor")
sortorder = models.IntegerField(blank=True, null=True)
role = models.CharField(
max_length=255,
help_text=_('personaje que el actor ha hecho en la serie')
)
class Meta:
unique_together = ("serie", "actor", "role")
def __unicode__(self):
return self.role
class SerieAlias(models.Model):
name = models.CharField(
max_length=255,
unique=True,
help_text=_('otros nombres para la misma serie')
)
serie = models.ForeignKey("Serie", related_name="aliases")
class Season(models.Model):
serie = models.ForeignKey('Serie', related_name="season", editable=False)
season = models.IntegerField(name=_("Temporada"))
def get_next_season(self):
next_season = self.season + 1
try:
return Season.objects.get(season=next_season, serie=self.serie)
except:
return None
def get_previous_season(self):
prev_season = self.season - 1
try:
return Season.objects.get(season=prev_season, serie=self.serie)
except:
return None
def __unicode__(self):
''' Serie Name - Season '''
return self.serie.name + ' - ' + str(self.season)
@models.permalink
def get_absolute_url(self):
return ('serie.ajax.season_lookup', (), {
'serie_id': self.serie.id,
'season': self.season,
})
@models.permalink
def get_season(self):
return ('serie.ajax.season_full_links_lookup', (), {
'serie_slug': self.serie.slug_name,
'season': self.season,
})
def get_default_user_for_links():
default_user = settings.DEFAULT_USER_FOR_LINKS
try:
user = User.objects.get(username=default_user)
return user
except User.DoesNotExist:
raise NameError(
"Debes crear un usuario valido para DEFAULT_USER_FOR_LINKS llamado %s" % (default_user)
)
class LinkSeason(models.Model):
season = models.ForeignKey("Season", related_name="links", editable=False)
url = models.CharField(
max_length=255,
unique=True,
db_index=True,
verbose_name="URL"
)
audio_lang = models.ForeignKey("Languages", related_name="audio_langs_season", verbose_name="Idioma")
subtitle = models.ForeignKey(
"Languages",
related_name="sub_langs_season",
null=True,
blank=True,
verbose_name="Subtitulos",
)
user = models.ForeignKey(
User,
related_name="user",
editable=False,
default=get_default_user_for_links
)
pub_date = models.DateTimeField(default=datetime.now, editable=False)
# For link checker, so it can deactivate and/or change the check_date
is_active = models.BooleanField(default=True, editable=False)
check_date = models.DateTimeField(null=True, blank=True, editable=False)
def __unicode__(self):
return self.url
class Episode(models.Model):
season = models.ForeignKey(
'Season',
related_name="episodes",
editable=False
)
air_date = models.DateField(
_('Fecha de emision'),
blank=True,
null=True,
)
title = models.CharField(
_('Titulo'),
max_length=255
)
episode = models.IntegerField(
_('Episodio'),
help_text=_( 'Numero de episodio en temporada' )
)
description = models.TextField(null=True, blank=True)
created_time = models.DateField(auto_now_add=True)
modified_time = models.DateField(auto_now=True)
poster = models.OneToOneField(
'ImageEpisode',
related_name='poster_of',
null=True,
blank=True,
editable=False
)
def get_next_episode(self):
next_epi = self.episode + 1
try:
return Episode.objects.get(episode=next_epi, season=self.season)
except:
return None
def get_next_5_episodes(self):
''' Muestra los proximos 5 episodios '''
try:
episodes = Episode.objects.filter(episode__gt=self.episode, season=self.season)
if episodes.count() < 5:
# Si no hay episodios suficientes los busca en la siguiente temporada
next_season = self.season.get_next_season()
next_episodes = Episode.objects.filter(episode__gte=1, season=next_season)
episodes = episodes | next_episodes
return episodes[:5]
except:
return None
def get_previous_episode(self):
prev_epi = self.episode - 1
try:
return Episode.objects.get(episode=prev_epi, season=self.season)
except:
return None
def season_episode(self):
return "S%02dE%02d" % (self.season.season, self.episode)
def get_absolute_url(self):
return '/serie/%s/episode/S%02dE%02d/' % (
self.season.serie.slug_name,
self.season.season,
self.episode
)
def get_add_link_url(self):
return '/serie/%s/episode/S%02dE%02d/add/' % (
self.season.serie.slug_name,
self.season.season,
self.episode
)
def from_future(self):
now = datetime.now().date()
air_date = self.air_date
if now < air_date:
return 'future'
else:
return ''
def __unicode__(self):
return self.title
class SorterManager(models.Manager):
def sorted_by_votes(self):
return generic_annotate(self, Vote.object, Sum('vote'))
class Link(models.Model):
episode = models.ForeignKey(
"Episode",
related_name="links",
editable=False
)
url = models.CharField(
max_length=255,
unique=True,
db_index=True,
verbose_name="URL"
)
audio_lang = models.ForeignKey(
"Languages",
related_name="audio_langs",
verbose_name="Idioma"
)
subtitle = models.ForeignKey(
"Languages",
related_name="sub_langs",
null=True,
blank=True,
verbose_name="Subtitulos",
)
user = models.ForeignKey(
User,
related_name="link_user",
editable=False,
default=get_default_user_for_links
)
pub_date = models.DateTimeField(
default=datetime.now,
help_text=_('cuando se ha subido el link? por defecto cuando se guarda'),
editable=False
)
# For link checker, so it can deactivate and/or change the check_date
is_active = models.BooleanField(default=True, editable=False)
check_date = models.DateTimeField(null=True, blank=True, editable=False)
objects = SorterManager()
def __unicode__(self):
return self.url
def get_score(self):
return Vote.objects.get_score(self)['score']
class SubtitleLink(models.Model):
''' For external subtitles '''
url = models.CharField(max_length=255)
lang = models.ForeignKey("Languages")
link = models.ForeignKey("Link", related_name="subtitles")
def __unicode__(self):
return self.url
class Languages(models.Model):
''' Languages for links '''
iso_code = models.CharField(max_length=2)
country = models.CharField(max_length=2, null=True, blank=True)
class Meta:
unique_together = ("iso_code", "country")
def __unicode__(self):
if self.country:
return "%s-%s" % (self.iso_code, self.country)
return self.iso_code
class Network(models.Model):
name = models.CharField(max_length=25)
url = models.URLField(null=True, blank=True)
slug_name = models.SlugField(unique=True, help_text=_('nombre en URL'))
def save(self, force_insert=False, force_update=False, using=None):
''' When is saved, the name is converted to slug - aka URL'''
if not self.slug_name:
self.slug_name = slugify(self.name)
super(Network, self).save(force_insert, force_update, using)
def __unicode__(self):
return self.name
class Genre(models.Model):
name = models.CharField(max_length=25)
slug_name = models.SlugField(unique=True, help_text=_('nombre en URL'))
def save(self, force_insert=False, force_update=False, using=None):
''' When is saved, the name is converted to slug - aka URL'''
if not self.slug_name:
self.slug_name = slugify(self.name)
super(Genre, self).save(force_insert, force_update, using)
def __unicode__(self):
return self.name
class Actor(models.Model):
name = models.CharField(max_length=100)
slug_name = models.SlugField(unique=True, help_text=_('nombre en URL'))
poster = models.OneToOneField(
'ImageActor',
related_name='poster_of',
null=True,
blank=True
)
def save(self, force_insert=False, force_update=False, using=None):
''' When is saved, the name is converted to slug - aka URL'''
if not self.slug_name:
self.slug_name = slugify(self.name)
super(Actor, self).save(force_insert, force_update, using)
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('get_actor', [str(self.slug_name)])
class ImageSerie(models.Model):
title = models.CharField(max_length=100)
src = models.ImageField(upload_to="img/serie")
creator = models.CharField(max_length=100, null=True, blank=True)
is_poster = models.BooleanField(
help_text=_('entre varias imagenes, cual es el poster?')
)
serie = models.ForeignKey("Serie", related_name="images")
objects = models.Manager()
def thumbnail(self):
''' Para la API, conseguir thumbnail '''
urlprefix = get_urlprefix()
return urlprefix + get_thumbnail(self.src, '400x300').url
def __unicode__(self):
return self.title
class ImageActor(models.Model):
title = models.CharField(max_length=100)
src = models.ImageField(upload_to="img/actor")
creator = models.CharField(max_length=100, null=True, blank=True)
is_poster = models.BooleanField(
help_text=_('entre varias imagenes, cual es el poster?')
)
actor = models.ForeignKey("Actor", related_name="images")
objects = models.Manager()
def __unicode__(self):
return self.title
class ImageEpisode(models.Model):
title = models.CharField(max_length=100)
src = models.ImageField(upload_to="img/episodes")
creator = models.CharField(max_length=100, null=True, blank=True)
is_poster = models.BooleanField(
help_text=_('entre varias imagenes, cual es el poster?')
)
episode = models.ForeignKey("Episode", related_name="images")
objects = models.Manager()
def thumbnail(self):
''' Para la API, conseguir thumbnail '''
urlprefix = get_urlprefix()
return urlprefix + get_thumbnail(self.src, '200x150').url
def __unicode__(self):
return self.title
poster_dispatch = {
ImageSerie: "serie",
ImageEpisode: "episode",
ImageActor: "actor",
}
def update_poster(sender, instance, **kwargs):
obj = getattr(instance, poster_dispatch[sender])
if instance.is_poster:
obj.poster = instance
else:
other_poster = sender.objects.filter(**{poster_dispatch[sender]:obj, "is_poster":True}).all()
if other_poster:
obj.poster = other_poster[0]
else:
obj.poster = None
obj.save()
def delete_poster(sender, instance, **kwargs):
obj = getattr(instance, poster_dispatch[sender])
other_poster = sender.objects.filter(**{poster_dispatch[sender]:obj, "is_poster":True}).all()
if other_poster:
obj.poster = other_poster[0]
else:
obj.poster = None
obj.save()
for sender in poster_dispatch.keys():
post_save.connect(update_poster, sender=sender)
post_delete.connect(update_poster, sender=sender)
| alabs/petateca | petateca/apps/serie/models.py | Python | agpl-3.0 | 14,856 |
from time import sleep
from machine import Pin

defaultDelay = 1

pinMap = (16, 5, 4, 0, 2, 14, 12, 13, 15, 3)
outputs = [Pin(gpio, Pin.OUT) for gpio in pinMap[:8]]


def show_code(name='main.py'):
    with open(name) as f:
        for line in f.readlines():
            print(line, end='')


def allOff():
    for output in outputs:
        output.low()


def allOn():
    for output in outputs:
        output.high()


def flashOne(index, delay=defaultDelay):
    outputs[index].high()
    sleep(delay)
    outputs[index].low()
    sleep(delay)


def flashAll(delay=defaultDelay):
    allOn()
    sleep(delay)
    allOff()
    sleep(delay)


def sequenceAll(positions=range(len(outputs)), delay=defaultDelay):
    for position in positions:
        flashOne(position, delay=delay)


bowl = outputs[3]
fishMap = [1, 2, 4]
fishes = [outputs[pos] for pos in fishMap]


def run():
    allOff()
    bowl.high()
    while True:
        fishes[0].high()
        sleep(5)
        fishes[0].low()
        fishes[1].high()
        sleep(1)
        fishes[1].low()
        fishes[2].high()
        sleep(1)
        fishes[2].low()
        sleep(5)


run()
| ShrimpingIt/tableaux | regimes/16_fishbowl/main.py | Python | agpl-3.0 | 1,009 |
%pythoncode{
class Wilcoxon:
    def __init__(self, data, numGenes, class_1_size, class_2_size):
        scores_vector = DoubleVector()
        for x in range(0, numGenes):
            scores_vector.push_back(0.0)
        runWilcoxonTest(data, numGenes, class_1_size, class_2_size, scores_vector)
        self.scores = [x for x in scores_vector]
        self.sorted = None
        self.cheat = len(self.scores) - 1

    def getScores(self):
        return self.scores

    def filterAdjust(self, filter):
        """
        This function takes an integer and determines how many wilcoxon scores
        are the same as the filterth element
        In TSP and TST this acts as an adjustment to the number of genes that are
        considered
        """
        if self.sorted is None:
            self.sorted = sorted(self.scores, reverse=True)
        val = self.sorted[filter]
        counter = filter
        if self.sorted[self.cheat] == val and counter < self.cheat:
            counter = self.cheat
        n = len(self.sorted)
        while counter < n and val == self.sorted[counter]:
            counter += 1
        self.cheat = counter - 1
        return counter - 1
}
| JohnCEarls/AUREA | src/AUREA/learner/src/wilcoxon_supp.py | Python | agpl-3.0 | 1,198 |
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2016 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import operator
import random
from six.moves.urllib.parse import urlparse, urljoin
from django.contrib.auth.models import User, Group
from django.core.cache import cache
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from whoosh.qparser import MultifieldParser, QueryParser
from whoosh.query import And, Every, Or, Term
from whoosh.sorting import FieldFacet, FunctionFacet
from wirecloud.commons.searchers import get_search_engine
from wirecloud.commons.utils.http import get_absolute_reverse_url
from wirecloud.commons.utils.template.parsers import TemplateParser
from wirecloud.commons.utils.version import Version
@python_2_unicode_compatible
class CatalogueResource(models.Model):
RESOURCE_TYPES = ('widget', 'mashup', 'operator')
RESOURCE_MIMETYPES = ('application/x-widget+mashable-application-component', 'application/x-mashup+mashable-application-component', 'application/x-operator+mashable-application-component')
TYPE_CHOICES = (
(0, 'Widget'),
(1, 'Mashup'),
(2, 'Operator'),
)
vendor = models.CharField(_('Vendor'), max_length=250)
short_name = models.CharField(_('Name'), max_length=250)
version = models.CharField(_('Version'), max_length=150)
type = models.SmallIntegerField(_('Type'), choices=TYPE_CHOICES, null=False, blank=False)
# Person who added the resource to catalogue!
creator = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True, related_name='uploaded_resources')
public = models.BooleanField(_('Available to all users'), default=False)
users = models.ManyToManyField(User, verbose_name=_('Users'), related_name='local_resources', blank=True)
groups = models.ManyToManyField(Group, verbose_name=_('Groups'), related_name='local_resources', blank=True)
creation_date = models.DateTimeField('creation_date')
template_uri = models.CharField(_('templateURI'), max_length=200, blank=True)
popularity = models.DecimalField(_('popularity'), default=0, max_digits=2, decimal_places=1)
json_description = models.TextField(_('JSON description'))
@property
def local_uri_part(self):
return self.vendor + '/' + self.short_name + '/' + self.version
@property
def cache_version_key(self):
return '_catalogue_resource_version/%s' % self.id
@property
def cache_version(self):
version = cache.get(self.cache_version_key)
if version is None:
version = random.randrange(1, 100000)
cache.set(self.cache_version_key, version)
return version
def invalidate_cache(self):
try:
cache.incr(self.cache_version_key)
except ValueError:
pass
def is_available_for(self, user):
return self.public or self.users.filter(id=user.id).exists() or len(set(self.groups.all()) & set(user.groups.all())) > 0
def is_removable_by(self, user):
return user.is_superuser or self.creator == user
def get_template_url(self, request=None, for_base=False, url_pattern_name='wirecloud_catalogue.media'):
return get_template_url(self.vendor, self.short_name, self.version, '' if for_base else self.template_uri, request=request, url_pattern_name=url_pattern_name)
def get_template(self, request=None, url_pattern_name='wirecloud_catalogue.media'):
template_uri = self.get_template_url(request=request, url_pattern_name=url_pattern_name)
parser = TemplateParser(self.json_description, base=template_uri)
return parser
def get_processed_info(self, request=None, lang=None, process_urls=True, translate=True, process_variables=False, url_pattern_name='wirecloud_catalogue.media'):
if translate and lang is None:
from django.utils import translation
lang = translation.get_language()
else:
lang = None
parser = self.get_template(request, url_pattern_name=url_pattern_name)
return parser.get_resource_processed_info(lang=lang, process_urls=process_urls, translate=True, process_variables=process_variables)
def delete(self, *args, **kwargs):
from wirecloud.catalogue.utils import wgt_deployer
old_id = self.id
super(CatalogueResource, self).delete(*args, **kwargs)
# Preserve the id attribute a bit more so CatalogueResource methods can use it
self.id = old_id
# Undeploy the resource from the filesystem
try:
wgt_deployer.undeploy(self.vendor, self.short_name, self.version)
except:
# TODO log this error
pass # ignore errors
# Remove cache for this resource
self.invalidate_cache()
# Remove document from search indexes
try:
with get_search_engine('resource').get_batch_writer() as writer:
writer.delete_by_term('pk', '%s' % old_id)
except:
pass # ignore errors
# Remove id attribute definetly
self.id = None
def resource_type(self):
return self.RESOURCE_TYPES[self.type]
@property
def mimetype(self):
return self.RESOURCE_MIMETYPES[self.type]
class Meta:
unique_together = ("short_name", "vendor", "version")
def __str__(self):
return self.local_uri_part
def get_template_url(vendor, name, version, url, request=None, url_pattern_name='wirecloud_catalogue.media'):
if urlparse(url).scheme == '':
template_url = get_absolute_reverse_url(url_pattern_name, kwargs={
'vendor': vendor,
'name': name,
'version': version,
'file_path': url
}, request=request)
else:
template_url = url
return template_url
def add_absolute_urls(results, request=None):
for hit in results:
base_url = get_template_url(hit['vendor'], hit['name'], hit['version'], hit['template_uri'], request=request)
hit['uri'] = "/".join((hit['vendor'], hit['name'], hit['version']))
hit['image'] = "" if hit['image'] == '' else urljoin(base_url, hit['image'])
hit['smartphoneimage'] = "" if hit['image'] == '' else urljoin(base_url, hit['smartphoneimage'])
def add_other_versions(searcher, results, user, staff):
allow_q = []
if not staff:
allow_q = [Or([Term('public', 't'), Term('users', user.username.lower())] +
[Term('groups', group.name.lower()) for group in user.groups.all()])]
for result in results:
user_q = And([Term('vendor_name', '%s/%s' % (result['vendor'], result['name']))] + allow_q)
version_results = [h.fields()['version'] for h in searcher.search(user_q)]
result['others'] = [v for v in version_results if v != result['version']]
return results
def build_search_kwargs(user_q, request, types, staff, orderby):
if not staff:
user_q = And([user_q, Or([Term('public', 't'), Term('users', request.user.username)] +
[Term('groups', group.name) for group in request.user.groups.all()])])
if types and len(types) > 0:
user_q = And([user_q, Or([Term('type', resource_type) for resource_type in types])])
orderby_f = FieldFacet(orderby.replace('-', ''), reverse=orderby.find('-') > -1)
search_kwargs = {
'sortedby': [orderby_f],
'collapse': FieldFacet('vendor_name'),
'collapse_limit': 1,
'collapse_order': FunctionFacet(order_by_version)
}
return (user_q, search_kwargs)
def search(querytext, request, pagenum=1, maxresults=30, staff=False, scope=None,
orderby='-creation_date'):
search_engine = get_search_engine('resource')
search_result = {}
if pagenum < 1:
pagenum = 1
with search_engine.searcher() as searcher:
parser = MultifieldParser(search_engine.default_search_fields, searcher.schema)
user_q = querytext and parser.parse(querytext) or Every()
user_q, search_kwargs = build_search_kwargs(user_q, request, scope, staff, orderby)
hits = searcher.search(user_q, limit=(pagenum * maxresults) + 1, **search_kwargs)
if querytext and hits.is_empty():
correction_q = parser.parse(querytext)
corrected = searcher.correct_query(correction_q, querytext)
if corrected.query != correction_q:
querytext = corrected.string
search_result['corrected_q'] = querytext
user_q, search_kwargs = build_search_kwargs(corrected.query, request, scope, staff, orderby)
hits = searcher.search(user_q, limit=(pagenum * maxresults), **search_kwargs)
search_engine.prepare_search_response(search_result, hits, pagenum, maxresults)
search_result['results'] = add_other_versions(searcher, search_result['results'], request.user, staff)
add_absolute_urls(search_result['results'], request)
return search_result
def suggest(request, prefix='', limit=30):
reader = get_search_engine('resource').open_index().reader()
frequent_terms = {}
for fieldname in ['title', 'vendor', 'description']:
for frequency, term in reader.most_frequent_terms(fieldname, limit, prefix):
if term in frequent_terms:
frequent_terms[term] += frequency
else:
frequent_terms[term] = frequency
# flatten terms
return [term.decode('utf-8') for term, frequency in sorted(frequent_terms.items(), key=operator.itemgetter(1), reverse=True)[:limit]]
def order_by_version(searcher, docnum):
return Version(searcher.stored_fields(docnum)['version'], reverse=True)
| jpajuelo/wirecloud | src/wirecloud/catalogue/models.py | Python | agpl-3.0 | 10,536 |
def quicksort(arr):
    return sorted(arr)
#
| ice1000/OI-codes | codewars/301-400/bug-fix-quick-sort.py | Python | agpl-3.0 | 46 |
#!/usr/bin/env python
# Copyright (C) 2004 Anthony Baxter

from distutils.core import setup

try:
    import py2exe
except:
    py2exe = None

from shtoom import __version__


class DependencyFailed(Exception): pass
class VersionCheckFailed(DependencyFailed): pass


import sys, os

if sys.version < '2.3':
    raise VersionCheckFailed("Python 2.3 or later is required")

try:
    import twisted
except ImportError:
    raise DependencyFailed("You need Twisted - http://www.twistedmatrix.com/")

from twisted.copyright import version as tcversion
if not tcversion.startswith('SVN') and tcversion < '2':
    raise VersionCheckFailed("Twisted 2.0 or later is required")

#try:
#    import zope.interface
#except ImportError:
#    raise DependencyFailed("You need to install zope.interface - http://zope.org/Products/ZopeInterface")

if py2exe is not None:
    addnl = { 'console':['scripts/shtoomphone.py'],
              'windows': [ { 'script':'script/shtoomphone.py',
                             'icon_resources' : [( 1, 'shtoom.ico')] } ] }
else:
    addnl = {}

DataGlobs = ['*.glade','*.gladep','*.gif', '*.png']


def getDataFiles():
    import fnmatch
    files = []
    out = []
    for path, dirnames, filenames in os.walk('shtoom'):
        if '.svn' in dirnames:
            dirnames.remove('.svn')
        wanted = []
        for glob in DataGlobs:
            wanted.extend(fnmatch.filter(filenames, glob))
        if wanted:
            files.extend([os.path.join(path, x) for x in wanted])
    pkgdir = 'lib/python%d.%d/site-packages'%(sys.version_info[:2])
    for f in files:
        out.append([os.path.join(pkgdir,os.path.dirname(f)),(f,)])
    return out


if sys.version_info < (2,4):
    addnl['data_files'] = getDataFiles()
else:
    addnl['data_files'] = []
    addnl['package_data'] = {'': DataGlobs}

addnl['data_files'].extend([('share/shtoom/audio', ['share/shtoom/audio/ring.wav', 'share/shtoom/audio/ringback.wav',],),])

setup(
    name = "shtoom",
    version = __version__,
    description = "Shtoom - SIP stack (including a softphone)",
    author = "Anthony Baxter",
    author_email = "anthony@interlink.com.au",
    url = 'http://shtoom.divmod.org/',
    packages = ['shtoom', 'shtoom.address', 'shtoom.multicast', 'shtoom.avail',
                'shtoom.ui', 'shtoom.rtp', 'shtoom.ui.qtui',
                'shtoom.ui.gnomeui', 'shtoom.ui.qtui', 'shtoom.ui.webui',
                'shtoom.ui.webui.images', 'shtoom.test',
                'shtoom.ui.textui', 'shtoom.ui.tkui', 'shtoom.ui.wxui',
                'shtoom.audio', 'shtoom.app', 'shtoom.doug', 'shtoom.compat' ],
    scripts = ['scripts/shtoomphone.py', 'scripts/shtam.py',
               'scripts/shmessage.py', 'scripts/shecho.py',
               'scripts/shtoominfo.py',
               ],
    classifiers = [
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS :: MacOS X',
        'Programming Language :: Python',
        'Topic :: Internet',
        'Topic :: Communications :: Internet Phone',
    ],
    **addnl
)
| braams/shtoom | setup.py | Python | lgpl-2.1 | 3,212 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# libavg - Media Playback Engine.
# Copyright (C) 2003-2013 Ulrich von Zadow
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Current versions can be found at www.libavg.de
#
# Original authors of this file are
# OXullo Interecans <x at brainrapers dot org>
# Richard Klemm <richy at coding-reality.de>
from collections import defaultdict
from collections import deque
import math
import libavg
from libavg import avg
from touchvisualization import DebugTouchVisualization
from touchvisualization import TouchVisualizationOverlay as TouchVisOverlay
import keyboardmanager as kbmgr
g_fontsize = 10
PANGO_ENTITIES_MAP = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
def subscribe(publisher, msgID, callable_):
publisher.subscribe(msgID, callable_)
return lambda: publisher.unsubscribe(msgID, callable_)
class DebugWidgetFrame(avg.DivNode):
BORDER = 7
FRAME_HEIGHT_CHANGED = avg.Publisher.genMessageID()
def __init__(self, size, widgetCls, *args, **kwargs):
super(DebugWidgetFrame, self).__init__(size=size, *args, **kwargs)
self.registerInstance(self, None)
self.setup(widgetCls)
self.subscribe(self.SIZE_CHANGED, self._onSizeChanged)
self.size = size
self._onSizeChanged(size)
def setup(self, widgetCls):
self.__background = avg.RectNode(parent=self, opacity=0.8,
fillcolor='000000', fillopacity=0.8)
self.__widget = widgetCls(parent=self,
size=(max(0, self.width - self.BORDER * 2), 0),
pos=(self.BORDER, self.BORDER))
self.__selectHighlight = avg.RectNode(parent=self, color="35C0CD",
strokewidth=self.BORDER, opacity=0.8,
pos=(self.BORDER / 2, self.BORDER / 2), active=False, sensitive=False)
self.__boundary = avg.RectNode(parent=self, sensitive=False)
self.publish(DebugWidgetFrame.FRAME_HEIGHT_CHANGED)
self.__widget.subscribe(self.__widget.WIDGET_HEIGHT_CHANGED,
self.adjustWidgetHeight)
self.__widget.update()
def _onSizeChanged(self, size):
self.__boundary.size = size
self.__background.size = size
childSize = (max(0, size[0] - self.BORDER * 2), max(0, size[1] - self.BORDER * 2))
self.__selectHighlight.size = (max(0, size[0] - self.BORDER),
max(0, size[1] - self.BORDER))
self.__widget.size = childSize
self.__widget.syncSize(childSize)
def adjustWidgetHeight(self, height):
self.size = (max(0, self.width), height + 2 * self.BORDER)
self.notifySubscribers(DebugWidgetFrame.FRAME_HEIGHT_CHANGED, [])
def toggleSelect(self, event=None):
self.__selectHighlight.active = not(self.__selectHighlight.active)
def isSelected(self):
return self.__selectHighlight.active
def select(self):
self.__selectHighlight.active = True
def unselect(self):
self.__selectHighlight.active = False
def show(self):
self.active = True
self.__widget.onShow()
self.__widget.update()
def hide(self):
self.active = False
self.__widget.onHide()
@property
def widget(self):
return self.__widget
class DebugWidget(avg.DivNode):
SLOT_HEIGHT = 200
CAPTION = ''
WIDGET_HEIGHT_CHANGED = avg.Publisher.genMessageID()
def __init__(self, parent=None, **kwargs):
super(DebugWidget, self).__init__(**kwargs)
self.registerInstance(self, parent)
self.publish(DebugWidget.WIDGET_HEIGHT_CHANGED)
if self.CAPTION:
self._caption = avg.WordsNode(text=self.CAPTION, pivot=(0, 0),
opacity=0.5, fontsize=14, parent=self)
self._caption.angle = math.pi / 2
self._caption.pos = (self.width, 0)
def syncSize(self, size):
self._caption.width = size[1]
def update(self):
pass
def onShow(self):
pass
def onHide(self):
pass
def kill(self):
pass
NUM_COLS = 10
COL_WIDTH = 60
ROW_HEIGHT = g_fontsize + 2
class TableRow(avg.DivNode):
COL_POS_X = 0
ROW_ID = 0
def __init__(self, parent=None, **kwargs):
super(TableRow, self).__init__(**kwargs)
self.registerInstance(self, parent)
global NUM_COLS
NUM_COLS = int((self.parent.width - COL_WIDTH * 4) / COL_WIDTH)
self._initRow()
TableRow.ROW_ID += 1
def _initRow(self):
self.columnBackground = avg.RectNode(parent=self, fillcolor="222222",
fillopacity=0.6, opacity=0)
self.columnContainer = avg.DivNode(parent=self)
if TableRow.ROW_ID % 2 != 0:
self.columnBackground.fillopacity = 0
self.cols = [0] * NUM_COLS
self.liveColumn = avg.WordsNode(parent=self.columnContainer, fontsize=g_fontsize,
text="N/A - SPECIAL", size=(COL_WIDTH, ROW_HEIGHT), variant="bold")
for i in xrange(0, NUM_COLS):
self.cols[i] = (avg.WordsNode(parent=self.columnContainer,
fontsize=g_fontsize,
text="0", size=(COL_WIDTH / 2.0, ROW_HEIGHT),
pos=((i+1) * COL_WIDTH, 0)),
avg.WordsNode(parent=self.columnContainer,
fontsize=g_fontsize,
text="(0)", size=(COL_WIDTH / 2.0, ROW_HEIGHT),
pos=((i+1) * COL_WIDTH + COL_WIDTH / 2, 0),
color="000000"))
self.rowData = deque([(0, 0)] * (NUM_COLS + 1), maxlen=NUM_COLS + 1)
self.label = avg.WordsNode(parent=self, fontsize=g_fontsize, variant="bold")
self.setLabel("NONE")
@property
def height(self):
return self.label.height
def setLabel(self, label):
if self.label.text == label + ":":
return
self.label.text = label + ":"
TableRow.COL_POS_X = max(TableRow.COL_POS_X, self.label.width)
if self.label.width < TableRow.COL_POS_X:
self.parent.labelColumnSizeChanged()
def resizeLabelColumn(self):
self.columnContainer.pos = (TableRow.COL_POS_X + 10, 0)
self.columnBackground.size = (self.columnContainer.x + self.liveColumn.x +
self.liveColumn.width, g_fontsize)
def insertValue(self, data):
prevValue = self.rowData[0][0]
self.rowData.appendleft([data, data-prevValue])
for i in xrange(0, len(self.rowData)-1):
val, diff = self.rowData[i]
column = self.cols[i]
column[0].text = str(val)
column[1].text = "({diff})".format(diff=diff)
column[1].pos = (column[0].x + column[0].getLineExtents(0)[0] + 2,
column[0].y)
if diff == 0:
column[1].color = "000000"
elif diff < 0:
column[1].color = "00FF00"
else:
column[1].color = "FF0000"
def updateLiveColumn(self, value):
self.liveColumn.text = str(value)
class Table(avg.DivNode):
def __init__(self, parent=None, **kwargs):
super(Table, self).__init__(**kwargs)
self.registerInstance(self, parent)
def labelColumnSizeChanged(self):
for childID in xrange(0, self.getNumChildren()):
child = self.getChild(childID)
child.resizeLabelColumn()
class ObjectDumpWidget(DebugWidget):
CAPTION = 'Objects count'
def __init__(self, parent=None, **kwargs):
super(ObjectDumpWidget, self).__init__(**kwargs)
self.registerInstance(self, parent)
self.tableContainer = Table(parent=self, size=(self.width, self.SLOT_HEIGHT))
self.tableDivs = defaultdict(lambda: TableRow(parent=self.tableContainer))
def update(self):
objDump = libavg.player.getTestHelper().getObjectCount()
pos = (0, 0)
for key in sorted(objDump.iterkeys()):
val = objDump[key]
self.tableDivs[key].updateLiveColumn(val)
self.tableDivs[key].setLabel(key)
self.tableDivs[key].pos = pos
pos = (0, pos[1] + self.tableDivs[key].height)
height = len(objDump) * self.tableDivs[key].height
if self.height != height:
self.notifySubscribers(DebugWidget.WIDGET_HEIGHT_CHANGED, [height])
def persistColumn(self):
objDump = libavg.player.getTestHelper().getObjectCount()
for key, val in objDump.iteritems():
self.tableDivs[key].insertValue(val)
def syncSize(self, size):
self.tableContainer.size = (size[0], size[1] - (g_fontsize + 2))
def onShow(self):
self.intervalID = libavg.player.setInterval(1000, self.update)
kbmgr.bindKeyDown(keystring='i',
handler=self.persistColumn,
help="Object count snapshot",
modifiers=libavg.KEYMOD_CTRL)
def onHide(self):
if self.intervalID:
libavg.player.clearInterval(self.intervalID)
self.intervalID = None
kbmgr.unbindKeyDown(keystring='i', modifiers=libavg.KEYMOD_CTRL)
def kill(self):
self.onHide()
self.tableDivs = None
class GraphWidget(DebugWidget):
def __init__(self, **kwargs):
super(GraphWidget, self).__init__(**kwargs)
self.registerInstance(self, None)
self.__graph = None
def onShow(self):
if self.__graph:
self.__graph.active = True
else:
self.__graph = self._createGraph()
def onHide(self):
if self.__graph:
self.__graph.active = False
def kill(self):
self.__graph.unlink(True)
def _createGraph(self):
pass
class MemoryGraphWidget(GraphWidget):
CAPTION = 'Memory usage'
def _createGraph(self):
return libavg.graph.AveragingGraph(parent=self, size=self.size,
getValue=avg.getMemoryUsage)
class FrametimeGraphWidget(GraphWidget):
CAPTION = 'Time per frame'
def _createGraph(self):
return libavg.graph.SlidingBinnedGraph(parent=self,
getValue=libavg.player.getFrameTime,
binsThresholds=[0.0, 20.0, 40.0, 80.0, 160.0],
size=self.size)
class GPUMemoryGraphWidget(GraphWidget):
CAPTION = 'GPU Memory usage'
def _createGraph(self):
try:
libavg.player.getVideoMemUsed()
except RuntimeError:
return avg.WordsNode(parent=self,
text='GPU memory graph is not supported on this hardware',
color='ff5555')
else:
return libavg.graph.AveragingGraph(parent=self, size=self.size,
getValue=libavg.player.getVideoMemUsed)
class KeyboardManagerBindingsShower(DebugWidget):
CAPTION = 'Keyboard bindings'
def __init__(self, *args, **kwargs):
super(KeyboardManagerBindingsShower, self).__init__(**kwargs)
self.registerInstance(self, None)
self.keybindingWordNodes = []
kbmgr.publisher.subscribe(kbmgr.publisher.BINDINGS_UPDATED, self.update)
def clear(self):
for node in self.keybindingWordNodes:
node.unlink(True)
self.keybindingWordNodes = []
def update(self):
self.clear()
for binding in kbmgr.getCurrentBindings():
keystring = binding.keystring.decode('utf8')
modifiersStr = self.__modifiersToString(binding.modifiers)
if modifiersStr is not None:
key = '%s-%s' % (modifiersStr, keystring)
else:
key = keystring
if binding.type == libavg.avg.KEYDOWN:
key = '%s %s' % (unichr(8595), key)
else:
key = '%s %s' % (unichr(8593), key)
node = avg.WordsNode(
text='<span size="large"><b>%s</b></span>: %s' %
(key, binding.help),
fontsize=g_fontsize, parent=self)
self.keybindingWordNodes.append(node)
self._placeNodes()
def _placeNodes(self):
if not self.keybindingWordNodes:
return
maxWidth = max([node.width for node in self.keybindingWordNodes])
columns = int(self.parent.width / maxWidth)
rows = len(self.keybindingWordNodes) / columns
remainder = len(self.keybindingWordNodes) % columns
if remainder != 0:
rows += 1
colSize = self.parent.width / columns
currentColumn = 0
currentRow = 0
heights = [0] * columns
for node in self.keybindingWordNodes:
if currentRow == rows and currentColumn < columns - 1:
currentRow = 0
currentColumn += 1
node.pos = (currentColumn * colSize, heights[currentColumn])
heights[currentColumn] += node.height
currentRow += 1
finalHeight = max(heights)
if self.height != finalHeight:
self.notifySubscribers(self.WIDGET_HEIGHT_CHANGED, [finalHeight])
def __modifiersToString(self, modifiers):
def isSingleBit(number):
bitsSet = 0
for i in xrange(8):
if (1 << i) & number:
bitsSet += 1
return bitsSet == 1
if modifiers in (0, kbmgr.KEYMOD_ANY):
return None
allModifiers = []
for mod in dir(avg):
if 'KEYMOD_' in mod:
maskVal = int(getattr(avg, mod))
if isSingleBit(maskVal):
allModifiers.append((maskVal, mod))
modifiersStringsList = []
for modval, modstr in allModifiers:
if modifiers & modval:
modifiersStringsList.append(modstr.replace('KEYMOD_', ''))
for doubleMod in ['CTRL', 'META', 'SHIFT']:
left = 'L' + doubleMod
right = 'R' + doubleMod
if left in modifiersStringsList and right in modifiersStringsList:
modifiersStringsList.remove(left)
modifiersStringsList.remove(right)
modifiersStringsList.append(doubleMod)
return '/'.join(modifiersStringsList).lower()
class DebugPanel(avg.DivNode):
def __init__(self, parent=None, fontsize=10, **kwargs):
super(DebugPanel, self).__init__(**kwargs)
self.registerInstance(self, parent)
avg.RectNode(size=self.size, opacity=0, fillopacity=0.3, fillcolor='ff0000',
parent=self)
avg.WordsNode(text='Debug panel', fontsize=fontsize,
pos=(0, self.height - fontsize - fontsize / 3),
parent=self)
self.sensitive = False
self.active = False
self.__panel = None
self.__callables = []
self.__fontsize = fontsize
self.__touchVisOverlay = None
def setupKeys(self):
kbmgr.bindKeyDown(keystring='g',
handler=lambda: self.toggleWidget(GPUMemoryGraphWidget),
help="GPU memory graph",
modifiers=libavg.avg.KEYMOD_CTRL)
kbmgr.bindKeyDown(keystring='m',
handler=lambda: self.toggleWidget(MemoryGraphWidget),
help="Memory graph",
modifiers=libavg.avg.KEYMOD_CTRL)
kbmgr.bindKeyDown(keystring='f',
handler=lambda: self.toggleWidget(FrametimeGraphWidget),
help="Frametime graph",
modifiers=libavg.avg.KEYMOD_CTRL)
kbmgr.bindKeyDown(keystring='?',
handler=lambda: self.toggleWidget(KeyboardManagerBindingsShower),
help="Show keyboard bindings",
modifiers=kbmgr.KEYMOD_ANY)
kbmgr.bindKeyDown(keystring='o',
handler=lambda: self.toggleWidget(ObjectDumpWidget),
help="Object count table",
modifiers=libavg.avg.KEYMOD_CTRL)
kbmgr.bindKeyDown(keystring='v', handler=self.toggleTouchVisualization,
help="Cursor visualization",
modifiers=libavg.avg.KEYMOD_CTRL)
def addWidget(self, widgetCls, *args, **kwargs):
callable_ = lambda: self.__panel.addWidget(widgetCls, *args, **kwargs)
if self.__panel:
callable_()
else:
self.__callables.append(callable_)
def toggleWidget(self, *args, **kwargs):
if not self.active:
self.show()
self.__panel.ensureWidgetWisible(*args, **kwargs)
else:
self.__panel.toggleWidget(*args, **kwargs)
if not self.__panel.activeWidgetClasses:
self.hide()
def hide(self):
if self.__panel and self.active:
self.__panel.hide()
self.active = False
def show(self):
if self.__panel:
if not self.active:
self.__panel.show()
else:
self.forceLoadPanel()
self.active = True
def toggleVisibility(self):
if self.active:
self.hide()
else:
self.show()
def toggleTouchVisualization(self):
if self.__touchVisOverlay is None:
self.__touchVisOverlay = TouchVisOverlay(
isDebug=True,
visClass=DebugTouchVisualization,
size=self.parent.size,
parent=self.parent)
else:
self.__touchVisOverlay.unlink(True)
self.__touchVisOverlay = None
def forceLoadPanel(self):
if self.__panel is None:
self.__panel = _DebugPanel(parent=self, size=self.size,
fontsize=self.__fontsize)
for callable_ in self.__callables:
callable_()
class _DebugPanel(avg.DivNode):
def __init__(self, parent=None, fontsize=10, **kwargs):
super(_DebugPanel, self).__init__(**kwargs)
self.registerInstance(self, parent)
self.__slots = []
self.maxSize = self.size
self.size = (self.size[0], 0)
self.activeWidgetClasses = []
self.__selectedWidget = None
global g_fontsize
g_fontsize = fontsize
self.show()
def show(self):
for widgetFrame in self.__slots:
if widgetFrame:
widgetFrame.show()
self.updateWidgets()
def hide(self):
for widget in self.__slots:
if widget:
widget.hide()
def ensureWidgetWisible(self, widgetClass, *args, **kwargs):
if not widgetClass in self.activeWidgetClasses:
self.toggleWidget(widgetClass, *args, **kwargs)
def toggleWidget(self, widgetClass, *args, **kwargs):
if widgetClass in self.activeWidgetClasses:
self._removeWidgetByClass(widgetClass)
else:
self.addWidget(widgetClass, *args, **kwargs)
def addWidget(self, widgetClass, *args, **kwargs):
if widgetClass in self.activeWidgetClasses:
libavg.logger.warning("You can't add the same widget twice")
return
widgetFrame = DebugWidgetFrame((max(0, self.width), DebugWidget.SLOT_HEIGHT),
widgetClass)
height = 0
for frame in self.__slots:
if frame:
height += frame.height
height += widgetFrame.height
if height > self.maxSize[1]:
libavg.logger.warning("No vertical space left. "
"Delete a widget and try again")
return False
self.appendChild(widgetFrame)
widgetPlaced = False
for idx, slot in enumerate(self.__slots):
if slot is None:
self.__slots[idx] = widgetFrame
widgetPlaced = True
break
if not widgetPlaced:
self.__slots.append(widgetFrame)
widgetFrame.subscribe(widgetFrame.FRAME_HEIGHT_CHANGED, self._heightChanged)
self.reorderWidgets()
widgetFrame.show()
self.updateWidgets()
self.activeWidgetClasses.append(widgetClass)
def _removeWidgetByClass(self, widgetClass):
for frame in self.__slots:
if frame and frame.widget.__class__ == widgetClass:
self.removeWidgetFrame(frame)
return
def _heightChanged(self):
height = 0
for childID in xrange(0, self.getNumChildren()):
child = self.getChild(childID)
height += child.height
self.height = height
self.reorderWidgets()
def updateWidgets(self):
for childID in xrange(0, self.getNumChildren()):
self.getChild(childID).widget.update()
def selectWidget(self, id):
id = id % self.getNumChildren()
for childID in xrange(0, self.getNumChildren()):
self.getChild(childID).unselect()
self.getChild(id).select()
self.__selectedWidget = id
def selectPreviousWidget(self):
if self.__selectedWidget is None:
self.selectWidget(-1)
else:
self.selectWidget(self.__selectedWidget - 1)
def selectNextWidget(self):
if self.__selectedWidget is None:
self.selectWidget(0)
else:
self.selectWidget(self.__selectedWidget + 1)
def removeWidgetFrame(self, widgetFrame):
self.activeWidgetClasses.remove(widgetFrame.widget.__class__)
for idx, slot in enumerate(self.__slots):
if slot == widgetFrame:
self.__slots[idx] = None
break
widgetFrame.widget.kill()
widgetFrame.unlink(True)
self.reorderWidgets()
self.updateWidgets()
def removeSelectedWidgetFrames(self):
candidates = []
for childID in xrange(0, self.getNumChildren()):
child = self.getChild(childID)
if child.isSelected():
candidates.append(child)
for widgetFrame in candidates:
self.removeWidgetFrame(widgetFrame)
self.__selectedWidget = None
def reorderWidgets(self):
        # TODO: This is not real layout management yet.
count = 0
height = 0
for idx, widgetFrame in enumerate(self.__slots):
if widgetFrame:
widgetFrame.pos = (0, height)
count += 1
height += widgetFrame.height
self.size = (self.maxSize[0], height)
|
lynxis/libavg
|
src/python/app/debugpanel.py
|
Python
|
lgpl-2.1
| 23,377
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
import llnl.util.tty as tty
import spack
import spack.cmd
description = "fetch archives for packages"
section = "build"
level = "long"
def setup_parser(subparser):
subparser.add_argument(
'-n', '--no-checksum', action='store_true', dest='no_checksum',
help="do not check packages against checksum")
subparser.add_argument(
'-m', '--missing', action='store_true',
help="also fetch all missing dependencies")
subparser.add_argument(
'-D', '--dependencies', action='store_true',
help="also fetch all dependencies")
subparser.add_argument(
'packages', nargs=argparse.REMAINDER,
help="specs of packages to fetch")
def fetch(parser, args):
if not args.packages:
tty.die("fetch requires at least one package argument")
if args.no_checksum:
spack.do_checksum = False
specs = spack.cmd.parse_specs(args.packages, concretize=True)
for spec in specs:
if args.missing or args.dependencies:
for s in spec.traverse(deptype_query=all):
package = spack.repo.get(s)
if args.missing and package.installed:
continue
package.do_fetch()
package = spack.repo.get(spec)
package.do_fetch()
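# Illustrative invocations (package names below are placeholders):
#   spack fetch mpileaks              # fetch the archive for one spec
#   spack fetch --dependencies zlib   # also fetch archives for its dependencies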
|
wscullin/spack
|
lib/spack/spack/cmd/fetch.py
|
Python
|
lgpl-2.1
| 2,513
|
#!/usr/bin/env python3
# Copyright (C) 2016 Kaspar Schleiser <kaspar@schleiser.de>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import sys
sys.path.append(os.path.join(os.environ['RIOTBASE'], 'dist/tools/testrunner'))
import testrunner
def testfunc(child):
    child.expect(r'Available timers: (\d+)')
timers_num = int(child.match.group(1))
for timer in range(timers_num):
child.expect_exact('Testing TIMER_{}'.format(timer))
child.expect_exact('TIMER_{}: initialization successful'.format(timer))
child.expect_exact('TIMER_{}: stopped'.format(timer))
child.expect_exact('TIMER_{}: starting'.format(timer))
child.expect('TEST SUCCEEDED')
if __name__ == "__main__":
sys.exit(testrunner.run(testfunc))
|
ks156/RIOT
|
tests/periph_timer/tests/01-run.py
|
Python
|
lgpl-2.1
| 888
|
from gi.repository import Gtk
def view_focus_tool(view):
"""This little tool ensures the view grabs focus when a mouse press or
touch event happens."""
gesture = (
Gtk.GestureSingle(widget=view)
if Gtk.get_major_version() == 3
else Gtk.GestureSingle()
)
gesture.connect("begin", on_begin)
return gesture
def on_begin(gesture, sequence):
view = gesture.get_widget()
if not view.is_focus():
view.grab_focus()
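# Usage sketch (assuming an existing gaphas view instance named `view`):
#   gesture = view_focus_tool(view)   # keep a reference so it is not collected
#   # Under GTK4 the controller must also be attached explicitly, e.g.
#   # view.add_controller(gesture)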
|
amolenaar/gaphas
|
gaphas/tool/viewfocus.py
|
Python
|
lgpl-2.1
| 475
|
#!/usr/bin/env python
import unittest, sys, multifile, mimetools, base64
from ZSI import *
from ZSI import resolvers
try:
import cStringIO as StringIO
except ImportError:
import StringIO
class t6TestCase(unittest.TestCase):
"Test case wrapper for old ZSI t6 test case"
def checkt6(self):
try:
istr = StringIO.StringIO(intext)
m = mimetools.Message(istr)
cid = resolvers.MIMEResolver(m['content-type'], istr)
xml = cid.GetSOAPPart()
ps = ParsedSoap(xml, resolver=cid.Resolve)
except ParseException, e:
print >>OUT, FaultFromZSIException(e).AsSOAP()
self.fail()
except Exception, e:
# Faulted while processing; assume it's in the header.
print >>OUT, FaultFromException(e, 1, sys.exc_info()[2]).AsSOAP()
self.fail()
try:
dict = ps.Parse(typecode)
except Exception, e:
# Faulted while processing; now it's the body
print >>OUT, FaultFromException(e, 0, sys.exc_info()[2]).AsSOAP()
self.fail()
self.failUnlessEqual(dict['stringtest'], strExtTest,
"Failed to extract stringtest correctly")
print base64.encodestring(cid['partii@zolera.com'].read())
v = dict['b64']
print type(v), 'is type(v)'
self.failUnlessEqual(cid['partii@zolera.com'].getvalue(), v,
"mismatch")
print base64.encodestring(v)
from ZSI.wstools.c14n import Canonicalize
z = dict['xmltest']
print type(z), z
print Canonicalize(z)
def makeTestSuite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(t6TestCase, "check"))
return suite
def main():
unittest.main(defaultTest="makeTestSuite")
OUT = sys.stdout
typecode = TC.Struct(None, [
TC.String('b64'),
TC.String('stringtest'),
TC.XML('xmltest'),
])
intext='''Return-Path: <rsalz@zolera.com>
Received: from zolera.com (os390.zolera.com [10.0.1.9])
by zolera.com (8.11.0/8.11.0) with ESMTP id f57I2sf00832
for <rsalz@zolera.com>; Thu, 7 Jun 2001 14:02:54 -0400
Sender: rsalz@zolera.com
Message-ID: <3B1FC1D1.FF6B21B4@zolera.com>
Date: Thu, 07 Jun 2001 14:02:57 -0400
From: Rich Salz <rsalz@zolera.com>
X-Mailer: Mozilla 4.72 [en] (X11; U; Linux 2.2.14-5.0 i686)
X-Accept-Language: en
MIME-Version: 1.0
To: rsalz@zolera.com
Subject: mime with attachments
Content-Type: multipart/mixed;
boundary="------------68E4BAC5B266315E42428C64"
Status: R
This is a multi-part message in MIME format.
--------------68E4BAC5B266315E42428C64
Content-Type: text/plain; charset=us-ascii
Content-Transfer-Encoding: 7bit
<SOAP-ENV:Envelope
xmlns="http://www.example.com/schemas/TEST"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:ZSI="http://www.zolera.com/schemas/ZSI/">
<SOAP-ENV:Body>
<hreftest>
<stringtest href="cid:part1@zolera.com"/>
<b64 href="cid:partii@zolera.com"/>
<xmltest href="cid:12@zolera.com"/>
</hreftest>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
--------------68E4BAC5B266315E42428C64
Content-Type: text/plain; charset=us-ascii;
name="abs.txt"
Content-Transfer-Encoding: 7bit
Content-ID: <part1@zolera.com>
Content-Disposition: inline;
filename="abs.txt"
Digitial Signatures in a Web Services World
An influential Forrestor report created the term inverted security: it's
not about who you keep out, it's about who you let in. Customer portals,
without a costly PKI deployment or application integration issues.
--------------68E4BAC5B266315E42428C64
Content-Type: application/pdf;
name="gmpharma.pdf"
Content-Transfer-Encoding: base64
Content-ID: <partii@zolera.com>
Content-Disposition: inline;
filename="gmpharma.pdf"
JVBERi0xLjINJeLjz9MNCjQzIDAgb2JqDTw8IA0vTGluZWFyaXplZCAxIA0vTyA0NSANL0gg
WyAxMTQ0IDM5NiBdIA0vTCA2NjkwMiANL0UgMTAyODIgDS9OIDkgDS9UIDY1OTI0IA0+PiAN
RB3nwVOQH9JpmFv6Ri2Zq7mlddSS2B5WcZwvAP+gy9QtuYlfqj1rsi9WqJOszzHXmXZ8fXxK
XBBztIpgbkRrd+SGtY4QXo0fX0VN86uKXwtrkd7h1qiq2FUtXl6uNfnCoyX1Dve1O3RPRyhG
sKn6fLMb+uSSIHPQkClRBwu5gechz/1PBUBSB34jXbPdMTIb+/wRP+pauSAhLBzFELDOgk5b
PaIPAnIudFovQTc7Df2Ws9Atz4Bua+oINphIOojogG5LP3Tb3oNu8bsmuK+wFXEdbfgFIx+G
gKULYx5A2WnaDXB5JeoRQg90S0HcX2dCPmRCqDXB/aX34KujsPwJ/UpRdxXPeAftDkQS6hag
bh/yTOiUyqBz9CzxnyMYQGDO0jrUZ47kkWfmYvVg
--------------68E4BAC5B266315E42428C64
Content-ID: <12@zolera.com>
<foo xmlns="example.com" xmlns:Z="zolera">
this is a foo
<b xmlns:Z="zolera">redundnant ns decl</b>
<b Z:x="this was first" Z:a="2nd-orig">b test</b>
</foo>
--------------68E4BAC5B266315E42428C64--
'''
strExtTest = '''
Digitial Signatures in a Web Services World
An influential Forrestor report created the term inverted security: it's
not about who you keep out, it's about who you let in. Customer portals,
without a costly PKI deployment or application integration issues.
'''
if __name__ == "__main__" : main()
|
ned14/BEurtle
|
Installer/test/ZSI-2.1-a1/test/test_t6.py
|
Python
|
lgpl-2.1
| 5,153
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
import sys
import recommonmark
from recommonmark.transform import AutoStructify
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'Moe Serifu Agent'
copyright = '2018, Moe Serifu Circle'
author = 'Moe Serifu Circle'
# The short X.Y version
version = '0.1'
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
]
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
napoleon_google_docstring = False
napoleon_use_param = False
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MoeSerifuAgentdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MoeSerifuAgent.tex', 'Moe Serifu Agent Documentation',
'Moe Serifu Circle \\& Contributors', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'moeserifuagent', 'Moe Serifu Agent Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MoeSerifuAgent', 'Moe Serifu Agent Documentation',
author, 'MoeSerifuAgent', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
def setup(app):
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: "docs.moeserifu.moe/" + url,
'enable_auto_toc_tree': True,
'auto_toc_tree_section': 'Contents',
'enable_auto_doc_ref': False,
}, True)
app.add_transform(AutoStructify)
|
dekarrin/moe-serifu-agent
|
python/sphinx/conf.py
|
Python
|
lgpl-3.0
| 6,239
|
# -*- coding: utf-8 -*-
import os
import re
import sys
import subprocess
import nixops.util
import nixops.resources
import nixops.ssh_util
class MachineDefinition(nixops.resources.ResourceDefinition):
"""Base class for NixOps machine definitions."""
def __init__(self, xml):
nixops.resources.ResourceDefinition.__init__(self, xml)
self.encrypted_links_to = set([e.get("value") for e in xml.findall("attrs/attr[@name='encryptedLinksTo']/list/string")])
self.store_keys_on_machine = xml.find("attrs/attr[@name='storeKeysOnMachine']/bool").get("value") == "true"
self.always_activate = xml.find("attrs/attr[@name='alwaysActivate']/bool").get("value") == "true"
self.keys = {k.get("name"): k.find("string").get("value") for k in xml.findall("attrs/attr[@name='keys']/attrs/attr")}
self.owners = [e.get("value") for e in xml.findall("attrs/attr[@name='owners']/list/string")]
class MachineState(nixops.resources.ResourceState):
"""Base class for NixOps machine state objects."""
vm_id = nixops.util.attr_property("vmId", None)
ssh_pinged = nixops.util.attr_property("sshPinged", False, bool)
public_vpn_key = nixops.util.attr_property("publicVpnKey", None)
store_keys_on_machine = nixops.util.attr_property("storeKeysOnMachine", True, bool)
keys = nixops.util.attr_property("keys", [], 'json')
owners = nixops.util.attr_property("owners", [], 'json')
# Nix store path of the last global configuration deployed to this
# machine. Used to check whether this machine is up to date with
# respect to the global configuration.
cur_configs_path = nixops.util.attr_property("configsPath", None)
# Nix store path of the last machine configuration deployed to
# this machine.
cur_toplevel = nixops.util.attr_property("toplevel", None)
def __init__(self, depl, name, id):
nixops.resources.ResourceState.__init__(self, depl, name, id)
self._ssh_pinged_this_time = False
self.ssh = nixops.ssh_util.SSH(self.logger)
self.ssh.register_flag_fun(self.get_ssh_flags)
self.ssh.register_host_fun(self.get_ssh_name)
self.ssh.register_passwd_fun(self.get_ssh_password)
self._ssh_private_key_file = None
def prefix_definition(self, attr):
return attr
@property
def started(self):
state = self.state
return state == self.STARTING or state == self.UP
def set_common_state(self, defn):
self.store_keys_on_machine = defn.store_keys_on_machine
self.keys = defn.keys
def stop(self):
"""Stop this machine, if possible."""
self.warn("don't know how to stop machine ‘{0}’".format(self.name))
def start(self):
"""Start this machine, if possible."""
pass
def get_load_avg(self):
"""Get the load averages on the machine."""
try:
res = self.run_command("cat /proc/loadavg", capture_stdout=True, timeout=15).rstrip().split(' ')
assert len(res) >= 3
return res
except nixops.ssh_util.SSHConnectionFailed:
return None
except nixops.ssh_util.SSHCommandFailed:
return None
# FIXME: Move this to ResourceState so that other kinds of
# resources can be checked.
def check(self):
"""Check machine state."""
res = CheckResult()
self._check(res)
return res
def _check(self, res):
avg = self.get_load_avg()
        if avg is None:
if self.state == self.UP: self.state = self.UNREACHABLE
res.is_reachable = False
else:
self.state = self.UP
self.ssh_pinged = True
self._ssh_pinged_this_time = True
res.is_reachable = True
res.load = avg
# Get the systemd units that are in a failed state or in progress.
out = self.run_command("systemctl --all --full --no-legend",
capture_stdout=True).split('\n')
res.failed_units = []
res.in_progress_units = []
for l in out:
match = re.match("^([^ ]+) .* failed .*$", l)
if match: res.failed_units.append(match.group(1))
# services that are in progress
match = re.match("^([^ ]+) .* activating .*$", l)
if match: res.in_progress_units.append(match.group(1))
# Currently in systemd, failed mounts enter the
# "inactive" rather than "failed" state. So check for
# that. Hack: ignore special filesystems like
# /sys/kernel/config. Systemd tries to mount these
# even when they don't exist.
match = re.match("^([^\.]+\.mount) .* inactive .*$", l)
if match and not match.group(1).startswith("sys-") and not match.group(1).startswith("dev-"):
res.failed_units.append(match.group(1))
def restore(self, defn, backup_id, devices=[]):
"""Restore persistent disks to a given backup, if possible."""
self.warn("don't know how to restore disks from backup for machine ‘{0}’".format(self.name))
def remove_backup(self, backup_id):
"""Remove a given backup of persistent disks, if possible."""
self.warn("don't know how to remove a backup for machine ‘{0}’".format(self.name))
def backup(self, defn, backup_id):
"""Make backup of persistent disks, if possible."""
self.warn("don't know how to make backup of disks for machine ‘{0}’".format(self.name))
def reboot(self, hard=False):
"""Reboot this machine."""
self.log("rebooting...")
if self.state == self.RESCUE:
# We're on non-NixOS here, so systemd might not be available.
# The sleep is to prevent the reboot from causing the SSH
# session to hang.
reboot_command = "(sleep 2; reboot) &"
else:
reboot_command = "systemctl reboot"
self.run_command(reboot_command, check=False)
self.state = self.STARTING
self.ssh.reset()
def reboot_sync(self, hard=False):
"""Reboot this machine and wait until it's up again."""
self.reboot(hard=hard)
self.log_start("waiting for the machine to finish rebooting...")
nixops.util.wait_for_tcp_port(self.get_ssh_name(), 22, open=False, callback=lambda: self.log_continue("."))
self.log_continue("[down]")
nixops.util.wait_for_tcp_port(self.get_ssh_name(), 22, callback=lambda: self.log_continue("."))
self.log_end("[up]")
self.state = self.UP
self.ssh_pinged = True
self._ssh_pinged_this_time = True
self.send_keys()
def reboot_rescue(self, hard=False):
"""
Reboot machine into rescue system and wait until it is active.
"""
self.warn("machine ‘{0}’ doesn't have a rescue"
" system.".format(self.name))
def send_keys(self):
if self.store_keys_on_machine: return
self.run_command("mkdir -m 0700 -p /run/keys")
for k, v in self.get_keys().items():
self.log("uploading key ‘{0}’...".format(k))
tmp = self.depl.tempdir + "/key-" + self.name
f = open(tmp, "w+"); f.write(v); f.close()
self.run_command("rm -f /run/keys/" + k)
self.upload_file(tmp, "/run/keys/" + k)
self.run_command("chmod 600 /run/keys/" + k)
os.remove(tmp)
self.run_command("touch /run/keys/done")
def get_keys(self):
return self.keys
def get_ssh_name(self):
assert False
def get_ssh_flags(self):
return []
def get_ssh_password(self):
return None
@property
def public_ipv4(self):
return None
@property
def private_ipv4(self):
return None
def address_to(self, m):
"""Return the IP address to be used to access machone "m" from this machine."""
ip = m.public_ipv4
if ip: return ip
return None
def wait_for_ssh(self, check=False):
"""Wait until the SSH port is open on this machine."""
if self.ssh_pinged and (not check or self._ssh_pinged_this_time): return
self.log_start("waiting for SSH...")
nixops.util.wait_for_tcp_port(self.get_ssh_name(), 22, callback=lambda: self.log_continue("."))
self.log_end("")
if self.state != self.RESCUE:
self.state = self.UP
self.ssh_pinged = True
self._ssh_pinged_this_time = True
def write_ssh_private_key(self, private_key):
key_file = "{0}/id_nixops-{1}".format(self.depl.tempdir, self.name)
with os.fdopen(os.open(key_file, os.O_CREAT | os.O_WRONLY, 0600), "w") as f:
f.write(private_key)
self._ssh_private_key_file = key_file
return key_file
def get_ssh_private_key_file(self):
return None
def _logged_exec(self, command, **kwargs):
return nixops.util.logged_exec(command, self.logger, **kwargs)
def run_command(self, command, **kwargs):
"""
Execute a command on the machine via SSH.
For possible keyword arguments, please have a look at
nixops.ssh_util.SSH.run_command().
"""
# If we are in rescue state, unset locale specific stuff, because we're
# mainly operating in a chroot environment.
if self.state == self.RESCUE:
command = "export LANG= LC_ALL= LC_TIME=; " + command
return self.ssh.run_command(command, self.get_ssh_flags(), **kwargs)
def switch_to_configuration(self, method, sync, command=None):
"""
Execute the script to switch to new configuration.
This function has to return an integer, which is the return value of the
actual script.
"""
cmd = ("NIXOS_NO_SYNC=1 " if not sync else "")
if command is None:
cmd += "/nix/var/nix/profiles/system/bin/switch-to-configuration"
else:
cmd += command
cmd += " " + method
return self.run_command(cmd, check=False)
def copy_closure_to(self, path):
"""Copy a closure to this machine."""
# !!! Implement copying between cloud machines, as in the Perl
# version.
# It's usually faster to let the target machine download
# substitutes from nixos.org, so try that first.
if not self.has_really_fast_connection():
closure = subprocess.check_output(["nix-store", "-qR", path]).splitlines()
self.run_command("nix-store -j 4 -r --ignore-unknown " + ' '.join(closure), check=False)
# Any remaining paths are copied from the local machine.
env = dict(os.environ)
master = self.ssh.get_master()
env['NIX_SSHOPTS'] = ' '.join(self.get_ssh_flags() + master.opts)
self._logged_exec(
["nix-copy-closure", "--to", "root@" + self.get_ssh_name(), path]
+ ([] if self.has_really_fast_connection() else ["--gzip"]),
env=env)
def has_really_fast_connection(self):
return False
def generate_vpn_key(self):
try:
self.run_command("test -f /root/.ssh/id_charon_vpn")
_vpn_key_exists = True
except nixops.ssh_util.SSHCommandFailed:
_vpn_key_exists = False
if self.public_vpn_key and _vpn_key_exists: return
(private, public) = nixops.util.create_key_pair(key_name="NixOps VPN key of {0}".format(self.name))
f = open(self.depl.tempdir + "/id_vpn-" + self.name, "w+")
f.write(private)
f.seek(0)
res = self.run_command("umask 077 && mkdir -p /root/.ssh &&"
" cat > /root/.ssh/id_charon_vpn",
check=False, stdin=f)
if res != 0: raise Exception("unable to upload VPN key to ‘{0}’".format(self.name))
self.public_vpn_key = public
def upload_file(self, source, target, recursive=False):
master = self.ssh.get_master()
cmdline = ["scp"] + self.get_ssh_flags() + master.opts
if recursive:
cmdline += ['-r']
cmdline += [source, "root@" + self.get_ssh_name() + ":" + target]
return self._logged_exec(cmdline)
def download_file(self, source, target, recursive=False):
master = self.ssh.get_master()
cmdline = ["scp"] + self.get_ssh_flags() + master.opts
if recursive:
cmdline += ['-r']
cmdline += ["root@" + self.get_ssh_name() + ":" + source, target]
return self._logged_exec(cmdline)
def get_console_output(self):
return "(not available for this machine type)\n"
class CheckResult(object):
def __init__(self):
# Whether the resource exists.
self.exists = None
# Whether the resource is "up". Generally only meaningful for
# machines.
self.is_up = None
# Whether the resource is reachable via SSH.
self.is_reachable = None
# Whether the disks that should be attached to a machine are
# in fact properly attached.
self.disks_ok = None
# List of systemd units that are in a failed state.
self.failed_units = None
# List of systemd units that are in progress.
self.in_progress_units = None
# Load average on the machine.
self.load = None
# Error messages.
self.messages = []
# FIXME: add a check whether the active NixOS config on the
# machine is correct.
import nixops.backends.none
import nixops.backends.virtualbox
import nixops.backends.ec2
import nixops.backends.hetzner
import nixops.resources.ec2_keypair
import nixops.resources.ssh_keypair
import nixops.resources.sqs_queue
import nixops.resources.s3_bucket
import nixops.resources.iam_role
import nixops.resources.ec2_security_group
import nixops.resources.ebs_volume
import nixops.resources.elastic_ip
def create_definition(xml):
"""Create a machine definition object from the given XML representation of the machine's attributes."""
target_env = xml.find("attrs/attr[@name='targetEnv']/string").get("value")
for i in [nixops.backends.none.NoneDefinition,
nixops.backends.virtualbox.VirtualBoxDefinition,
nixops.backends.ec2.EC2Definition,
nixops.backends.hetzner.HetznerDefinition]:
if target_env == i.get_type():
return i(xml)
raise nixops.deployment.UnknownBackend("unknown backend type ‘{0}’".format(target_env))
def create_state(depl, type, name, id):
"""Create a machine state object of the desired backend type."""
for i in [nixops.backends.none.NoneState,
nixops.backends.virtualbox.VirtualBoxState,
nixops.backends.ec2.EC2State,
nixops.backends.hetzner.HetznerState,
nixops.resources.ec2_keypair.EC2KeyPairState,
nixops.resources.ssh_keypair.SSHKeyPairState,
nixops.resources.sqs_queue.SQSQueueState,
nixops.resources.iam_role.IAMRoleState,
nixops.resources.s3_bucket.S3BucketState,
nixops.resources.ec2_security_group.EC2SecurityGroupState,
nixops.resources.ebs_volume.EBSVolumeState,
nixops.resources.elastic_ip.ElasticIPState
]:
if type == i.get_type():
return i(depl, name, id)
raise nixops.deployment.UnknownBackend("unknown backend type ‘{0}’".format(type))
|
shlevy/nixops
|
nixops/backends/__init__.py
|
Python
|
lgpl-3.0
| 15,654
|
"""
fx-ring-mod.py - Stereo ring modulator.
This example shows how to build a ring modulation effect
with the modulator's frequency and brightness controlled by
analog inputs (use the audio inputs after the stereo audio
channels, i.e. Input(2) is analog-in 0, Input(3) is
analog-in 1, etc.).
It also shows how to send a signal to the analog outputs. Again,
use the outputs after the stereo audio channels, i.e.
.out(2) writes to analog-out 0, .out(3) to analog-out 1,
etc.
"""
# Set to True if you want to control the modulator
# frequency and brightness with analog inputs.
WITH_ANALOG_INPUT = True
# If False, set frequency and brightness values.
FREQUENCY = 500 # Hz
BRIGHTNESS = 0.05 # 0 -> 0.2
# If True, a positive value is sent on analog-out 0 and 1
# whenever there is an output signal (can be used to build
# a cheap vumeter with leds).
WITH_ANALOG_OUTPUT = True
# stereo input
src = Input([0,1])
# Don't know if the noise comes from my mic,
# but it sounds better with a gate on the input!
gate = Gate(src.mix(), thresh=-60, risetime=.005,
falltime=.02, lookahead=1, outputAmp=True)
srcg = src * gate
if WITH_ANALOG_INPUT:
# analog-in 0 (modulator's frequency)
i0 = Tone(Input(2), 8)
freq = Scale(i0, 0, 1, 1, 1000, 3)
# analog-in 1 (modulator's brightness)
i1 = Tone(Input(3), 8)
feed = i1 * 0.2
else:
freq = FREQUENCY
feed = BRIGHTNESS
# Modulation oscillator
mod = SineLoop(freq, feed)
# Ring modulation and stereo output
out = (srcg * mod).out()
if WITH_ANALOG_OUTPUT:
# analog out 0-1 (stereo vumeter)
fol = Sqrt(Clip(Follower(out, mul=4))).out(2)
|
belangeo/pyo-bela
|
examples/fx-ring-mod.py
|
Python
|
lgpl-3.0
| 1,626
|
# (C) British Crown Copyright 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.fileformats.pp.as_fields` function."""
from __future__ import (absolute_import, division, print_function)
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import mock
from iris.coords import DimCoord
from iris.fileformats._ff_cross_references import STASH_TRANS
import iris.fileformats.pp as pp
import iris.tests.stock as stock
class TestAsFields(tests.IrisTest):
def setUp(self):
self.cube = stock.realistic_3d()
def test_cube_only(self):
fields = pp.as_fields(self.cube)
for field in fields:
self.assertEqual(field.lbcode, 101)
def test_field_coords(self):
fields = pp.as_fields(self.cube,
field_coords=['grid_longitude',
'grid_latitude'])
for field in fields:
self.assertEqual(field.lbcode, 101)
if __name__ == "__main__":
tests.main()
|
Jozhogg/iris
|
lib/iris/tests/unit/fileformats/pp/test_as_fields.py
|
Python
|
lgpl-3.0
| 1,719
|
from rope.base.oi import objectdb
class MemoryDB(objectdb.FileDict):
def __init__(self, project, persist=None):
self.project = project
self._persist = persist
self.files = self
self._load_files()
self.project.data_files.add_write_hook(self.write)
def _load_files(self):
self._files = {}
if self.persist:
result = self.project.data_files.read_data(
"objectdb", compress=self.compress, import_=True
)
if result is not None:
self._files = result
def keys(self):
return self._files.keys()
def __iter__(self):
for f in self._files:
yield f
def __len__(self):
return len(self._files)
    def __setitem__(self, key, value):
        raise NotImplementedError()
def __contains__(self, key):
return key in self._files
def __getitem__(self, key):
return FileInfo(self._files[key])
def create(self, path):
self._files[path] = {}
def rename(self, file, newfile):
if file not in self._files:
return
self._files[newfile] = self._files[file]
del self[file]
def __delitem__(self, file):
del self._files[file]
def write(self):
if self.persist:
self.project.data_files.write_data("objectdb", self._files, self.compress)
@property
def compress(self):
return self.project.prefs.get("compress_objectdb", False)
@property
def persist(self):
if self._persist is not None:
return self._persist
else:
return self.project.prefs.get("save_objectdb", False)
class FileInfo(objectdb.FileInfo):
def __init__(self, scopes):
self.scopes = scopes
def create_scope(self, key):
self.scopes[key] = ScopeInfo()
def keys(self):
return self.scopes.keys()
def __contains__(self, key):
return key in self.scopes
def __getitem__(self, key):
return self.scopes[key]
def __delitem__(self, key):
del self.scopes[key]
def __iter__(self):
for s in self.scopes:
yield s
def __len__(self):
return len(self.scopes)
    def __setitem__(self, key, value):
        raise NotImplementedError()
class ScopeInfo(objectdb.ScopeInfo):
def __init__(self):
self.call_info = {}
self.per_name = {}
def get_per_name(self, name):
return self.per_name.get(name, None)
def save_per_name(self, name, value):
self.per_name[name] = value
def get_returned(self, parameters):
return self.call_info.get(parameters, None)
def get_call_infos(self):
for args, returned in self.call_info.items():
yield objectdb.CallInfo(args, returned)
def add_call(self, parameters, returned):
self.call_info[parameters] = returned
def __getstate__(self):
return (self.call_info, self.per_name)
def __setstate__(self, data):
self.call_info, self.per_name = data
|
python-rope/rope
|
rope/base/oi/memorydb.py
|
Python
|
lgpl-3.0
| 3,060
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
dependencies = [
('comercial', '0024_empresacomercial_logo'),
]
operations = [
migrations.AddField(
model_name='empresacomercial',
name='uuid',
field=models.UUIDField(default=uuid.uuid4),
),
]
|
dudanogueira/microerp
|
microerp/comercial/migrations/0025_empresacomercial_uuid.py
|
Python
|
lgpl-3.0
| 430
|
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK burstioInterfaces.
#
# REDHAWK burstioInterfaces is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# REDHAWK burstioInterfaces is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import threading
from ossie.cf import CF, ExtendedCF
class Connection(object):
def __init__(self, port):
self.port = port
class UsesPort(object):
def __init__(self, name, portType):
self._connectionMutex = threading.Lock()
self._name = name
self._connections = {}
self.PortType = portType
self._connectListeners = []
self._disconnectListeners = []
def getName(self):
return self._name
def addConnectListener(self, listener):
self._connectListeners.append(listener)
def removeConnectListener(self, listener):
try:
self._connectListeners.remove(listener)
except:
pass
def addDisconnectListener(self, listener):
self._disconnectListeners.append(listener)
def removeDisconnectListener(self, listener):
try:
self._disconnectListeners.remove(listener)
except:
pass
def connectPort(self, connection, connectionId):
# Give a specific exception message for nil
if connection is None:
raise CF.Port.InvalidPort(1, 'Nil object reference')
# Attempt to narrow the remote object to the correct type; note this
# does not require the lock
try:
port = connection._narrow(self.PortType)
except:
raise CF.Port.InvalidPort(1, 'Object unreachable')
# If the narrow returned nil without throwing an exception, it's safe
# to assume the object is the wrong type
if port is None:
raise CF.Port.InvalidPort(1, 'Object is not a ' + self.PortType._NP_RepositoryId)
self._connectionMutex.acquire()
try:
entry = self._connections.get(connectionId, None)
if entry is None:
# Store the new connection and pass the new entry along to
# _connectionAdded
entry = Connection(port)
self._connections[connectionId] = entry
# Allow subclasses to do additional bookkeeping
self._connectionAdded(connectionId, entry)
else:
# Replace the object reference
entry.port = port
# Allow subclasses to do additional bookkeeping
self._connectionModified(connectionId, entry)
finally:
self._connectionMutex.release()
# Notify connection listeners
for listener in self._connectListeners:
listener(connectionId)
def disconnectPort(self, connectionId):
self._connectionMutex.acquire()
try:
            if connectionId not in self._connections:
raise CF.Port.InvalidPort(2, 'No connection ' + connectionId)
# Allow subclasses to do additional cleanup
self._connectionRemoved(connectionId, self._connections[connectionId])
del self._connections[connectionId]
finally:
self._connectionMutex.release()
# Notify disconnection listeners
for listener in self._disconnectListeners:
listener(connectionId)
def _get_connections(self):
self._connectionMutex.acquire()
try:
return [ExtendedCF.UsesConnection(k,v.port) for k, v in self._connections.iteritems()]
finally:
self._connectionMutex.release()
def _connectionAdded(self, connectionId, connection):
pass
def _connectionModified(self, connectionId, connection):
pass
def _connectionRemoved(self, connectionId, connection):
pass
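# Subclassing sketch (class name below is illustrative): override the
# bookkeeping callbacks to react to connection changes, e.g.
#   class BurstOutputPort(UsesPort):
#       def _connectionAdded(self, connectionId, connection):
#           pass  # e.g. start delivering data to connection.port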
|
RedhawkSDR/burstioInterfaces
|
src/python/redhawk/burstio/usesport.py
|
Python
|
lgpl-3.0
| 4,491
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from typing import Dict, List
import numpy as np
from psi4 import core
from psi4.driver import constants
from psi4.driver.p4util.exceptions import *
def least_squares_fit_polynomial(xvals, fvals, localization_point, no_factorials=True, weighted=True, polynomial_order=4):
"""Performs and unweighted least squares fit of a polynomial, with specified order
to an array of input function values (fvals) evaluated at given locations (xvals).
See https://doi.org/10.1063/1.4862157, particularly eqn (7) for details. """
xpts = np.array(xvals) - localization_point
if weighted:
R = 1.0
p_nu = 1
epsilon = 1e-3
zvals = np.square(xpts/R)
weights = np.exp(-zvals) / (zvals**p_nu + epsilon**p_nu)
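        # Weighting from the equation cited in the docstring: points near the
        # localization point get large weights (capped by epsilon), while the
        # Gaussian factor suppresses distant points, so the fit reproduces the
        # local derivatives of the curve.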
else:
weights = None
fit = np.polynomial.polynomial.polyfit(xpts, fvals, polynomial_order, w=weights)
# Remove the 1/n! coefficients
if no_factorials:
scalefac = 1.0
for n in range(2,polynomial_order+1):
scalefac *= n
fit[n] *= scalefac
return fit
def anharmonicity(rvals: List, energies: List, plot_fit: str = '', mol = None) -> Dict:
"""Generates spectroscopic constants for a diatomic molecules.
Fits a diatomic potential energy curve using a weighted least squares approach
(c.f. https://doi.org/10.1063/1.4862157, particularly eqn. 7), locates the minimum
energy point, and then applies second order vibrational perturbation theory to obtain spectroscopic
constants. Any number of points greater than 4 may be provided, and they should bracket the minimum.
The data need not be evenly spaced, and can be provided in any order. The data are weighted such that
those closest to the minimum have highest impact.
A dictionary with the following keys, which correspond to spectroscopic constants, is returned:
:param rvals: The bond lengths (in Angstrom) for which energies are
provided, of length at least 5 and equal to the length of the energies array
:param energies: The energies (Eh) computed at the bond lengths in the rvals list
:param plot_fit: A string describing where to save a plot of the harmonic and anharmonic fits, the
inputted data points, re, r0 and the first few energy levels, if matplotlib
is available. Set to 'screen' to generate an interactive plot on the screen instead. If a filename is
provided, the image type is determined by the extension; see matplotlib for supported file types.
:returns: (*dict*) Keys: "re", "r0", "we", "wexe", "nu", "ZPVE(harmonic)", "ZPVE(anharmonic)", "Be", "B0", "ae", "De"
corresponding to the spectroscopic constants in cm-1
"""
angstrom_to_bohr = 1.0 / constants.bohr2angstroms
    angstrom_to_meter = 1.0e-10  # 1 Angstrom = 1e-10 m
# Make sure the input is valid
if len(rvals) != len(energies):
raise ValidationError("The number of energies must match the number of distances")
npoints = len(rvals)
if npoints < 5:
raise ValidationError("At least 5 data points must be provided to compute anharmonicity")
core.print_out("\n\nPerforming a fit to %d data points\n" % npoints)
# Sort radii and values first from lowest to highest radius
indices = np.argsort(rvals)
rvals = np.array(rvals)[indices]
energies = np.array(energies)[indices]
# Make sure the molecule the user provided is the active one
molecule = mol or core.get_active_molecule()
molecule.update_geometry()
natoms = molecule.natom()
if natoms != 2:
raise Exception("The current molecule must be a diatomic for this code to work!")
m1 = molecule.mass(0)
m2 = molecule.mass(1)
# Find rval of the minimum of energies, check number of points left and right
min_index = np.argmin(energies)
if min_index < 3 :
core.print_out("\nWarning: fewer than 3 points provided with a r < r(min(E))!\n")
if min_index >= len(energies) - 3:
core.print_out("\nWarning: fewer than 3 points provided with a r > r(min(E))!\n")
# Optimize the geometry, refitting the surface around each new geometry
core.print_out("\nOptimizing geometry based on current surface:\n\n")
re = rvals[min_index]
maxit = 30
thres = 1.0e-9
for i in range(maxit):
derivs = least_squares_fit_polynomial(rvals,energies,localization_point=re)
e,g,H = derivs[0:3]
core.print_out(" E = %20.14f, x = %14.7f, grad = %20.14f\n" % (e, re, g))
if abs(g) < thres:
break
re -= g/H
if i == maxit-1:
raise ConvergenceError("diatomic geometry optimization", maxit)
core.print_out(" Final E = %20.14f, x = %14.7f, grad = %20.14f\n" % (e, re, g))
if re < min(rvals):
raise Exception("Minimum energy point is outside range of points provided. Use a lower range of r values.")
if re > max(rvals):
raise Exception("Minimum energy point is outside range of points provided. Use a higher range of r values.")
# Convert to convenient units, and compute spectroscopic constants
d0,d1,d2,d3,d4 = derivs*constants.hartree2aJ
core.print_out("\nEquilibrium Energy %20.14f Hartrees\n" % e)
core.print_out("Gradient %20.14f\n" % g)
core.print_out("Quadratic Force Constant %14.7f MDYNE/A\n" % d2)
core.print_out("Cubic Force Constant %14.7f MDYNE/A**2\n" % d3)
core.print_out("Quartic Force Constant %14.7f MDYNE/A**3\n" % d4)
hbar = constants.h / (2.0 * np.pi)
mu = ((m1*m2)/(m1+m2))*constants.amu2kg
we = 5.3088375e-11 * np.sqrt(d2/mu)
wexe = (1.2415491e-6)*(we/d2)**2 * ((5.0*d3*d3)/(3.0*d2)-d4)
# Rotational constant: Be
I = ((m1*m2)/(m1+m2)) * constants.amu2kg * (re * angstrom_to_meter)**2
B = constants.h / (8.0 * np.pi**2 * constants.c * I)
# alpha_e and quartic centrifugal distortion constant
ae = -(6.0 * B**2 / we) * ((1.05052209e-3*we*d3)/(np.sqrt(B * d2**3))+1.0)
de = 4.0*B**3 / we**2
# B0 and r0 (plus re check using Be)
B0 = B - ae / 2.0
r0 = np.sqrt(constants.h / (8.0 * np.pi**2 * mu * constants.c * B0))
recheck = np.sqrt(constants.h / (8.0 * np.pi**2 * mu * constants.c * B))
r0 /= angstrom_to_meter
recheck /= angstrom_to_meter
# Fundamental frequency nu
nu = we - 2.0 * wexe
zpve_nu = 0.5 * we - 0.25 * wexe
zpve_we = 0.5 * we
# Generate pretty pictures, if requested
if(plot_fit):
try:
import matplotlib.pyplot as plt
except ImportError:
msg = "\n\tPlot not generated; matplotlib is not installed on this machine.\n\n"
print(msg)
core.print_out(msg)
# Correct the derivatives for the missing factorial prefactors
dvals = np.zeros(5)
dvals[0:5] = derivs[0:5]
dvals[2] /= 2
dvals[3] /= 6
dvals[4] /= 24
# Default plot range, before considering energy levels
minE = np.min(energies)
maxE = np.max(energies)
minR = np.min(rvals)
maxR = np.max(rvals)
# Plot vibrational energy levels
we_au = we / constants.hartree2wavenumbers
wexe_au = wexe / constants.hartree2wavenumbers
coefs2 = [ dvals[2], dvals[1], dvals[0] ]
coefs4 = [ dvals[4], dvals[3], dvals[2], dvals[1], dvals[0] ]
for n in range(3):
Eharm = we_au*(n+0.5)
Evpt2 = Eharm - wexe_au*(n+0.5)**2
coefs2[-1] = -Eharm
coefs4[-1] = -Evpt2
roots2 = np.roots(coefs2)
roots4 = np.roots(coefs4)
xvals2 = roots2 + re
xvals4 = np.choose(np.where(np.isreal(roots4)), roots4)[0].real + re
Eharm += dvals[0]
Evpt2 += dvals[0]
plt.plot(xvals2, [Eharm, Eharm], 'b', linewidth=1)
plt.plot(xvals4, [Evpt2, Evpt2], 'g', linewidth=1)
maxE = Eharm
maxR = np.max([xvals2,xvals4])
minR = np.min([xvals2,xvals4])
# Find ranges for the plot
dE = maxE - minE
minE -= 0.2*dE
maxE += 0.4*dE
dR = maxR - minR
minR -= 0.2*dR
maxR += 0.2*dR
# Generate the fitted PES
xpts = np.linspace(minR, maxR, 1000)
xrel = xpts - re
xpows = xrel[:, None] ** range(5)
fit2 = np.einsum('xd,d', xpows[:,0:3], dvals[0:3])
fit4 = np.einsum('xd,d', xpows, dvals)
# Make / display the plot
plt.plot(xpts, fit2, 'b', linewidth=2.5, label='Harmonic (quadratic) fit')
plt.plot(xpts, fit4, 'g', linewidth=2.5, label='Anharmonic (quartic) fit')
plt.plot([re, re], [minE, maxE], 'b--', linewidth=0.5)
plt.plot([r0, r0], [minE, maxE], 'g--', linewidth=0.5)
plt.scatter(rvals, energies, c='Black', linewidth=3, label='Input Data')
plt.legend()
plt.xlabel('Bond length (Angstroms)')
plt.ylabel('Energy (Eh)')
plt.xlim(minR, maxR)
plt.ylim(minE, maxE)
if plot_fit == 'screen':
plt.show()
else:
plt.savefig(plot_fit)
core.print_out("\n\tPES fit saved to %s.\n\n" % plot_fit)
core.print_out("\nre = %10.6f A check: %10.6f\n" % (re, recheck))
core.print_out("r0 = %10.6f A\n" % r0)
core.print_out("E at re = %17.10f Eh\n" % e)
core.print_out("we = %10.4f cm-1\n" % we)
core.print_out("wexe = %10.4f cm-1\n" % wexe)
core.print_out("nu = %10.4f cm-1\n" % nu)
core.print_out("ZPVE(we) = %10.4f cm-1\n" % zpve_we)
core.print_out("ZPVE(nu) = %10.4f cm-1\n" % zpve_nu)
core.print_out("Be = %10.4f cm-1\n" % B)
core.print_out("B0 = %10.4f cm-1\n" % B0)
core.print_out("ae = %10.4f cm-1\n" % ae)
core.print_out("De = %10.7f cm-1\n" % de)
results = {
"re" : re,
"r0" : r0,
"we" : we,
"wexe" : wexe,
"nu" : nu,
"E(re)" : e,
"ZPVE(harmonic)" : zpve_we,
"ZPVE(anharmonic)" : zpve_nu,
"Be" : B,
"B0" : B0,
"ae" : ae,
"De" : de
}
return results
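# Illustrative use (a sketch; computing one energy per bond length is omitted):
#   rvals = [0.65, 0.70, 0.75, 0.80, 0.85]   # bond lengths in Angstrom
#   energies = [...]                         # one computed energy (Eh) per bond length
#   spectro = anharmonicity(rvals, energies)
#   print(spectro["we"], spectro["wexe"], spectro["Be"])   # constants in cm-1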
|
psi4/psi4
|
psi4/driver/diatomic.py
|
Python
|
lgpl-3.0
| 11,346
|
APPLICATION_ID = "MessageProcessingGraph"
|
Akson/MoveMe
|
Samples/MessageProcessingGraph/__init__.py
|
Python
|
lgpl-3.0
| 41
|
# -*- coding: utf-8 -*-
"""
pygtkhelpers.proxy
~~~~~~~~~~~~~~~~~~
Controllers for managing data display widgets.
:copyright: 2005-2008 by pygtkhelpers Authors
:license: LGPL 2 or later (see README/COPYING/LICENSE)
An example session of using a proxy::
>>> import gtk
>>> from pygtkhelpers.proxy import proxy_for
>>> widget = gtk.Entry()
>>> proxy = proxy_for(widget)
>>> proxy
<GtkEntryProxy object at 0x9aea25c (PyGTKHelperGObjectProxy at 0x9e6ec50)>
>>> proxy.update('hello')
>>> proxy.read()
'hello'
>>> def changed(proxy, value):
... print proxy, 'changed to', value
...
...
>>> proxy.connect('changed', changed)
32L
>>> proxy.update('bye bye')
<GtkEntryProxy object at 0x9aea25c (PyGTKHelperGObjectProxy at 0x9e6ec50)> changed to bye bye
>>> widget.get_text()
'bye bye'
>>> widget.set_text('banana')
<GtkEntryProxy object at 0x9aea25c (PyGTKHelperGObjectProxy at 0x9e6ec50)> changed to banana
>>> proxy.read()
'banana'
"""
import gobject
import gtk
from pygtkhelpers.utils import gsignal
from pygtkhelpers.ui.widgets import StringList, SimpleComboBox
class GObjectProxy(gobject.GObject):
"""A proxy for a gtk.Widget
This proxy provides a common api to gtk widgets, so that they can be used
without knowing which specific widget they are. All proxy types should
extend this class.
"""
__gtype_name__ = 'PyGTKHelperGObjectProxy'
gsignal('changed', object)
signal_name = None
def __init__(self, widget):
gobject.GObject.__init__(self)
self.widget = widget
self.connections = []
self.connect_widget()
# public API
def update(self, value):
"""Update the widget's value
"""
self.update_internal(value)
self.emit('changed', self.get_widget_value())
def read(self):
"""Get the widget's value
"""
return self.get_widget_value()
# implementor API
def block(self):
for signal_id in self.connections:
self.widget.handler_block(signal_id)
def unblock(self):
for signal_id in self.connections:
self.widget.handler_unblock(signal_id)
def update_internal(self, value):
"""Update the widget's value without firing a changed signal
"""
self.block()
self.set_widget_value(value)
self.unblock()
def widget_changed(self, *args):
"""Called to indicate that a widget's value has been changed.
This will usually be called from a proxy implementation on response to
whichever signal was connected in `connect_widget`
The `*args` are there so you can use this as a signal handler.
"""
self.emit('changed', self.get_widget_value())
def set_widget_value(self, value):
"""Set the value of the widget.
This will update the view to match the value given. This is called
internally, and is called while the proxy is blocked, so no signals
are emitted from this action.
        This method should be overridden in subclasses depending on how a
        widget's value is set.
"""
def get_widget_value(self):
"""Get the widget value.
This method should be overridden in subclasses to return a value from
the widget.
"""
def connect_widget(self):
"""Perform the initial connection of the widget
        the default implementation will connect to the widget's signal
        based on self.signal_name
"""
if self.signal_name is not None:
# None for read only widgets
sid = self.widget.connect(self.signal_name, self.widget_changed)
self.connections.append(sid)
class SinglePropertyGObjectProxy(GObjectProxy):
"""Proxy which uses a single property to set and get the value.
"""
prop_name = None
def set_widget_value(self, value):
return self.widget.set_property(self.prop_name, value)
def get_widget_value(self):
return self.widget.get_property(self.prop_name)
class SingleDelegatedPropertyGObjectProxy(SinglePropertyGObjectProxy):
"""Proxy which uses a delegated property on its widget.
"""
prop_name = None
dprop_name = None
def __init__(self, widget):
self.owidget = widget
widget = widget.get_property(self.dprop_name)
GObjectProxy.__init__(self, widget)
class GtkEntryProxy(SinglePropertyGObjectProxy):
"""Proxy for a gtk.Entry.
"""
prop_name = 'text'
signal_name = 'changed'
class GtkToggleButtonProxy(SinglePropertyGObjectProxy):
"""Proxy for a gtk.ToggleButton.
"""
prop_name = 'active'
signal_name = 'toggled'
class GtkColorButtonProxy(SinglePropertyGObjectProxy):
"""Proxy for a gtk.ColorButton
"""
prop_name = 'color'
signal_name = 'color-set'
class StringListProxy(GObjectProxy):
"""Proxy for a pygtkhelpers.ui.widgets.StringList.
"""
signal_name = 'content-changed'
def get_widget_value(self):
return self.widget.value
def set_widget_value(self, value):
self.widget.value = value
class GtkRangeProxy(GObjectProxy):
"""Base class for widgets employing a gtk.Range.
"""
signal_name = 'value-changed'
def get_widget_value(self):
return self.widget.get_value()
def set_widget_value(self, value):
self.widget.set_value(value)
class GtkFileChooserProxy(GObjectProxy):
"""Proxy for a gtk.FileChooser.
"""
signal_name = 'selection-changed'
def get_widget_value(self):
if self.widget.get_select_multiple():
return self.widget.get_filenames()
else:
return self.widget.get_filename()
def set_widget_value(self, value):
if self.widget.get_select_multiple():
self.widget.unselect_all()
for filename in value:
self.widget.select_file(filename)
else:
self.widget.set_filename(value)
class GtkFontButtonProxy(SinglePropertyGObjectProxy):
"""Proxy for a gtk.FontButton.
"""
signal_name = 'font-set'
prop_name = 'font-name'
class GtkComboBoxProxy(GObjectProxy):
"""Proxy for a gtk.ComboBox.
"""
signal_name = 'changed'
def get_widget_value(self):
if not self.active_row:
return
return self.get_row_value(self.active_row)
def set_widget_value(self, value):
# what a pain in the arse
for i, row in enumerate(self.model):
if self.get_row_value(row) == value:
self.widget.set_active(i)
@property
def active_row(self):
if self.widget.get_active() == -1:
return
return self.model[self.widget.get_active()]
@property
def model(self):
return self.widget.get_model()
def get_row_value(self, row):
row = list(row) # XXX: that sucks
'''
value = row[1:]
if not value:
value = row[0]
elif len(value) == 1:
value = value[0]
'''
value = row[0]
return value
class GtkTextViewProxy(SingleDelegatedPropertyGObjectProxy):
"""Proxy for a gtk.TextView.
"""
signal_name = 'changed'
prop_name = 'text'
dprop_name = 'buffer'
class GtkLabelProxy(SinglePropertyGObjectProxy):
"""Proxy for a gtk.Label.
"""
prop_name = 'label'
class GtkImageProxy(SinglePropertyGObjectProxy):
"""Proxy for a gtk.Image.
"""
prop_name = 'file'
class GtkLinkButtonProxy(SinglePropertyGObjectProxy):
"""Proxy for a gtk.LinkButton.
"""
prop_name = 'uri'
class GtkProgressBarProxy(SinglePropertyGObjectProxy):
"""Proxy for a gtk.ProgressBar.
"""
prop_name = 'fraction'
widget_proxies = {
gtk.Entry: GtkEntryProxy,
gtk.ToggleButton: GtkToggleButtonProxy,
gtk.CheckButton: GtkToggleButtonProxy,
gtk.RadioButton: GtkToggleButtonProxy,
gtk.CheckMenuItem: GtkToggleButtonProxy,
gtk.ColorButton: GtkColorButtonProxy,
gtk.ComboBox: GtkComboBoxProxy,
gtk.SpinButton: GtkRangeProxy,
gtk.HScale: GtkRangeProxy,
gtk.VScale: GtkRangeProxy,
gtk.VScrollbar: GtkRangeProxy,
gtk.HScrollbar: GtkRangeProxy,
gtk.FileChooserButton: GtkFileChooserProxy,
gtk.FileChooserWidget: GtkFileChooserProxy,
gtk.FontButton: GtkFontButtonProxy,
gtk.Label: GtkLabelProxy,
gtk.Image: GtkImageProxy,
gtk.LinkButton: GtkLinkButtonProxy,
gtk.ProgressBar: GtkProgressBarProxy,
gtk.TextView: GtkTextViewProxy,
StringList: StringListProxy,
SimpleComboBox: GtkComboBoxProxy,
}
def proxy_for(widget):
"""Create a proxy for a Widget
:param widget: A gtk.Widget to proxy
This will raise a KeyError if there is no proxy type registered for the
widget type.
"""
proxy_type = widget_proxies.get(widget.__class__)
if proxy_type is None:
raise KeyError('There is no proxy type registered for %r' % widget)
return proxy_type(widget)
class ProxyGroup(gobject.GObject):
"""A controller to handle multiple proxies, and sub-groups
A ProxyGroup is a bridge to reduce multiple proxies and sub-groups to a
single signal based on the key of the individual proxies.
"""
gsignal('changed', object, str, object)
def __init__(self):
gobject.GObject.__init__(self)
def add_proxy(self, name, proxy):
"""Add a proxy to this group
:param name: The name or key of the proxy, which will be emitted with
the changed signal
:param proxy: The proxy instance to add
"""
proxy.connect('changed', self._on_proxy_changed, name)
def add_proxy_for(self, name, widget):
"""Create a proxy for a widget and add it to this group
:param name: The name or key of the proxy, which will be emitted with
the changed signal
:param widget: The widget to create a proxy for
"""
proxy = proxy_for(widget)
self.add_proxy(name, proxy)
def add_group(self, group):
"""Add an existing group to this group and proxy its signals
:param group: The ProxyGroup instance to add
"""
group.connect('changed', self._on_group_changed)
def _on_proxy_changed(self, proxy, value, name):
self.emit('changed', proxy, name, value)
# XXX namespacing
def _on_group_changed(self, group, proxy, value, name):
self.emit('changed', proxy, name, value)
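# Illustrative sketch (not part of the original module): a ProxyGroup re-emits
# every member proxy's 'changed' signal as its own 'changed' signal, tagged
# with the name the proxy was registered under.
#
#   >>> def on_changed(group, proxy, name, value):
#   ...     print name, value
#   >>> group = ProxyGroup()
#   >>> group.connect('changed', on_changed)
#   >>> group.add_proxy_for('title', gtk.Entry())
#   >>> group.add_proxy_for('done', gtk.CheckButton())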
|
wheeler-microfluidics/pygtkhelpers
|
pygtkhelpers/proxy.py
|
Python
|
lgpl-3.0
| 10,691
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides the capability to load netCDF files and interpret them
according to the 'NetCDF Climate and Forecast (CF) Metadata Conventions'.
References:
[CF] NetCDF Climate and Forecast (CF) Metadata conventions, Version 1.5, October, 2010.
[NUG] NetCDF User's Guide, http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
"""
from __future__ import (absolute_import, division, print_function)
from abc import ABCMeta, abstractmethod
from collections import Iterable, MutableMapping
import os
import re
import warnings
import netCDF4
import numpy as np
import numpy.ma as ma
import iris.util
#
# CF parse pattern common to both formula terms and measure CF variables.
#
_CF_PARSE = re.compile(r'''
\s*
(?P<lhs>[\w_]+)
\s*:\s*
(?P<rhs>[\w_]+)
\s*
''', re.VERBOSE)
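# Illustrative example: _CF_PARSE picks "<term>: <variable>" pairs out of a
# formula_terms or cell_measures style attribute string, e.g.
#
#   >>> attr = 'sigma: lev ps: surface_pressure'
#   >>> [(m.group('lhs'), m.group('rhs')) for m in _CF_PARSE.finditer(attr)]
#   [('sigma', 'lev'), ('ps', 'surface_pressure')]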
# NetCDF variable attributes handled by the netCDF4 module and
# therefore automatically classed as "used" attributes.
_CF_ATTRS_IGNORE = set(['_FillValue', 'add_offset', 'missing_value', 'scale_factor', ])
#: Supported dimensionless vertical coordinate reference surface/phenomenon
#: formula terms. Ref: [CF] Appendix D.
reference_terms = dict(atmosphere_sigma_coordinate=['ps'],
atmosphere_hybrid_sigma_pressure_coordinate=['ps'],
atmosphere_hybrid_height_coordinate=['orog'],
atmosphere_sleve_coordinate=['zsurf1', 'zsurf2'],
ocean_sigma_coordinate=['eta'],
ocean_s_coordinate=['eta'],
ocean_sigma_z_coordinate=['eta'],
ocean_s_coordinate_g1=['eta'],
ocean_s_coordinate_g2=['eta'])
################################################################################
class CFVariable(object):
"""Abstract base class wrapper for a CF-netCDF variable."""
__metaclass__ = ABCMeta
#: Name of the netCDF variable attribute that identifies this
#: CF-netCDF variable.
cf_identity = None
def __init__(self, name, data):
# Accessing the list of netCDF attributes is surprisingly slow.
# Since it's used repeatedly, caching the list makes things
# quite a bit faster.
self._nc_attrs = data.ncattrs()
#: NetCDF variable name.
self.cf_name = name
#: NetCDF4 Variable data instance.
self.cf_data = data
#: Collection of CF-netCDF variables associated with this variable.
self.cf_group = None
        #: CF-netCDF formula terms that this variable participates in.
self.cf_terms_by_root = {}
self.cf_attrs_reset()
@staticmethod
def _identify_common(variables, ignore, target):
if ignore is None:
ignore = []
if target is None:
target = variables
elif isinstance(target, basestring):
if target not in variables:
raise ValueError('Cannot identify unknown target CF-netCDF variable %r' % target)
target = {target: variables[target]}
else:
raise TypeError('Expect a target CF-netCDF variable name')
return (ignore, target)
@abstractmethod
def identify(self, variables, ignore=None, target=None, warn=True):
"""
Identify all variables that match the criterion for this CF-netCDF variable class.
Args:
* variables:
Dictionary of netCDF4.Variable instance by variable name.
Kwargs:
* ignore:
List of variable names to ignore.
* target:
Name of a single variable to check.
* warn:
Issue a warning if a missing variable is referenced.
Returns:
Dictionary of CFVariable instance by variable name.
"""
pass
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
        Note that, by default, scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
result = set(self.dimensions).issubset(cf_variable.dimensions)
return result
def __eq__(self, other):
# CF variable names are unique.
return self.cf_name == other.cf_name
def __ne__(self, other):
# CF variable names are unique.
return self.cf_name != other.cf_name
def __getattr__(self, name):
# Accessing netCDF attributes is surprisingly slow. Since
# they're often read repeatedly, caching the values makes things
# quite a bit faster.
if name in self._nc_attrs:
self._cf_attrs.add(name)
value = getattr(self.cf_data, name)
setattr(self, name, value)
return value
def __getitem__(self, key):
return self.cf_data.__getitem__(key)
def __len__(self):
return self.cf_data.__len__()
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.cf_name, self.cf_data)
def cf_attrs(self):
"""Return a list of all attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr))
for attr in sorted(self._nc_attrs))
def cf_attrs_ignored(self):
"""Return a list of all ignored attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr)) for attr in
sorted(set(self._nc_attrs) & _CF_ATTRS_IGNORE))
def cf_attrs_used(self):
"""Return a list of all accessed attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr)) for attr in
sorted(self._cf_attrs))
def cf_attrs_unused(self):
"""Return a list of all non-accessed attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr)) for attr in
sorted(set(self._nc_attrs) - self._cf_attrs))
def cf_attrs_reset(self):
"""Reset the history of accessed attribute names of the CF-netCDF variable."""
self._cf_attrs = set([item[0] for item in self.cf_attrs_ignored()])
def add_formula_term(self, root, term):
"""
Register the participation of this CF-netCDF variable in a CF-netCDF formula term.
Args:
* root (string):
The name of CF-netCDF variable that defines the CF-netCDF formula_terms attribute.
* term (string):
The associated term name of this variable in the formula_terms definition.
Returns:
None.
"""
self.cf_terms_by_root[root] = term
def has_formula_terms(self):
"""
Determine whether this CF-netCDF variable participates in a CF-netcdf formula term.
Returns:
Boolean.
"""
return bool(self.cf_terms_by_root)
class CFAncillaryDataVariable(CFVariable):
"""
A CF-netCDF ancillary data variable is a variable that provides metadata
about the individual values of another data variable.
Identified by the CF-netCDF variable attribute 'ancillary_variables'.
Ref: [CF] Section 3.4. Ancillary Data.
"""
cf_identity = 'ancillary_variables'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF ancillary data variables.
for nc_var_name, nc_var in target.iteritems():
# Check for ancillary data variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF ancillary data variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFAncillaryDataVariable(name, variables[name])
return result
class CFAuxiliaryCoordinateVariable(CFVariable):
"""
A CF-netCDF auxiliary coordinate variable is any netCDF variable that contains
coordinate data, but is not a CF-netCDF coordinate variable by definition.
There is no relationship between the name of a CF-netCDF auxiliary coordinate
variable and the name(s) of its dimension(s).
Identified by the CF-netCDF variable attribute 'coordinates'.
Also see :class:`iris.fileformats.cf.CFLabelVariable`.
Ref: [CF] Chapter 5. Coordinate Systems.
[CF] Section 6.2. Alternative Coordinates.
"""
cf_identity = 'coordinates'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF auxiliary coordinate variables.
for nc_var_name, nc_var in target.iteritems():
# Check for auxiliary coordinate variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF auxiliary coordinate variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
# Restrict to non-string type i.e. not a CFLabelVariable.
if not np.issubdtype(variables[name].dtype, np.str):
result[name] = CFAuxiliaryCoordinateVariable(name, variables[name])
return result
class CFBoundaryVariable(CFVariable):
"""
A CF-netCDF boundary variable is associated with a CF-netCDF variable that contains
coordinate data. When a data value provides information about conditions in a cell
occupying a region of space/time or some other dimension, the boundary variable
provides a description of cell extent.
A CF-netCDF boundary variable will have one more dimension than its associated
CF-netCDF coordinate variable or CF-netCDF auxiliary coordinate variable.
Identified by the CF-netCDF variable attribute 'bounds'.
Ref: [CF] Section 7.1. Cell Boundaries.
"""
cf_identity = 'bounds'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF boundary variables.
for nc_var_name, nc_var in target.iteritems():
# Check for a boundary variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF boundary variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFBoundaryVariable(name, variables[name])
return result
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
        Note that, by default, scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore the bounds extent dimension.
result = set(source[:-1]).issubset(target) or \
set(source[1:]).issubset(target)
return result
class CFClimatologyVariable(CFVariable):
"""
A CF-netCDF climatology variable is associated with a CF-netCDF variable that contains
coordinate data. When a data value provides information about conditions in a cell
occupying a region of space/time or some other dimension, the climatology variable
provides a climatological description of cell extent.
A CF-netCDF climatology variable will have one more dimension than its associated
CF-netCDF coordinate variable.
Identified by the CF-netCDF variable attribute 'climatology'.
Ref: [CF] Section 7.4. Climatological Statistics
"""
cf_identity = 'climatology'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF climatology variables.
for nc_var_name, nc_var in target.iteritems():
# Check for a climatology variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF climatology variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFClimatologyVariable(name, variables[name])
return result
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
        Note that, by default, scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore the climatology extent dimension.
result = set(source[:-1]).issubset(target) or \
set(source[1:]).issubset(target)
return result
class CFCoordinateVariable(CFVariable):
"""
A CF-netCDF coordinate variable is a one-dimensional variable with the same name
as its dimension, and it is defined as a numeric data type with values that are
ordered monotonically. Missing values are not allowed in CF-netCDF coordinate
variables. Also see [NUG] Section 2.3.1.
Identified by the above criterion, there is no associated CF-netCDF variable
attribute.
Ref: [CF] 1.2. Terminology.
"""
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True, monotonic=False):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF coordinate variables.
for nc_var_name, nc_var in target.iteritems():
if nc_var_name in ignore:
continue
# String variables can't be coordinates
if np.issubdtype(nc_var.dtype, np.str):
continue
# Restrict to one-dimensional with name as dimension OR zero-dimensional scalar
if not ((nc_var.ndim == 1 and nc_var_name in nc_var.dimensions) or (nc_var.ndim == 0)):
continue
# Restrict to monotonic?
if monotonic:
data = nc_var[:]
# Gracefully fill a masked coordinate.
if ma.isMaskedArray(data):
data = ma.filled(data)
if nc_var.shape == () or nc_var.shape == (1,) or iris.util.monotonic(data):
result[nc_var_name] = CFCoordinateVariable(nc_var_name, nc_var)
else:
result[nc_var_name] = CFCoordinateVariable(nc_var_name, nc_var)
return result
class CFDataVariable(CFVariable):
"""
A CF-netCDF variable containing data pay-load that maps to an Iris :class:`iris.cube.Cube`.
"""
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
raise NotImplementedError
class _CFFormulaTermsVariable(CFVariable):
"""
A CF-netCDF formula terms variable corresponds to a term in a formula that
allows dimensional vertical coordinate values to be computed from dimensionless
vertical coordinate values and associated variables at specific grid points.
Identified by the CF-netCDF variable attribute 'formula_terms'.
Ref: [CF] Section 4.3.2. Dimensional Vertical Coordinate.
[CF] Appendix D. Dimensionless Vertical Coordinates.
"""
cf_identity = 'formula_terms'
def __init__(self, name, data, formula_root, formula_term):
CFVariable.__init__(self, name, data)
# Register the formula root and term relationship.
self.add_formula_term(formula_root, formula_term)
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF formula terms variables.
for nc_var_name, nc_var in target.iteritems():
# Check for formula terms variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for match_item in _CF_PARSE.finditer(nc_var_att):
match_group = match_item.groupdict()
# Ensure that term name is lower case, as expected.
term_name = match_group['lhs'].lower()
variable_name = match_group['rhs']
if variable_name not in ignore:
if variable_name not in variables:
if warn:
message = 'Missing CF-netCDF formula term variable %r, referenced by netCDF variable %r'
warnings.warn(message % (variable_name, nc_var_name))
else:
if variable_name not in result:
result[variable_name] = _CFFormulaTermsVariable(variable_name,
variables[variable_name],
nc_var_name, term_name)
else:
result[variable_name].add_formula_term(nc_var_name, term_name)
return result
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__,
self.cf_name, self.cf_data,
self.cf_terms_by_root)
class CFGridMappingVariable(CFVariable):
"""
A CF-netCDF grid mapping variable contains a list of specific attributes that
define a particular grid mapping. A CF-netCDF grid mapping variable must contain
the attribute 'grid_mapping_name'.
Based on the value of the 'grid_mapping_name' attribute, there are associated
standard names of CF-netCDF coordinate variables that contain the mapping's
independent variables.
Identified by the CF-netCDF variable attribute 'grid_mapping'.
Ref: [CF] Section 5.6. Horizontal Coordinate Reference Systems, Grid Mappings, and Projections.
[CF] Appendix F. Grid Mappings.
"""
cf_identity = 'grid_mapping'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all grid mapping variables.
for nc_var_name, nc_var in target.iteritems():
# Check for a grid mapping variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF grid mapping variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFGridMappingVariable(name, variables[name])
return result
class CFLabelVariable(CFVariable):
"""
    A CF-netCDF CF label variable is any netCDF variable that contains string
textual information, or labels.
Identified by the CF-netCDF variable attribute 'coordinates'.
Also see :class:`iris.fileformats.cf.CFAuxiliaryCoordinateVariable`.
Ref: [CF] Section 6.1. Labels.
"""
cf_identity = 'coordinates'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF label variables.
for nc_var_name, nc_var in target.iteritems():
# Check for label variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF label variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
# Restrict to only string type.
if np.issubdtype(variables[name].dtype, np.str):
result[name] = CFLabelVariable(name, variables[name])
return result
def cf_label_data(self, cf_data_var):
"""
Return the associated CF-netCDF label variable strings.
Args:
* cf_data_var (:class:`iris.fileformats.cf.CFDataVariable`):
The CF-netCDF data variable which the CF-netCDF label variable describes.
Returns:
String labels.
"""
if not isinstance(cf_data_var, CFDataVariable):
raise TypeError('cf_data_var argument should be of type CFDataVariable. Got %r.' % type(cf_data_var))
# Determine the name of the label string (or length) dimension by
# finding the dimension name that doesn't exist within the data dimensions.
str_dim_name = list(set(self.dimensions) - set(cf_data_var.dimensions))
if len(str_dim_name) != 1:
raise ValueError('Invalid string dimensions for CF-netCDF label variable %r' % self.cf_name)
str_dim_name = str_dim_name[0]
label_data = self[:]
if isinstance(label_data, ma.MaskedArray):
label_data = label_data.filled()
# Determine whether we have a string-valued scalar label
# i.e. a character variable that only has one dimension (the length of the string).
if self.ndim == 1:
data = np.array([''.join(label_data).strip()])
else:
# Determine the index of the string dimension.
str_dim = self.dimensions.index(str_dim_name)
# Calculate new label data shape (without string dimension) and create payload array.
new_shape = tuple(dim_len for i, dim_len in enumerate(self.shape) if i != str_dim)
data = np.empty(new_shape, dtype='|S%d' % self.shape[str_dim])
for index in np.ndindex(new_shape):
# Create the slice for the label data.
if str_dim == 0:
label_index = (slice(None, None),) + index
else:
label_index = index + (slice(None, None),)
data[index] = ''.join(label_data[label_index]).strip()
return data
def cf_label_dimensions(self, cf_data_var):
"""
Return the name of the associated CF-netCDF label variable data dimensions.
Args:
* cf_data_var (:class:`iris.fileformats.cf.CFDataVariable`):
The CF-netCDF data variable which the CF-netCDF label variable describes.
Returns:
Tuple of label data dimension names.
"""
if not isinstance(cf_data_var, CFDataVariable):
raise TypeError('cf_data_var argument should be of type CFDataVariable. Got %r.' % type(cf_data_var))
return tuple([dim_name for dim_name in self.dimensions if dim_name in cf_data_var.dimensions])
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
        Note that, by default, scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore label string length dimension.
result = set(source[:-1]).issubset(target) or \
set(source[1:]).issubset(target)
return result
class CFMeasureVariable(CFVariable):
"""
A CF-netCDF measure variable is a variable that contains cell areas or volumes.
Identified by the CF-netCDF variable attribute 'cell_measures'.
Ref: [CF] Section 7.2. Cell Measures.
"""
cf_identity = 'cell_measures'
def __init__(self, name, data, measure):
CFVariable.__init__(self, name, data)
#: Associated cell measure of the cell variable
self.cf_measure = measure
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF measure variables.
for nc_var_name, nc_var in target.iteritems():
# Check for measure variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for match_item in _CF_PARSE.finditer(nc_var_att):
match_group = match_item.groupdict()
measure = match_group['lhs']
variable_name = match_group['rhs']
if variable_name not in ignore:
if variable_name not in variables:
if warn:
message = 'Missing CF-netCDF measure variable %r, referenced by netCDF variable %r'
warnings.warn(message % (variable_name, nc_var_name))
else:
result[variable_name] = CFMeasureVariable(variable_name, variables[variable_name], measure)
return result
################################################################################
class CFGroup(MutableMapping, object):
"""
Represents a collection of 'NetCDF Climate and Forecast (CF) Metadata
Conventions' variables and netCDF global attributes.
"""
def __init__(self):
#: Collection of CF-netCDF variables
self._cf_variables = {}
#: Collection of netCDF global attributes
self.global_attributes = {}
#: Collection of CF-netCDF variables promoted to a CFDataVariable.
self.promoted = {}
def _cf_getter(self, cls):
# Generate dictionary with dictionary comprehension.
return {cf_name:cf_var for cf_name, cf_var in self._cf_variables.iteritems() if isinstance(cf_var, cls)}
@property
def ancillary_variables(self):
"""Collection of CF-netCDF ancillary variables."""
return self._cf_getter(CFAncillaryDataVariable)
@property
def auxiliary_coordinates(self):
"""Collection of CF-netCDF auxiliary coordinate variables."""
return self._cf_getter(CFAuxiliaryCoordinateVariable)
@property
def bounds(self):
"""Collection of CF-netCDF boundary variables."""
return self._cf_getter(CFBoundaryVariable)
@property
def climatology(self):
"""Collection of CF-netCDF climatology variables."""
return self._cf_getter(CFClimatologyVariable)
@property
def coordinates(self):
"""Collection of CF-netCDF coordinate variables."""
return self._cf_getter(CFCoordinateVariable)
@property
def data_variables(self):
"""Collection of CF-netCDF data pay-load variables."""
return self._cf_getter(CFDataVariable)
@property
def formula_terms(self):
"""Collection of CF-netCDF variables that participate in a CF-netCDF formula term."""
return {cf_name:cf_var for cf_name, cf_var in self._cf_variables.iteritems() if cf_var.has_formula_terms()}
@property
def grid_mappings(self):
"""Collection of CF-netCDF grid mapping variables."""
return self._cf_getter(CFGridMappingVariable)
@property
def labels(self):
"""Collection of CF-netCDF label variables."""
return self._cf_getter(CFLabelVariable)
@property
def cell_measures(self):
"""Collection of CF-netCDF measure variables."""
return self._cf_getter(CFMeasureVariable)
def keys(self):
"""Return the names of all the CF-netCDF variables in the group."""
return self._cf_variables.keys()
def __len__(self):
return len(self._cf_variables)
def __iter__(self):
for item in self._cf_variables:
yield item
def __setitem__(self, name, variable):
if not isinstance(variable, CFVariable):
raise TypeError('Attempted to add an invalid CF-netCDF variable to the %s' % self.__class__.__name__)
if name != variable.cf_name:
raise ValueError('Mismatch between key name %r and CF-netCDF variable name %r' % (str(name), variable.cf_name))
self._cf_variables[name] = variable
def __getitem__(self, name):
if name not in self._cf_variables:
raise KeyError('Cannot get unknown CF-netCDF variable name %r' % str(name))
return self._cf_variables[name]
def __delitem__(self, name):
if name not in self._cf_variables:
raise KeyError('Cannot delete unknown CF-netcdf variable name %r' % str(name))
del self._cf_variables[name]
def __repr__(self):
result = []
result.append('variables:%d' % len(self._cf_variables))
result.append('global_attributes:%d' % len(self.global_attributes))
result.append('promoted:%d' % len(self.promoted))
return '<%s of %s>' % (self.__class__.__name__, ', '.join(result))
################################################################################
class CFReader(object):
"""
This class allows the contents of a netCDF file to be interpreted according
to the 'NetCDF Climate and Forecast (CF) Metadata Conventions'.
"""
def __init__(self, filename, warn=False, monotonic=False):
self._filename = os.path.expanduser(filename)
# All CF variable types EXCEPT for the "special cases" of
# CFDataVariable, CFCoordinateVariable and _CFFormulaTermsVariable.
self._variable_types = (CFAncillaryDataVariable, CFAuxiliaryCoordinateVariable,
CFBoundaryVariable, CFClimatologyVariable,
CFGridMappingVariable, CFLabelVariable, CFMeasureVariable)
#: Collection of CF-netCDF variables associated with this netCDF file
self.cf_group = CFGroup()
self._dataset = netCDF4.Dataset(self._filename, mode='r')
# Issue load optimisation warning.
if warn and self._dataset.file_format in ['NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
warnings.warn('Optimise CF-netCDF loading by converting data from NetCDF3 ' \
'to NetCDF4 file format using the "nccopy" command.')
self._check_monotonic = monotonic
self._translate()
self._build_cf_groups()
self._reset()
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._filename)
def _translate(self):
"""Classify the netCDF variables into CF-netCDF variables."""
netcdf_variable_names = list(self._dataset.variables.keys())
# Identify all CF coordinate variables first. This must be done
# first as, by CF convention, the definition of a CF auxiliary
# coordinate variable may include a scalar CF coordinate variable,
# whereas we want these two types of variables to be mutually exclusive.
coords = CFCoordinateVariable.identify(self._dataset.variables,
monotonic=self._check_monotonic)
self.cf_group.update(coords)
coordinate_names = list(self.cf_group.coordinates.keys())
# Identify all CF variables EXCEPT for the "special cases".
for variable_type in self._variable_types:
# Prevent grid mapping variables being mis-identified as CF coordinate variables.
ignore = None if issubclass(variable_type, CFGridMappingVariable) else coordinate_names
self.cf_group.update(variable_type.identify(self._dataset.variables, ignore=ignore))
# Identify global netCDF attributes.
attr_dict = {attr_name: _getncattr(self._dataset, attr_name, '') for
attr_name in self._dataset.ncattrs()}
self.cf_group.global_attributes.update(attr_dict)
# Identify and register all CF formula terms.
formula_terms = _CFFormulaTermsVariable.identify(self._dataset.variables)
for cf_var in formula_terms.itervalues():
for cf_root, cf_term in cf_var.cf_terms_by_root.iteritems():
# Ignore formula terms owned by a bounds variable.
if cf_root not in self.cf_group.bounds:
cf_name = cf_var.cf_name
if cf_var.cf_name not in self.cf_group:
self.cf_group[cf_name] = CFAuxiliaryCoordinateVariable(cf_name, cf_var.cf_data)
self.cf_group[cf_name].add_formula_term(cf_root, cf_term)
# Determine the CF data variables.
data_variable_names = set(netcdf_variable_names) - set(self.cf_group.ancillary_variables) - \
set(self.cf_group.auxiliary_coordinates) - set(self.cf_group.bounds) - \
set(self.cf_group.climatology) - set(self.cf_group.coordinates) - \
set(self.cf_group.grid_mappings) - set(self.cf_group.labels) - \
set(self.cf_group.cell_measures)
for name in data_variable_names:
self.cf_group[name] = CFDataVariable(name, self._dataset.variables[name])
def _build_cf_groups(self):
"""Build the first order relationships between CF-netCDF variables."""
def _build(cf_variable):
coordinate_names = list(self.cf_group.coordinates.keys())
cf_group = CFGroup()
# Build CF variable relationships.
for variable_type in self._variable_types:
# Prevent grid mapping variables being mis-identified as
# CF coordinate variables.
ignore = None if issubclass(variable_type, CFGridMappingVariable) else coordinate_names
match = variable_type.identify(self._dataset.variables, ignore=ignore,
target=cf_variable.cf_name, warn=False)
# Sanity check dimensionality coverage.
for cf_name, cf_var in match.iteritems():
if cf_var.spans(cf_variable):
cf_group[cf_name] = self.cf_group[cf_name]
else:
# Register the ignored variable.
# N.B. 'ignored' variable from enclosing scope.
ignored.add(cf_name)
msg = 'Ignoring variable {!r} referenced ' \
'by variable {!r}: Dimensions {!r} do not ' \
'span {!r}'.format(cf_name,
cf_variable.cf_name,
cf_var.dimensions,
cf_variable.dimensions)
warnings.warn(msg)
# Build CF data variable relationships.
if isinstance(cf_variable, CFDataVariable):
# Add global netCDF attributes.
cf_group.global_attributes.update(self.cf_group.global_attributes)
# Add appropriate "dimensioned" CF coordinate variables.
cf_group.update({cf_name: self.cf_group[cf_name] for cf_name
in cf_variable.dimensions if cf_name in
self.cf_group.coordinates})
# Add appropriate "dimensionless" CF coordinate variables.
coordinates_attr = getattr(cf_variable, 'coordinates', '')
cf_group.update({cf_name: self.cf_group[cf_name] for cf_name
in coordinates_attr.split() if cf_name in
self.cf_group.coordinates})
# Add appropriate formula terms.
for cf_var in self.cf_group.formula_terms.itervalues():
for cf_root in cf_var.cf_terms_by_root:
if cf_root in cf_group and cf_var.cf_name not in cf_group:
# Sanity check dimensionality.
if cf_var.spans(cf_variable):
cf_group[cf_var.cf_name] = cf_var
else:
# Register the ignored variable.
# N.B. 'ignored' variable from enclosing scope.
ignored.add(cf_var.cf_name)
msg = 'Ignoring formula terms variable {!r} ' \
'referenced by data variable {!r} via ' \
'variable {!r}: Dimensions {!r} do not ' \
'span {!r}'.format(cf_var.cf_name,
cf_variable.cf_name,
cf_root,
cf_var.dimensions,
cf_variable.dimensions)
warnings.warn(msg)
# Add the CF group to the variable.
cf_variable.cf_group = cf_group
# Ignored variables are those that cannot be attached to a
# data variable as the dimensionality of that variable is not
# a subset of the dimensionality of the data variable.
ignored = set()
for cf_variable in self.cf_group.itervalues():
_build(cf_variable)
# Determine whether there are any formula terms that
# may be promoted to a CFDataVariable.
if iris.FUTURE.netcdf_promote:
# Restrict promotion to only those formula terms
# that are reference surface/phenomenon.
for cf_var in self.cf_group.formula_terms.itervalues():
for cf_root, cf_term in cf_var.cf_terms_by_root.iteritems():
cf_root_var = self.cf_group[cf_root]
name = cf_root_var.standard_name or cf_root_var.long_name
terms = reference_terms.get(name, [])
if isinstance(terms, basestring) or \
not isinstance(terms, Iterable):
terms = [terms]
cf_var_name = cf_var.cf_name
if cf_term in terms and \
cf_var_name not in self.cf_group.promoted:
data_var = CFDataVariable(cf_var_name, cf_var.cf_data)
self.cf_group.promoted[cf_var_name] = data_var
_build(data_var)
break
# Promote any ignored variables.
promoted = set()
not_promoted = ignored.difference(promoted)
while not_promoted:
cf_name = not_promoted.pop()
if cf_name not in self.cf_group.data_variables and \
cf_name not in self.cf_group.promoted:
data_var = CFDataVariable(cf_name,
self.cf_group[cf_name].cf_data)
self.cf_group.promoted[cf_name] = data_var
_build(data_var)
# Determine whether there are still any ignored variables
# yet to be promoted.
promoted.add(cf_name)
not_promoted = ignored.difference(promoted)
def _reset(self):
"""Reset the attribute touch history of each variable."""
for nc_var_name in self._dataset.variables.iterkeys():
self.cf_group[nc_var_name].cf_attrs_reset()
def __del__(self):
# Explicitly close dataset to prevent file remaining open.
self._dataset.close()
def _getncattr(dataset, attr, default=None):
"""
Simple wrapper round `netCDF4.Dataset.getncattr` to make it behave
more like `getattr`.
"""
try:
value = dataset.getncattr(attr)
except AttributeError:
value = default
return value
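# Illustrative usage sketch (not part of the original module; the file name
# and variable names below are hypothetical):
#
#   >>> cf = CFReader('example.nc')
#   >>> sorted(cf.cf_group.data_variables)          # names of cube payloads
#   ['air_temperature']
#   >>> temp = cf.cf_group['air_temperature']
#   >>> sorted(temp.cf_group.coordinates)           # first-order relationships
#   ['latitude', 'longitude', 'time']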
|
Jozhogg/iris
|
lib/iris/fileformats/cf.py
|
Python
|
lgpl-3.0
| 43,610
|
# -*- coding: utf-8 -*-
"""User models."""
from sunshine.database import (
Column, CRUDMixin, Model, SurrogatePK, db, relationship)
class Station(SurrogatePK, Model, CRUDMixin):
"""A weather station.
name: name of the station
lat: latitude of the station, float, degrees
lon: longitude of the station, float, degrees
"""
__tablename__ = 'stations'
name = Column(db.String(80), unique=True, nullable=False)
lat = Column(db.Float)
lon = Column(db.Float)
altitude = Column(db.Float)
def __init__(self, name, **kwargs):
"""Create instance."""
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
"""Represent instance as a unique string."""
return '<Station({name})>'.format(name=self.name)
class Observation(SurrogatePK, Model, CRUDMixin):
""" A weather observation.
station: id
time: date and time of observation
temperature: temperature in degrees C
pressure: pressure reading at location, float, hectopascals or millibars
humidity: 0.0 - 1.0, with 1.0 meaning 100% humidity, float
windspeed: speed of wind in knots
winddirection: wind direction, 0-360, float, degrees
"""
__tablename__ = 'observations'
station_id = db.Column(db.Integer, db.ForeignKey('stations.id'))
station = db.relationship('Station',
backref=db.backref('observations', lazy='dynamic'))
time = Column(db.DateTime, nullable=False)
temperature = Column(db.Float, nullable=True)
pressure = Column(db.Float, nullable=True)
humidity = Column(db.Float, nullable=True)
windspeed = Column(db.Float, nullable=True)
winddirection = Column(db.Float, nullable=True)
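# Illustrative sketch (not part of the original module): creating a station and
# attaching an observation. ``create`` is assumed to come from CRUDMixin (this
# project's commit-on-create convention); ``datetime`` would need importing,
# and a Flask app/database context must already be set up.
#
#   >>> station = Station.create(name='Hilltop', lat=-33.93, lon=18.42,
#   ...                          altitude=120.0)
#   >>> obs = Observation.create(station=station, time=datetime.utcnow(),
#   ...                          temperature=21.5, pressure=1013.2,
#   ...                          humidity=0.45, windspeed=8.0,
#   ...                          winddirection=270.0)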
|
peakrisk/sunshine
|
sunshine/rays/models.py
|
Python
|
lgpl-3.0
| 1,725
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests,json,urllib,re,oauth2 as oauth
from datetime import datetime
from time import mktime,strptime
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponse as response
from django.http import HttpResponseRedirect as redirect
from django.shortcuts import render
from django.db import IntegrityError
from .models import *
from .main import Efforia
from .stream import Dropbox
class Search(Efforia):
def __init__(self): pass
def explore(self,request):
try: query = request.GET['explore']
except KeyError as e: query = ''
u = self.current_user(request)
others = [x['id'] for x in Profile.objects.values('id')]
objects = self.feed(u,others)
filter(lambda obj: query.lower() in obj.name.lower(),objects)
return self.view_mosaic(request,objects)
class Follows(Efforia):
def __init__(self): pass
def view_following(self,request):
u = self.current_user(request); rels = []
for f in Followed.objects.filter(follower=u.id):
rels.append(Profile.objects.filter(user_id=f.followed)[0])
request.COOKIES['permissions'] = 'view_only'
return self.view_mosaic(request,rels)
def become_follower(self,request):
u = self.current_user(request).id
followed = Profile.objects.filter(id=request.GET['profile_id'])[0].user_id
follow = Followed(followed=followed,follower=u)
follow.save()
return response('Profile followed successfully')
def leave_follower(self,request):
u = self.current_user(request).id
followed = request.GET['profile_id']
query = Followed.objects.filter(followed=followed,follower=u)
if len(query): query[0].delete()
return response('Profile unfollowed successfully')
class ID(Efforia):
def __init__(self): pass
def view_id(self,request):
u = self.current_user(request)
if 'first_turn' in request.GET:
if u.profile.first_turn: return response('yes')
else: return response('no')
elif 'object' in request.GET:
o,t = request.GET['object'][0].split(';')
now,objs,rels = self.get_object_bydate(o,t)
obj = globals()[objs].objects.all().filter(date=now)[0]
if hasattr(obj,'user'): return response(str(obj.user.id))
            else: return response(str(self.current_user(request).id))
        else: return response(str(self.current_user(request).id))
def finish_tutorial(self,request):
u = self.current_user(request)
p = Profile.objects.all().filter(user=u)[0]
p.first_time = False
p.save()
        return response('Tutorial finished.')
class Deletes(Efforia):
def delete_element(self,request):
oid = request.GET['id']
modobj = settings.EFFORIA_TOKENS[request.GET['token']]
module,obj = modobj.split('.')
o = self.class_module('%s.models'%module,obj)
query = o.objects.filter(id=oid)
if len(query): query[0].delete()
return response('Object deleted successfully')
class Tutorial(Efforia):
def view_tutorial(self,request):
social = False if 'social' not in request.GET else True
return render(request,'tutorial.jade',{'static_url':settings.STATIC_URL,'social':social})
def update_profile(self,request,url,user):
birthday = career = bio = ''
p = user.profile
for k,v in request.POST.iteritems():
if 'birth' in k: p.birthday = self.convert_datetime(v)
elif 'career' in k: p.career = v
elif 'bio' in k: p.bio = v
p.save()
        return response('Added information to profile successfully')
#return redirect(url)
def finish_tutorial(self,request):
whitespace = ' '
data = request.POST
name = request.COOKIES['username']
u = User.objects.filter(username=name)[0]
if 'name' in data:
lname = data['name'].split()
u.first_name,u.last_name = lname[0],whitespace.join(lname[1:])
u.save()
request.session['user'] = name
        if len(request.POST) == 0: return response('Added information to profile successfully')#return redirect('/')
else: return self.update_profile(request,'/',u)
class Authentication(Efforia):
def social_update(self,request,typesoc,profile):
data = request.REQUEST
u = self.current_user(request)
p = Profile.objects.filter(user=u)[0]
if 'google' in typesoc: p.google_token = profile['google_token']
elif 'twitter' in typesoc: p.twitter_token = '%s;%s' % (profile['key'],profile['secret'])
elif 'facebook' in typesoc: p.facebook_token = profile['facebook_token']
p.save()
return redirect('/')
def social_register(self,request,typesoc,profile):
data = request.REQUEST
whitespace = ' '; r = None;
facebook = twitter = google = ''
if 'google' in typesoc:
username = profile['name'].lower()
google = profile['google_token']
elif 'twitter' in typesoc:
username = profile['screen_name']
twitter = '%s;%s' % (profile['key'],profile['secret'])
elif 'facebook' in typesoc:
username = profile['link'].split('/')[-1:][0]
facebook = profile['facebook_token']
        # Already registered: perform social login
if len(list(User.objects.filter(username=username))) > 0:
request.session['user'] = username
r = redirect('/')
        # Not registered: store the social keys and profile, then redirect to the tutorial
else:
u = User.objects.create_user(username,password=User.objects.make_random_password())
p = Profile(user=u,facebook_token=facebook,twitter_token=twitter,google_token=google)
p.save()
r = redirect('tutorial?social=%s'%data['social'])
r.set_cookie('username',username)
r.set_cookie('permissions','super')
return r
def authenticate(self,request):
data = request.REQUEST
if 'profile' in data:
profile = self.json_decode(data['profile']); t = data['social']
            # Profile update with social tokens
if 'user' in request.session: return self.social_update(request,t,profile)
            # Profile registration with a social token
else: return self.social_register(request,t,profile)
elif 'username' not in data or 'password' not in data:
return response(json.dumps({'error':'User or password missing'}),mimetype='application/json')
else:
username = data['username']
password = data['password']
exists = User.objects.filter(username=username)
if exists:
if exists[0].check_password(password):
obj = json.dumps({'username':username,'userid':exists[0].id})
request.session['user'] = username
r = response(json.dumps({'success':'Login successful'}),
mimetype = 'application/json')
r.set_cookie('permissions','super')
return r
else:
obj = json.dumps({'error':'User or password wrong'})
return response(obj,mimetype='application/json')
def leave(self,request):
del request.session['user']
return response(json.dumps({'success':'Logout successful'}),mimetype='application/json')
def view_register(self,request):
return render(request,'register.html',{'static_url':settings.STATIC_URL,'hostname':request.get_host()},content_type='text/html')
def participate(self,request):
whitespace = ' '
username = password = first_name = last_name = ''
for k,v in request.POST.iteritems():
if 'username' in k:
u = User.objects.filter(username=v)
if len(u) > 0: return response('Username already exists')
else: username = v
elif 'password' in k:
if v not in request.POST['repeatpassword']: return response('Password mismatch')
else: password = v
elif 'name' in k: first_name,last_name = whitespace.join(v.split()[:1]),whitespace.join(v.split()[1:])
user = User(username=username,first_name=first_name,last_name=last_name)
user.set_password(password)
user.save()
r = redirect('tutorial')
r.set_cookie('username',username)
r.set_cookie('permissions','super')
return r
class Twitter(Efforia):
def update_status(self,request):
u = self.current_user(request)
if len(request.GET['content']) > 137:
short = unicode('%s...' % (request.GET['content'][:137]))
else: short = unicode('%s' % (request.GET['content']))
tokens = u.profile.twitter_token
if not tokens: tokens = self.own_access()['twitter_token']
data = {'status':short.encode('utf-8')}
self.oauth_post_request('/statuses/update.json',tokens,data,'twitter')
return response('Published posting successfully on Twitter')
class Facebook(Efforia):
def update_status(self,request):
u = self.current_user(request)
token = u.profile.facebook_token
text = unicode('%s' % request.GET['content'])
data = {'message':text.encode('utf-8')}
if 'id' in request.REQUEST: url = '/%s/feed' % request.REQUEST['id']
else: url = '/me/feed'
self.oauth_post_request(url,token,data,'facebook')
return response('Published posting successfully on Facebook')
def send_event(self,request):
u = self.current_user(request)
token = u.profile.facebook_token
name = dates = descr = local = value = ''
for k,v in request.REQUEST.iteritems():
if 'name' in k: name = v.encode('utf-8')
elif 'deadline' in k: dates = v
elif 'description' in k: descr = v.encode('utf-8')
elif 'location' in k: local = v.encode('utf-8')
elif 'value' in k: value = v
date = self.convert_datetime(dates)
url = 'http://%s/efforia/basket?alt=redir&id=%s&value=%s&token=@@'%(settings.EFFORIA_URL,value,name)
data = {'name':name,'start_time':date,'description':descr,'location':local,'ticket_uri':url}
id = json.loads(self.oauth_post_request("/me/events",token,data,'facebook'))['id']
return response(id)
def send_event_cover(self,request):
u = self.current_user(request)
token = u.profile.facebook_token
ident = request.REQUEST['id']
photo = request.REQUEST['url']
self.oauth_post_request('/%s'%ident,token,{'cover_url':photo},'facebook')
return response('Published image cover on event successfully on Facebook')
class Coins(Efforia):
def discharge(self,request):
userid = request.REQUEST['userid']
values = request.REQUEST['value']
u = Profile.objects.filter(user=(userid))[0]
u.credit -= int(values)
u.save()
j = json.dumps({'objects':{
'userid':userid,
'value':u.credit
}})
return response(j,mimetype='application/json')
def recharge(self,request):
userid = request.REQUEST['userid']
values = request.REQUEST['value']
u = Profile.objects.filter(user=(userid))[0]
u.credit += int(values)
u.save()
        j = json.dumps({'objects':{
            'userid': userid,
            'value': u.credit
        }})
return response(j,mimetype='application/json')
def balance(self,request):
userid = request.GET['userid']
        j = json.dumps({'objects':{
            'userid': userid,
            'value': Profile.objects.filter(user=int(userid))[0].credit
        }})
return response(j,mimetype='application/json')
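# Illustrative note (not part of the original module): the three credit
# endpoints above all respond with JSON of the form
#   {"objects": {"userid": <user id>, "value": <current credit>}}
# e.g. {"objects": {"userid": "42", "value": 150}} after a recharge.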
|
efforia/eos-dashboard
|
pandora-ckz/pandora/social.py
|
Python
|
lgpl-3.0
| 12,049
|
import sys
import pytest
from utils import *
from addons import *
subject1 = """
3 au
Co 0 0 0
H 2 0 0
h_OTher -2 0 0
"""
ans1_au = """3 au
CoH2
Co 0.000000000000 0.000000000000 0.000000000000
H 2.000000000000 0.000000000000 0.000000000000
H -2.000000000000 -0.000000000000 0.000000000000"""
ans1_ang = """3
CoH2
Co 0.000000000000 0.000000000000 0.000000000000
H 1.058354417180 0.000000000000 0.000000000000
H -1.058354417180 -0.000000000000 0.000000000000"""
ans1c_ang = """3
CoH2
59Co 0.00000000 0.00000000 0.00000000
1H 1.05835442 0.00000000 0.00000000
1H_other -1.05835442 -0.00000000 0.00000000"""
#subject2 = """
#Co 0 0 0
#units au
#no_reorient
#--
#@H 2 0 0
#h_OTher -2 0 0
#"""
#
#ans2_au = """3 au
#
#Co 0.000000000000 0.000000000000 0.000000000000
#@H 2.000000000000 0.000000000000 0.000000000000
#H -2.000000000000 0.000000000000 0.000000000000"""
#
#ans2_ang = """3
#
#Co 0.000000000000 0.000000000000 0.000000000000
#Gh(1) 1.058354417180 0.000000000000 0.000000000000
#H -1.058354417180 0.000000000000 0.000000000000"""
#
#ans2c_ang = """2
#
#Co 0.000000000000 0.000000000000 0.000000000000
#H -1.058354417180 0.000000000000 0.000000000000"""
subject2 = """
Co 0 0 0
no_reorient
--
@H 1.05835441718 0 0
h_OTher -1.05835441718 0 0
"""
ans2_au = """3 au
CoH2
Co 0.000000000000 0.000000000000 0.000000000000
@H 2.000000000000 0.000000000000 0.000000000000
H -2.000000000000 0.000000000000 0.000000000000"""
ans2_ang = """3
CoH2
Co 0.000000000000 0.000000000000 0.000000000000
Gh(1) 1.058354417180 0.000000000000 0.000000000000
H -1.058354417180 0.000000000000 0.000000000000"""
ans2c_ang = """2
CoH2
Co 0.000000000000 0.000000000000 0.000000000000
H -1.058354417180 0.000000000000 0.000000000000"""
def test_toxyz_1a():
subject = subject1
mol = qcdb.Molecule(subject)
xyz = mol.to_string(dtype='xyz', units='Bohr')
assert compare_strings(ans1_au, xyz, sys._getframe().f_code.co_name)
def test_toxyz_1b():
subject = subject1
mol = qcdb.Molecule(subject)
xyz = mol.to_string(dtype='xyz', units='Angstrom')
assert compare_strings(ans1_ang, xyz, sys._getframe().f_code.co_name)
def test_toxyz_1c():
subject = subject1
mol = qcdb.Molecule(subject)
xyz = mol.to_string(dtype='xyz', prec=8, atom_format='{elea}{elem}{elbl}')
print(xyz)
assert compare_strings(ans1c_ang, xyz, sys._getframe().f_code.co_name)
#def test_toxyz_2a():
# subject = subject2
# mol = qcdb.Molecule(subject)
#
# xyz = mol.to_string(dtype='xyz', units='Bohr')
#
# assert compare_strings(ans2_au, xyz, sys._getframe().f_code.co_name)
#
#def test_toxyz_2b():
# subject = subject2
# mol = qcdb.Molecule(subject)
#
# xyz = mol.to_string(dtype='xyz', units='Angstrom', ghost_format='Gh({elez})')
#
# assert compare_strings(ans2_ang, xyz, sys._getframe().f_code.co_name)
#
#def test_toxyz_2c():
# subject = subject2
# mol = qcdb.Molecule(subject)
#
# xyz = mol.to_string(dtype='xyz', units='Angstrom', ghost_format='')
#
# assert compare_strings(ans2c_ang, xyz, sys._getframe().f_code.co_name)
def test_toxyz_2a():
subject = subject2
mol = qcdb.Molecule(subject)
xyz = mol.to_string(dtype='xyz', units='Bohr')
assert compare_strings(ans2_au, xyz, sys._getframe().f_code.co_name)
def test_toxyz_2b():
subject = subject2
mol = qcdb.Molecule(subject)
xyz = mol.to_string(dtype='xyz', units='Angstrom', ghost_format='Gh({elez})')
assert compare_strings(ans2_ang, xyz, sys._getframe().f_code.co_name)
def test_toxyz_2c():
subject = subject2
mol = qcdb.Molecule(subject)
xyz = mol.to_string(dtype='xyz', units='Angstrom', ghost_format='')
assert compare_strings(ans2c_ang, xyz, sys._getframe().f_code.co_name)
@using_psi4_molrec
def test_toxyz_3a():
import psi4
subject = subject2
mol = psi4.core.Molecule.from_string(subject)
xyz = mol.to_string(dtype='xyz', units='Bohr')
assert compare_strings(ans2_au, xyz, sys._getframe().f_code.co_name)
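# Illustrative note (not part of the original test module): these are plain
# pytest functions, so given a built qcdb/psi4 environment they can be run
# directly, e.g. ``pytest -v test_to_string.py``.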
|
amjames/psi4
|
psi4/driver/qcdb/pytest/test_to_string.py
|
Python
|
lgpl-3.0
| 4,698
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
###############################################
# Author: Lemuel
# E-mail: <wlemuel@hotmail.com>
# Licence: GPL
# Filename: seperater.py
# Created Time: 2013-09-21 12:31
# Last modified: 2013-09-21 12:31
###############################################
import pygtk
pygtk.require('2.0')
import gtk
from utils import color_hex_to_cairo
from constant import THEME
class Seperater(gtk.Label):
def __init__(self, width=1, direction="h"):
super(Seperater, self).__init__()
self.width = width
self.direction = direction
self.size_request()
self.connect("expose_event", self.expose_event)
def size_request(self):
if self.direction == "v":
self.set_size_request(self.width, self.allocation.height)
else:
self.set_size_request(self.allocation.width, self.width)
def expose_event(self, widget, event):
cr = widget.window.cairo_create()
rect = widget.allocation
if self.direction == "v":
real_rect = (rect.x, rect.y,
self.width, rect.height)
else:
real_rect = (rect.x, rect.y,
rect.width, self.width)
cr.set_source_rgb(*color_hex_to_cairo(THEME['hover']))
cr.rectangle(*real_rect)
cr.fill()
return True
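# Minimal usage sketch (illustrative only; requires a running PyGTK 2
# environment). A horizontal Seperater is packed between two widgets to draw a
# thin rule in the theme's hover colour:
#
#   >>> box = gtk.VBox()
#   >>> box.pack_start(gtk.Label("above"))
#   >>> box.pack_start(Seperater(width=2, direction="h"), expand=False)
#   >>> box.pack_start(gtk.Label("below"))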
|
Lemueler/Petro-UI
|
seperater.py
|
Python
|
lgpl-3.0
| 1,399
|
#
# cif.py
#
# Python CIF parser: https://github.com/gjbekker/cif-parsers
#
# By Gert-Jan Bekker
# License: MIT
# See https://github.com/gjbekker/cif-parsers/blob/master/LICENSE
#
import gzip, os, re
try: import json
except: import simplejson as json
try:
str.partition
def partitionString(string, sep): return string.partition(sep)
except:
def partitionString(string, sep):
tmp = string.split(sep)
return [tmp.pop(0), sep, sep.join(tmp)]
class _loop:
def __init__(self, parserObj):
self.parserObj = parserObj
self.length = 0
self.refID = -1
self.refList = []
self.namesDefined = False
def addName(self, name):
catName = type(name) == str and partitionString(name, ".") or ["", "", ""]
if catName[1]:
if not self.parserObj.currentTarget[-2].has_key(catName[0]): self.parserObj.currentTarget[-2][catName[0]] = {}
if not self.parserObj.currentTarget[-2][catName[0]].has_key(catName[2]): self.parserObj.currentTarget[-2][catName[0]][catName[2]] = []
self.refList.append(self.parserObj.currentTarget[-2][catName[0]][catName[2]])
else:
if not self.parserObj.currentTarget[-2].has_key(catName[0]): self.parserObj.currentTarget[-2][catName[0]] = []
self.refList.append(self.parserObj.currentTarget[-2][catName[0]])
self.length = len(self.refList)
def pushValue(self, value):
if not self.namesDefined: self.namesDefined = True
target = self.nextTarget()
if value == "stop_": return self.stopPush()
target.append(value)
def nextTarget(self):
self.refID = (self.refID+1)%self.length
return self.refList[self.refID]
def stopPush(self):
self.refID = -1
def specialSplit(content):
output = [["", False]]
quote = False
length = len(content)
log = ""
for c in xrange(length):
isWS = content[c] == " " or content[c] == "\t"
if (content[c] == "'" or content[c] == '"') and (c == 0 or content[c-1] == " " or content[c-1] == "\t" or c == length-1 or content[c+1] == " " or content[c+1] == "\t"): quote = not quote
elif not quote and isWS and output[-1][0] != "": output.append(["", False])
elif not quote and content[c] == "#": break
elif not isWS or quote:
output[-1][0] += content[c]
output[-1][1] = quote
if output[-1][0] == "": output.pop()
return output
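# Illustrative example: specialSplit tokenises one CIF line into
# [value, was_quoted] pairs, honouring quotes and discarding '#' comments:
#
#   >>> specialSplit("_cell.length_a 10.5 'two words' # comment")
#   [['_cell.length_a', False], ['10.5', False], ['two words', True]]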
class targetSetter:
def __init__(self, obj, key):
self.obj = obj
self.key = key
def setValue(self, value): self.obj[self.key] = value
class CIFparser:
def __init__(self):
self.data = {}
self.currentTarget = None
self.loopPointer = None
def parseString(self, contents):
multi_line_mode = False
buffer = []
for line in contents.splitlines():
Z = line[:1]
line = line.strip()
if Z == ";":
if multi_line_mode: self.setDataValue("\n".join(buffer))
else: buffer = []
multi_line_mode = not multi_line_mode
line = line[1:].strip()
if multi_line_mode: buffer.append(line)
else: self.processContent(specialSplit(line))
def parse(self, fileobj):
multi_line_mode = False
buffer = []
for line in fileobj.readlines():
Z = line[:1]
line = line.strip()
if Z == ";":
if multi_line_mode: self.setDataValue("\n".join(buffer))
else: buffer = []
multi_line_mode = not multi_line_mode
line = line[1:].strip()
if multi_line_mode: buffer.append(line)
else: self.processContent(specialSplit(line))
def processContent(self, content):
for c, quoted in content:
if c == "global_" and not quoted:
self.loopPointer = None
self.selectGlobal()
elif c[:5] == "data_" and not quoted:
self.loopPointer = None
self.selectData(c)
elif c[:5] == "save_" and not quoted:
self.loopPointer = None
if c[5:]: self.selectFrame(c)
else: self.endFrame()
elif c == "loop_" and not quoted: self.loopPointer = _loop(self)
elif c[:1] == "_" and not quoted: self.setDataName(c[1:])
else: self.setDataValue(c)
def setDataName(self, name):
if self.loopPointer != None:
if self.loopPointer.namesDefined: self.loopPointer = None
else: return self.loopPointer.addName(name)
name = partitionString(name, ".")
self.currentTarget.pop()
if name[1]:
if not self.currentTarget[-1].has_key(name[0]): self.currentTarget[-1][name[0]] = {}
self.currentTarget[-1][name[0]][name[2]] = ""
self.currentTarget = self.currentTarget + [targetSetter(self.currentTarget[-1][name[0]], name[2])]
else:
self.currentTarget[-1][name[0]] = ""
self.currentTarget = self.currentTarget + [targetSetter(self.currentTarget[-1], name[0])]
def setDataValue(self, value):
if self.loopPointer != None: self.loopPointer.pushValue(value)
else: self.currentTarget[-1].setValue([value])
def selectGlobal(self): self.currentTarget = [self.data, self.data, None]
def selectData(self, name):
if not self.data.has_key(name): self.data[name] = {}
self.currentTarget = [self.data, self.data[name], None]
def selectFrame(self, name=""):
if not self.currentTarget[1].has_key(name): self.currentTarget[1][name] = {}
self.currentTarget = self.currentTarget[:2] + [self.currentTarget[1][name], None]
def endData(self):
self.currentTarget = self.currentTarget[:2]
def endFrame(self):
self.currentTarget = self.currentTarget[:3]
####################################################################################################################################################
class __CIFfloat__(float):
def __repr__(self): return '%.15g' % self
class __CIFint__(int):
def __repr__(self): return str(self)
def __CIFfloatRange__(inp):
try:
pos = inp.index("-", 1)
return (__CIFfloat__(inp[:pos]), __CIFfloat__(inp[pos+1:]))
except: return (__CIFfloat__(inp),)
def __CIFintRange__(inp):
try:
pos = inp.index("-", 1)
return (__CIFint__(inp[:pos]), __CIFint__(inp[pos+1:]))
except: return (__CIFint__(inp),)
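# Illustrative example: the range helpers split "a-b" style CIF values into
# tuples, falling back to a one-element tuple for plain numbers:
#
#   >>> __CIFintRange__("3-7")
#   (3, 7)
#   >>> __CIFfloatRange__("1.5")
#   (1.5,)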
def __loadCIFdic__(dicFile, force=False):
jsfDic = dicFile[:-4]+".json"
jsf = dicFile[:-4]+"_summary.json"
dic = {}
try:
        if force: raise ValueError('force re-parse of the CIF dictionary')
dic = json.loads(open(jsf).read())
except:
parser = CIFparser()
parser.parse(open(dicFile))
json.dump(parser.data, open(jsfDic, "w"))
for k,v in parser.data["data_mmcif_pdbx.dic"].iteritems():
if type(v) != dict or not v.has_key("item_type"): continue
name = partitionString(k[6:], ".")
if not dic.has_key(name[0]): dic[name[0]] = {}
dic[name[0]][name[2]] = v["item_type"]["code"][0].strip()
json.dump(dic, open(jsf, "w"))
typing = {}
for k,v in dic.iteritems():
for k2, v2 in v.iteritems():
if v2 == "int":
if not typing.has_key(k): typing[k] = {}
typing[k][k2] = __CIFint__
elif v2 == "float":
if not typing.has_key(k): typing[k] = {}
typing[k][k2] = __CIFfloat__
elif v2 == "int-range":
if not typing.has_key(k): typing[k] = {}
typing[k][k2] = __CIFintRange__
elif v2 == "float-range":
if not typing.has_key(k): typing[k] = {}
typing[k][k2] = __CIFfloatRange__
return typing
def __dumpCIF__(jso): return __dumpPart__(jso)
__cifStrCheck__ = re.compile(r"[\s()]")
__cifStrNLCheck__ = re.compile(r"[\n]")
def __dumpStr__(inp):
if inp == None: return "?"
else:
if type(inp) != str and type(inp) != unicode: return str(inp)
if re.search(__cifStrNLCheck__, inp) != None: return "\n;%s\n;"%inp
if re.search(__cifStrCheck__, inp) != None: return "'%s'"%inp
else: return inp
def __padString__(inp, flength): return inp+(" "*(flength-len(inp)))
def __dumpCat__(k, v):
output = "#\n"
noi = len(v[v.keys()[0]])
if noi == 1:
pad = 0
for k2 in v.keys():
if len(k2) > pad: pad = len(k2)
pad += 3
for k2 in v.keys(): output += "_%s.%s%s\n"%(k, __padString__(k2, pad), __dumpStr__(v[k2][0]))
else:
output += "loop_\n"
pad = []
for k2 in v.keys():
output += "_%s.%s\n"%(k, k2)
pad.append(0)
tmp1 = []
for i in xrange(noi):
tmp2 = []
tmp1.append(tmp2)
for k2 in v.keys(): tmp2.append(__dumpStr__(v[k2][i]))
for j in xrange(len(tmp1[0])):
pad = 0
for i in xrange(len(tmp1)):
if tmp1[i][j][:2] != "\n;" and len(tmp1[i][j]) > pad: pad = len(tmp1[i][j])
pad += 1
for i in xrange(len(tmp1)):
                if tmp1[i][j][:2] != "\n;": tmp1[i][j] = __padString__(tmp1[i][j], pad)
for i in xrange(noi): output += "".join(tmp1[i])+"\n";
return output.strip()+"\n"
def __dumpPart__(jso):
inner = True
output = ""
for k,v in jso.items():
if isinstance(v, dict):
if k[:5] != "data_" and k[:5] != "save_" and k[:7] != "global_": output += __dumpCat__(k, v)
else:
output += k+"\n"
output += __dumpPart__(v)
inner = False
if inner: return output+"#\n"
else: return output
def __loadCIFData__(data, doClean=True, doType=True):
parser = CIFparser()
if type(data) == str: parser.parseString(data)
else: parser.parse(data) # fileobj
if not doClean: return parser.data
for k,v in parser.data.iteritems():
for k2, v2 in v.iteritems():
for k3, v3 in v2.iteritems():
for i in xrange(len(v3)): v2[k3][i] = not (v3[i] == "?" or v3[i] == ".") and v3[i] or None
if not doType or not __mmcifTyping__: return parser.data
for struct, data in parser.data.iteritems():
for k,v in __mmcifTyping__.iteritems():
if not data.has_key(k): continue
else:
for k2, v2 in v.iteritems():
if data[k].has_key(k2):
for r in xrange(len(data[k][k2])):
try: data[k][k2][r] = v2(data[k][k2][r])
except: pass
return parser.data
def __loadCIF__(cifFile, doClean=True, doType=True):
parser = CIFparser()
if cifFile[-3:].lower() == ".gz": parser.parse(gzip.open(cifFile))
else: parser.parse(open(cifFile))
if not doClean: return parser.data
for k,v in parser.data.iteritems():
for k2, v2 in v.iteritems():
for k3, v3 in v2.iteritems():
for i in xrange(len(v3)): v2[k3][i] = not (v3[i] == "?" or v3[i] == ".") and v3[i] or None
if not doType or not __mmcifTyping__: return parser.data
for struct, data in parser.data.iteritems():
for k,v in __mmcifTyping__.iteritems():
if not data.has_key(k): continue
else:
for k2, v2 in v.iteritems():
if data[k].has_key(k2):
for r in xrange(len(data[k][k2])):
try: data[k][k2][r] = v2(data[k][k2][r])
except: pass
return parser.data
__mmcifTyping__ = None
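# Editor-added usage sketch (hedged; "example.cif" is a hypothetical path, and the
# dictionary-driven typing step only applies if __loadCIFdic__ has populated
# __mmcifTyping__ beforehand):
#
#   data = __loadCIF__("example.cif")   # {"data_XXXX": {category: {item: [values]}}}
#   text = __dumpCIF__(data)            # serialize the parsed blocks back to CIF text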
|
gjbekker/molmil
|
apps/agora/vr/structures/cif.py
|
Python
|
lgpl-3.0
| 11,165
|
from django.http import JsonResponse
from data_2015_fall.models import *
import simplejson
from collections import defaultdict
from operator import itemgetter
# ===================================================
# Classes
# ===================================================
class Expert(object):
def __init__(self, name):
self.name = name
def toDict(self):
return {
"name": self.name
}
# ===================================================
# Functions
# ===================================================
def getTopKExpertsByKeywords(request, keywords, k):
keywordsList = keywords.split("+")
expertHash = defaultdict(int) # map expert name to its count
list1 = []
    # count how often each author appears on papers whose titles match a keyword
for keyword in keywordsList:
for article in Article.nodes.filter(title__icontains=keyword):
list1.append(article.title)
for author in article.authors:
expertHash[author.name]+=1
experts = []
for key, value in sorted(expertHash.iteritems(), key=itemgetter(1), reverse=True)[0:int(k)]:
experts.append(Expert(key))
#print key + ", " + str(value) # debug msg
return JsonResponse({ "experts": [e.toDict() for e in experts] })
|
cmusv-sc/DIWD-Team4-Wei-Lin-Tsai
|
src/python/data_2015_fall/api/queryExperts.py
|
Python
|
unlicense
| 1,307
|
# coding: utf-8
"""
@Author: Well
@Date: 2014 - 05 - 10
"""
# common Capability(8)
# automationName : Appium (default) or Selendroid
# * platformName : iOS, Android, or FirefoxOS
# * platformVersion : e.g. 7.1, 4.4
# * deviceName : eg., iPhone Simulator, Android Emulator
# app : /abs/path/to/my.apk or http://myapp.com/app.ipa, or .zip also ok. Incompatible with browserName
# browserName : ‘Safari’ for iOS and ‘Chrome’, ‘Chromium’, or ‘Browser’ for Android
# newCommandTimeout : e.g. 60
# autoLaunch : true(default) or false
# Android Only(10)
# appActivity : e.g.MainActivity, .Settings
# appPackage : e.g.com.example.android.myApp, com.android.settings
# appWaitActivity : e.g. SplashActivity (a transitional activity, such as a splash/intro screen)
# appWaitPackage : e.g.com.example.android.myApp, com.android.settings
# deviceReadyTimeout : e.g. 5
# compressXml : true # setCompressedLayoutHeirarchy(true)
# androidCoverage : e.g. com.my.Pkg/com.my.Pkg.instrumentation.MyInstrumentation
# enablePerformanceLogging : true(default), false # Chrome and webview only
# avdLaunchTimeout : 120000(default)
import os
# Returns abs path relative to this file and not cwd
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
def get_desired_capabilities(app):
if app == 'Chrome':
desired_caps = {
'platformName': 'Android',
'platformVersion': '4.4',
'deviceName': '0481575a0b4b78ce',
'browserName': app,
}
    elif app == 'Safari':
desired_caps = {
'platformName': 'iOS',
'platformVersion': '6.1',
'deviceName': 'iPhone Simulator',
'browserName': app,
}
    # To launch a specific app/activity instead, uncomment e.g.:
# 'appActivity': '.Calculator',
# 'appPackage': 'com.android.calculator2',
    elif app == '':
desired_caps = {
'platformName': 'Android',
'platformVersion': '4.4',
'deviceName': '0481575a0b4b78ce',
}
else:
desired_caps = {
'platformName': 'Android',
'platformVersion': '4.4',
'deviceName': '0481575a0b4b78ce',
'app': PATH('../../apps/' + app),
}
return desired_caps
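# Editor-added usage sketch (hedged): with an Appium server listening locally, the
# capabilities above would typically be handed to the Appium Python client. The
# server URL below is an assumption for illustration, not part of this file.
#
#   from appium import webdriver
#   driver = webdriver.Remote('http://localhost:4723/wd/hub',
#                             get_desired_capabilities('Chrome'))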
|
Vincent-HaoZ/neil_test_appium
|
appium/test/android/desired_capabilities.py
|
Python
|
unlicense
| 2,242
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wildlife.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
smagic39/PostGis-Cookbook-Code
|
Chapter 9/wildlife/manage.py
|
Python
|
unlicense
| 251
|
"""
STM32 Bootloader tool
See the following STMicro application notes:
* AN2606 for the general description
* AN3155 for the protocol
Dependencies:
* pySerial
"""
import binascii
import struct
import serial
def log(text):
print(text)
# AN2606: 3.2 Bootloader identification
__bl_interfaces = [
(),
    ( 'usart', ),
( 'usart', 'usart2' ),
( 'usart', 'can', 'dfu' ),
( 'usart', 'dfu' ),
( 'usart', 'i2c' ),
    ( 'i2c', ),
( 'usart', 'can', 'dfu', 'i2c' ),
( 'i2c', 'spi' ),
( 'usart', 'can', 'dfu', 'i2c', 'spi' ),
( 'usart', 'dfu', 'i2c' ),
( 'usart', 'i2c', 'spi' ),
( 'usart', 'spi' ),
( 'usart', 'dfu', 'i2c', 'spi' ),
]
# AN2606: 48 Device-dependent bootloader parameters
__products = {
"\x04\x10": { 'name': "STM32F10xxxx (medium density)", 'flash_base': 0x08000000, 'flash_size': 0x20000, 'ram_base': 0x20000000, 'ram_size': 0x5000, 'ram_valid': 0x20000200 }
# TODO: add more devices!
}
class BootloaderError(Exception):
"Generic bootloader error"
pass
class TimeoutError(BootloaderError):
"Communications timeout"
pass
class ProtocolError(BootloaderError):
"Data exchange protocol error"
pass
class CommandError(BootloaderError):
"Command execution error"
pass
ACK = "\x79"
NAK = "\x1F"
def _append_checksum(data):
"Compute and append the checksum"
cs = 0
if len(data) == 1:
cs = (~ord(data)) & 0xFF
else:
for x in data:
cs ^= ord(x)
return data + chr(cs)
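# Worked example (editor-added): per AN3155, a single command byte is followed by
# its bitwise complement, while multi-byte frames carry the XOR of all their bytes.
#   _append_checksum("\x00") == "\x00\xff"   # GET command 0x00, complement 0xFF
#   _append_checksum("\x11") == "\x11\xee"   # READ MEMORY 0x11, complement 0xEE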
class Stm32Bootloader(object):
"Encapsulates the bootloader functionality"
def __init__(self, port, autobaud=True):
self._p = port
if autobaud:
self._run_autobaud()
def _run_autobaud(self):
"Automatic baud rate detection procedure"
self._p.write("\x7F")
        if self._receive_ack():
log("Autobaud procedure successful (got ACK)")
else:
log("Autobaud procedure successful (got NAK; assuming baud rate is correct)")
def _receive_bytes(self, count):
"Receive N bytes from the port"
buffer = ''
while count > 0:
chunk = self._p.read(count)
if not chunk:
raise TimeoutError("receiving data")
buffer += chunk
count -= len(chunk)
return buffer
def _receive_ack(self):
"Receive and verify the ACK byte"
ack = self._p.read()
if not ack:
raise TimeoutError("receiving ACK")
if ack == ACK:
return True
if ack == NAK:
return False
raise ProtocolError("unexpected response: %02x" % ord(ack))
def _send_data_check_ack(self, data):
self._p.write(_append_checksum(data))
return self._receive_ack()
def _receive_data_check_ack(self, count):
data = self._receive_bytes(count)
if not self._receive_ack():
raise ProtocolError("expected ACK; got NAK instead")
return data
def get_blinfo(self):
"Retrieve the bootloader version and the list of supported commands"
if not self._send_data_check_ack("\x00"):
raise CommandError("command failed")
count = struct.unpack('B', self._receive_bytes(1))[0] + 1
rsp = self._receive_data_check_ack(count)
version = ord(rsp[0]) & 0xFF
supported_cmds = rsp[1:]
return { 'version': version, 'supported_cmds': supported_cmds }
def get_pid(self):
"Retrieve the product ID (2 bytes currently)"
if not self._send_data_check_ack("\x02"):
raise CommandError("command failed")
count = struct.unpack('B', self._receive_bytes(1))[0] + 1
rsp = self._receive_data_check_ack(count)
return rsp
def read_memory(self, addr, count):
"Read memory region"
if not self._send_data_check_ack("\x11"):
raise CommandError("read protection is enabled")
if not self._send_data_check_ack(struct.pack('>I', addr)):
raise CommandError("address is rejected by the device")
if not self._send_data_check_ack(struct.pack('B', count - 1)):
raise CommandError("count is rejected by the device")
rsp = self._receive_bytes(count)
return rsp
def write_memory(self, addr, data):
"Write memory region"
if not self._send_data_check_ack("\x31"):
raise CommandError("read protection is enabled")
if not self._send_data_check_ack(struct.pack('>I', addr)):
raise CommandError("address is rejected by the device")
if not self._send_data_check_ack(struct.pack('B', len(data) - 1) + data):
raise CommandError("checksum error")
# NOTE: according to the diagram in AN3155,
# NAK is not sent if memory address is invalid
def erase_memory(self, pages):
"Erase memory pages"
if not self._send_data_check_ack("\x43"):
raise CommandError("read protection is enabled")
        if pages is None:
            # Whole device: per AN3155, 0xFF requests a global (mass) erase
            if not self._send_data_check_ack("\xFF"):
                raise CommandError("global erase rejected by the device")
else:
# Specific pages
data = struct.pack('B%dB' % len(pages), len(pages) - 1, *pages)
if not self._send_data_check_ack(data):
raise CommandError("checksum error")
def write_protect(self, sectors):
"Apply write protection to flash sectors"
if not self._send_data_check_ack("\x63"):
raise CommandError("read protection is enabled")
data = struct.pack('B%dB' % len(sectors), len(sectors) - 1, *sectors)
if not self._send_data_check_ack(data):
raise CommandError("checksum error")
def write_unprotect(self):
"Remove write protection from all flash"
if not self._send_data_check_ack("\x73"):
raise CommandError("read protection is enabled")
self._receive_ack()
def readout_protect(self):
"Enable readout protection on the device"
if not self._send_data_check_ack("\x82"):
raise CommandError("read protection is enabled")
self._receive_ack()
def readout_unprotect(self):
"Disable readout protection on the device"
if not self._send_data_check_ack("\x92"):
raise CommandError("something went wrong")
self._receive_ack()
def go(self, addr):
"Start executing code from the specified address"
if not self._send_data_check_ack("\x21"):
raise CommandError("read protection is enabled")
if not self._send_data_check_ack(struct.pack('>I', addr)):
raise CommandError("address is rejected by the device")
# End
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
sys.exit(1)
port = sys.argv[1]
baudrate = 57600
p = serial.Serial(port, baudrate, parity=serial.PARITY_EVEN, timeout=2)
bl = Stm32Bootloader(p)
blid = bl.get_blinfo()['version']
log("Bootloader version: %02x" % blid)
bl_ifs = __bl_interfaces[blid >> 4]
log("Bootloader interfaces: %s" % str(bl_ifs))
pid = bl.get_pid()
log("Product ID: %s" % binascii.hexlify(pid))
product = __products[pid]
log("Product: %s" % product['name'])
flash_base = product['flash_base']
flash_size = product['flash_size']
block_size = 0x100
log("Dumping memory: %08x:%08x" % (flash_base, flash_base + flash_size))
with open('flash_dump.bin', 'wb') as fp:
for offset in xrange(0, flash_size, block_size):
data = bl.read_memory(flash_base + offset, block_size)
fp.write(data)
log("Dumping completed")
# EOF
|
dev-zzo/pwn-tools
|
embedded/stm32bldr.py
|
Python
|
unlicense
| 7,844
|
import numpy as np
from numpy import linalg
from numpy.testing import assert_array_almost_equal, assert_almost_equal
from numpy.testing import assert_equal, assert_array_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from scipy.sparse import csr_matrix
from scipy.spatial.distance import cosine, cityblock, minkowski
from sklearn.utils.testing import assert_greater
from ..pairwise import euclidean_distances
from ..pairwise import linear_kernel
from ..pairwise import chi2_kernel, additive_chi2_kernel
from ..pairwise import polynomial_kernel
from ..pairwise import rbf_kernel
from ..pairwise import sigmoid_kernel
from .. import pairwise_distances, pairwise_kernels
from ..pairwise import pairwise_kernel_functions
from ..pairwise import check_pairwise_arrays
from ..pairwise import _parallel_pairwise
def test_pairwise_distances():
""" Test the pairwise_distance helper function. """
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test cosine as a string metric versus cosine callable
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
def test_pairwise_parallel():
rng = np.random.RandomState(0)
for func in (np.array, csr_matrix):
X = func(rng.random_sample((5, 4)))
Y = func(rng.random_sample((3, 4)))
S = euclidean_distances(X)
S2 = _parallel_pairwise(X, None, euclidean_distances, n_jobs=-1)
assert_array_almost_equal(S, S2)
S = euclidean_distances(X, Y)
S2 = _parallel_pairwise(X, Y, euclidean_distances, n_jobs=-1)
assert_array_almost_equal(S, S2)
def test_pairwise_kernels():
""" Test the pairwise_kernels helper function. """
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in pairwise_kernel_functions.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = pairwise_kernel_functions[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
continue
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def callable_rbf_kernel(x, y, **kwds):
""" Callable version of pairwise.rbf_kernel. """
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_euclidean_distances():
""" Check the pairwise Euclidean distances computation"""
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
def test_kernel_symmetry():
""" Valid kernels should be symmetric"""
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_check_dense_matrices():
""" Ensure that pairwise array check works for dense matrices."""
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_equal(XA, XA_checked)
def test_check_XB_returned():
""" Ensure that if XA and XB are given correctly, they return as equal."""
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA, XA_checked)
assert_equal(XB, XB_checked)
def test_check_different_dimensions():
""" Ensure an error is raised if the dimensions are different. """
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_invalid_dimensions():
""" Ensure an error is raised on 1D input arrays. """
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
""" Ensures that checks return valid sparse matrices. """
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
assert_equal(XA_sparse, XA_checked)
assert_equal(XB_sparse, XB_checked)
def tuplify(X):
""" Turns a numpy matrix (any n-dimensional array) into tuples."""
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
""" Ensures that checks return valid tuples. """
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_equal(XA_tuples, XA_checked)
assert_equal(XB_tuples, XB_checked)
|
seckcoder/lang-learn
|
python/sklearn/sklearn/metrics/tests/test_pairwise.py
|
Python
|
unlicense
| 11,874
|
#!python3.3
# -*- coding: utf-8 -*-
#from distutils.core import setup
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
import sys
import os
DISTUTILS_DEBUG = True
py_version = sys.version_info[:2]
PY3 = py_version[0] == 3
if not PY3:
raise RuntimeError('Python 3.x is required')
thisdir = os.path.dirname(__file__)
with open(os.path.join(thisdir, 'README.md')) as file:
long_description = file.read()
setup(name = 'pyHexa',
version = '0.0.3', # major.minor.revision
platforms = ['Linux', 'Windows'],
url = 'https://github.com/Rod-Persky/pyHexa',
classifiers = [
'Development Status :: 3 - Alpha',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: Academic Free License (AFL)',
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Environment :: No Input/Output (Daemon)', # No IO required
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: Microsoft',
'Operating System :: POSIX',
'Operating System :: OS Independent',
],
description = 'Python Hexapod WEB UI',
long_description = long_description,
license = 'Academic Free License ("AFL") v. 3.0',
author = 'Rodney Persky',
author_email = 'rodney.persky@gmail.com',
packages = find_packages('pyHexa'),
package_dir = {'pyHexa': 'pyHexa'},
zip_safe = True,
include_package_data = True,
py_modules = ['ez_setup'],
install_requires=['bottle>=0.11.5',
'cherrypy>=3.2.4'],
)
|
Rod-Persky/pyLynxHexapod
|
setup.py
|
Python
|
unlicense
| 2,125
|
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
d = {}
left = -1
right = 0
max = 0
if len(s) < 2:
return len(s)
while right < len(s)-1:
d[s[right]] = right
right += 1
            if s[right] in d and d[s[right]] > left:
left = d[s[right]]
if right - left > max:
max = right - left
return max
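# Editor-added sanity check (not part of the original submission):
if __name__ == '__main__':
    solver = Solution()
    print(solver.lengthOfLongestSubstring("abcabcbb"))  # 3 -> "abc"
    print(solver.lengthOfLongestSubstring("bbbbb"))     # 1 -> "b"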
|
taulk/oj
|
LeetCode/[3]longest-substring-without-repeating-characters/Solution.py
|
Python
|
unlicense
| 518
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt4 (Qt v4.8.7)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x04\x28\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x03\xef\x49\x44\x41\x54\x78\x5e\xd5\x99\x7d\x4a\xe3\x40\
\x14\xc0\xad\x16\x44\x51\x12\x54\x44\x51\x4c\x44\xff\x50\x44\x9b\
\x7a\x81\xd5\x13\x34\x7b\x83\x78\x82\xcd\x9e\x60\x7b\x84\xf5\x06\
\xf1\x06\xe9\x09\x36\x5e\x40\x1b\x41\x54\xfc\x20\x11\x45\xf1\xab\
\x1d\x11\x45\xa8\xba\x2f\x4b\x35\x62\xe6\xb3\xd3\x90\xee\x83\xa1\
\xc2\xbc\x79\x33\xbf\xd7\xd7\x37\xef\x8d\xb9\xb7\xb7\xb7\xae\x5c\
\x2e\xd7\xd5\x8a\x2c\x2f\x2f\xff\x80\x8f\xdf\x0c\x35\x6f\x6b\x6b\
\x6b\xb5\x2b\x25\xc9\xcb\x2c\x06\xf8\x32\x87\xda\x4a\xb1\x58\xd4\
\xb6\xb7\xb7\xc3\x8e\x02\x30\x0c\xa3\x04\x00\x2a\xa7\xba\x0d\xe3\
\x67\x47\x01\xc0\xe1\x4d\x01\x75\xf3\x7f\x07\xd0\x97\x96\x96\x0a\
\x3b\x3b\x3b\x7e\x47\x00\x2c\x2e\x2e\xc6\xe1\xc3\x2f\x56\x1a\x61\
\x94\x6f\xd1\xfb\x56\x3c\x91\x2d\x80\x70\x1a\x5d\x58\x58\x50\xe0\
\xa3\x4e\x51\x09\x60\xe8\x84\x39\x73\x77\x77\xb7\x92\x5d\x08\xb1\
\x63\xbf\xda\x1c\x16\xe5\xc7\xdc\xd1\x00\x5e\x34\x68\x00\xf3\xf3\
\xf3\xf6\xde\xde\x1e\xca\x04\x60\x6e\x6e\x4e\x61\x00\x38\xfb\xfb\
\xfb\x3e\xe8\x91\xe6\xd5\x66\x4a\xdd\xc8\x00\x80\xe9\xfd\xfa\xc1\
\xc1\x81\xdf\xd4\x73\xe1\x03\xab\x9b\x29\xc0\xeb\xeb\xab\x4d\x99\
\x76\x3f\xe9\x51\x01\x32\x49\xa3\xb3\xb3\xb3\x1a\x78\xd6\xa0\x03\
\xc4\x30\xa0\x8b\xfd\x96\x20\xe3\x65\x01\xc0\xbe\x79\xe1\x60\xde\
\xfb\xc4\xe1\xe1\x21\x9a\x99\x99\x89\xb2\x91\xf1\xe5\xf0\x06\xcc\
\x85\x59\x01\x58\x34\xef\x1f\x1f\x1f\xa3\x2f\xfa\xce\xe7\x52\x3b\
\xf2\xfc\xd1\xd1\x51\x98\xc9\x4d\x3c\x3d\x3d\xcd\x0a\x1f\x0f\x07\
\x05\x10\xef\x00\xce\xc9\xc9\xc9\x66\x06\xc5\x1c\xc3\xfb\xb1\x77\
\xdd\xaf\x13\x70\xe0\x50\xd7\xf5\xf7\x5b\x39\x9e\xef\x40\x80\x6a\
\x10\x04\x21\x01\x3c\x02\xb3\xc2\x30\x4c\xdc\xbe\x9a\xa6\x29\xcd\
\xb2\x43\xc5\xd9\x84\x35\xc5\xb6\x00\x4c\x4d\x4d\x15\x00\x40\xa7\
\xa8\x38\x14\x70\x87\x91\x14\x48\x15\xad\x01\xfb\x7e\x3b\x3d\x3d\
\xdd\x64\x03\xc8\x57\x9e\x1e\x69\x02\x0e\xe0\xc3\x41\xca\x2d\xda\
\x8d\xe6\xd9\x00\x92\xe9\x33\x38\x3b\x3b\xa3\x36\x29\x00\x91\xa8\
\x7b\x26\x27\x27\x35\x00\x58\xe1\xe8\xe2\xd6\xa4\x00\x26\x26\x26\
\x58\xe1\xe3\xa6\xd8\xcd\xa9\xb0\x7f\xe9\xfc\xfc\xbc\x42\x05\x60\
\x6c\x64\x33\x36\x71\xda\xd8\x10\xd5\x61\xa8\x98\x30\xa2\x03\xc8\
\x14\x6f\x17\x17\x17\xc2\x3d\xee\xf8\xf8\x38\xe9\x4e\x29\x63\xde\
\x98\x4c\xd0\x57\x60\x1f\x24\x0c\x30\x36\x36\xc6\xea\x7b\xeb\xa0\
\xf3\x8b\x16\x5e\x97\x97\x97\x3e\xa7\xf7\xab\xa0\xbb\x0e\xf6\x22\
\x08\x55\xa4\x7a\xcd\x4b\xc4\xa9\x0e\xa3\xcc\x0a\x2f\x4e\x00\xaf\
\x39\xe7\x61\xaa\x55\x4b\x1e\x40\x5c\xaa\x57\x57\x57\x89\xcb\x6d\
\x74\x74\x94\x94\x14\x1c\x4a\x1f\xb1\x02\xeb\xb4\xc8\x1e\x37\xc0\
\xc8\xc8\x48\x09\x6a\x7a\x55\x02\xc0\x21\xf4\x13\xd8\x1f\xef\xcd\
\xcd\xcd\x47\x23\x04\x83\x94\x52\xd7\x29\x00\xe2\xcf\x26\x8c\xda\
\x88\x14\x3e\x26\x2d\x15\x5f\x5f\x5f\xa3\xe1\xe1\x61\x2f\xf2\x7a\
\x22\x8c\x78\x01\x86\x86\x86\x14\xc9\xf0\x71\x6f\x6f\x6f\x11\xc6\
\x6e\x89\x10\x3e\x6e\xa2\x7e\x4a\x02\x18\xb0\xbe\x70\x77\x77\xe7\
\x33\x01\x38\x0e\x1f\x80\x87\x03\xca\xbc\x88\xf7\xeb\xb5\x5a\xad\
\x82\x2b\xc3\xc9\x0f\x63\xf2\x00\x65\xd8\x74\x43\xe2\xf6\xa5\xd6\
\x51\xe0\xe5\x50\x55\xd5\xb8\x9b\x8b\xc5\x64\x02\x28\x20\x1c\xad\
\xa3\x70\xf9\x00\x66\x49\x77\x4a\x6c\x8b\xd4\xcd\xc5\xa2\x83\x9d\
\x02\x42\xc8\x4f\x00\x08\x54\x88\x2e\x02\x91\xf0\x3e\xdd\x19\xec\
\x30\xb2\x61\xac\xc9\x00\x08\x7b\x7f\x70\x70\x50\x21\xd8\x75\xef\
\xef\xef\xb1\xce\x40\x08\x85\xb0\x0e\xf7\xc6\x6a\x12\x01\x06\x06\
\x06\x30\x35\x8a\x7c\xf8\x50\x42\xd2\x80\x3d\xff\x90\xc0\x09\x21\
\xa7\xc2\x9a\xd2\xc3\xc3\x43\x25\x06\xe0\xcf\x3e\x2e\x2c\x44\x6d\
\x04\xd0\x9b\x83\x22\xec\x07\xe2\x7c\x9a\xe1\xd3\xdf\xdf\x9f\x4c\
\x0a\xf2\x62\x81\x5d\xfb\xf1\xf1\x11\x7d\x00\xf4\xf5\xf5\xa5\x1c\
\x3e\xf2\x42\xaa\x50\xf3\x9c\x8d\x8b\xfb\xf4\xf4\xd4\x4a\xf8\xd8\
\x9c\x7d\x34\xed\x35\xdb\xe0\x01\xa0\xc5\x62\x3d\xce\xc9\xfc\xd2\
\xdb\xdb\x4b\x4a\x0a\xd5\xe7\xe7\xe7\x55\x01\x3b\x35\x5c\x8f\x10\
\xd9\x07\x3b\xe1\x3f\x00\x30\xf8\x9d\x65\x48\x3e\x7c\xd8\x6d\x28\
\xed\x6d\x89\x54\xa1\x02\x80\xbc\x08\x56\xb4\xae\x20\x80\x47\x00\
\xb0\x52\x03\xe8\xe9\xe9\xd1\xa0\xf6\xc7\x86\xcf\xcb\xcb\x4b\xc8\
\x0b\xc0\xe8\x11\x8c\x68\x9f\x7c\x4a\xde\xb7\xdb\xf5\x8a\xd1\x68\
\x34\x50\x77\x77\x37\xe9\x1f\x26\xb6\x04\x80\x44\xed\x23\x6e\x8f\
\x04\x60\xa6\x00\x40\xcc\x6a\x1e\x80\x31\xc3\x87\x52\xdc\xe1\xde\
\x8d\xf4\xbf\x17\x73\x56\xa3\x99\xa0\x5b\xa0\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x04\
\x00\x06\xfa\x24\
\x00\x69\
\x00\x63\x00\x6f\x00\x34\
\x00\x08\
\x06\x57\x5a\xe7\
\x00\x66\
\x00\x6f\x00\x6e\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
techbliss/Python_editor
|
6.8/plugins/Code editor/icons/iconf.py
|
Python
|
unlicense
| 5,271
|
def maxsubsumOn3(vector):
maxsum = 0
vectorlen = len(vector)
for i in range(vectorlen):
        for j in range(i, vectorlen):
            thissum = 0
            for k in range(i, j + 1):  # include element j so every subarray is summed
                thissum = thissum + vector[k]
            if thissum > maxsum:
                maxsum = thissum
    return maxsum
array=[4,-3,15,-2,-1,2,-6,2]
print(maxsubsumOn3(array))
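# Editor-added comparison sketch: Kadane's algorithm gives the same answer in O(n).
# Like maxsubsumOn3 above, it treats the empty subarray as having sum 0.
def maxsubsumKadane(vector):
    best = 0
    current = 0
    for x in vector:
        current = max(0, current + x)  # restart the window once the running sum goes negative
        best = max(best, current)
    return best
print(maxsubsumKadane(array))  # also 16 for the array above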
|
cagriulas/algorithm-analysis-17
|
w1/maxsubsumOn3.py
|
Python
|
unlicense
| 381
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFDS input pipelines for GLUE and C4 datasets."""
from typing import Callable, Dict, Iterable, Optional
import jax
import numpy as np
import tensorflow_datasets as tfds
import sentencepiece as spm
def _tfds_stream(dataset_name,
split,
batch_size,
data_dir,
shuffle_files,
shuffle_buffer_size,
batch_shuffle_size,
preprocess_fun,
repeat = True):
"""Streams batches of examples from TFDS, with pure-python pre-processing."""
ds = tfds.load(
name=dataset_name,
split=split,
data_dir=data_dir,
shuffle_files=shuffle_files)
if repeat:
ds = ds.cache()
ds = ds.repeat()
if shuffle_buffer_size is not None:
ds = ds.shuffle(shuffle_buffer_size)
ds = ds.batch(batch_size)
if batch_shuffle_size is not None:
ds = ds.shuffle(batch_shuffle_size)
for batch in tfds.as_numpy(ds):
yield preprocess_fun(batch)
def glue_inputs(dataset_name,
split,
batch_size,
tokenizer,
data_dir = None,
max_seq_length = 128,
training = True):
"""Input pipeline for fine-tuning on GLUE tasks.
Args:
dataset_name: TFDS dataset name.
split: Which dataset split to use (TRAINING, TEST or VALIDATION)
batch_size: Number of examples in each batch.
tokenizer: Tokenizer for converting text to integers representations.
data_dir: Optional directory from which to load dataset.
max_seq_length: Sequences longer than this are truncated; shorter sequences
are padded.
training: In training mode, we shuffle, repeat and buffer the dataset.
Returns:
Batched examples for specified dataset with keys and array types/shapes:
* "input_ids": <np.int32>[batch_size, max_seq_length]
* "type_ids": <np.int32>[batch_size, max_seq_length]
* "idx": <np.int32>[batch_size]
* "label": <np.int32>[batch_size]
"""
keys_lookup = {
"glue/cola": ("sentence",),
"glue/sst2": ("sentence",),
"glue/mrpc": ("sentence1", "sentence2"),
"glue/qqp": ("question1", "question2"),
"glue/stsb": ("sentence1", "sentence2"),
"glue/mnli": ("hypothesis", "premise"),
"glue/qnli": ("question", "sentence"),
"glue/rte": ("sentence1", "sentence2"),
# WNLI requires a special training recipe, so we don't eval on it.
"glue/wnli": ("sentence1", "sentence2")
}
keys = keys_lookup[dataset_name]
cls_id = tokenizer.PieceToId("[CLS]")
sep_id = tokenizer.PieceToId("[SEP]")
pad_id = tokenizer.pad_id()
def preprocess(batch):
"""Tokenize and convert text to model inputs."""
idx = batch["idx"]
input_batch_size = idx.shape[0]
input_ids = np.full((input_batch_size, max_seq_length),
pad_id,
dtype=np.int32)
type_ids = np.zeros((input_batch_size, max_seq_length), dtype=np.int32)
for i in range(input_batch_size):
ex_input_ids = [cls_id]
ex_type_ids = [0]
for type_id, key in enumerate(keys):
tokens = tokenizer.EncodeAsIds(batch[key][i]) + [sep_id]
ex_input_ids.extend(tokens)
ex_type_ids.extend([type_id] * len(tokens))
ex_input_ids = ex_input_ids[:max_seq_length]
ex_type_ids = ex_type_ids[:max_seq_length]
input_ids[i, :len(ex_input_ids)] = ex_input_ids
type_ids[i, :len(ex_type_ids)] = ex_type_ids
return {
"input_ids": input_ids,
"type_ids": type_ids,
"idx": idx.astype(np.int32),
"label": batch["label"]
}
return _tfds_stream(
dataset_name=dataset_name,
split=split,
batch_size=batch_size,
data_dir=data_dir,
shuffle_files=training,
shuffle_buffer_size=1024 if training else None,
batch_shuffle_size=128 if training else None,
preprocess_fun=preprocess,
repeat=training)
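# Editor-added usage sketch (hedged): the SentencePiece model path is an assumption
# for illustration; any model exposing [CLS]/[SEP] pieces and a pad id would do.
#   tokenizer = spm.SentencePieceProcessor()
#   tokenizer.Load("/path/to/spiece.model")
#   batches = glue_inputs("glue/sst2", tfds.Split.TRAIN, batch_size=32,
#                         tokenizer=tokenizer, max_seq_length=128, training=True)
#   first_batch = next(batches)  # dict with "input_ids", "type_ids", "idx", "label"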
def _c4_data_unbatched(tokenizer,
max_seq_length):
"""Yields examples from C4 corpus that have len(text) <= max_seq_length."""
cls_id = tokenizer.PieceToId("[CLS]")
sep_id = tokenizer.PieceToId("[SEP]")
pad_id = tokenizer.pad_id()
ds = tfds.load(name="c4/en", split="train", shuffle_files=True)
ds = ds.repeat()
ds = ds.shuffle(1024)
ds = ds.batch(16) # Batch documents to potentially speed up input pipeline
input_ids_buf = np.full((1024, max_seq_length), pad_id, dtype=np.int32)
type_ids_buf = np.zeros((1024, max_seq_length), dtype=np.int32)
next_sentence_labels_buf = np.full(1024, -1, dtype=np.int32)
for batch in tfds.as_numpy(ds):
for text in batch["text"]:
text = str(text, "utf-8")
lines = [tokenizer.EncodeAsIds(line) for line in text.splitlines()]
j = 0
while j < len(lines) - 1:
if len(lines[j]) + len(lines[j + 1]) > max_seq_length - 3:
j += 1
else:
idx = np.random.randint(input_ids_buf.shape[0])
if next_sentence_labels_buf[idx] != -1:
yield {
"input_ids": input_ids_buf[idx].copy(),
"type_ids": type_ids_buf[idx].copy(),
"next_sentence_labels": next_sentence_labels_buf[idx].copy(),
}
input_ids_buf[idx] = pad_id
type_ids_buf[idx] = 1
cum_len = 0
for k in range(j, len(lines)):
cum_len += len(lines[k])
if cum_len > max_seq_length - 3:
k -= 1
break
selected_lines = lines[j:k + 1]
j = k + 1
pivot = np.random.randint(1, len(selected_lines))
if np.random.random() < 0.5:
datum = [cls_id]
for tokens in selected_lines[:pivot]:
datum.extend(tokens)
datum.append(sep_id)
type_ids_buf[idx, :len(datum)] = 0
for tokens in selected_lines[pivot:]:
datum.extend(tokens)
datum.append(sep_id)
next_sentence_label = 0
type_ids_buf[idx, len(datum):] = 0
else:
datum = [cls_id]
for tokens in selected_lines[pivot:]:
datum.extend(tokens)
datum.append(sep_id)
type_ids_buf[idx, :len(datum)] = 0
for tokens in selected_lines[:pivot]:
datum.extend(tokens)
datum.append(sep_id)
next_sentence_label = 1
type_ids_buf[idx, len(datum):] = 0
input_ids_buf[idx] = pad_id
input_ids_buf[idx, :len(datum)] = datum
next_sentence_labels_buf[idx] = next_sentence_label
def c4_masked_lm_inputs(
batch_size, tokenizer, max_seq_length,
max_predictions_per_seq, masking_rate,
mask_token_proportion,
random_token_proportion):
""""Generates a batch of masked examples from the C4 corpus.
Args:
batch_size: Number of examples in each batch.
tokenizer: Tokenizer for converting text to integers representations.
max_seq_length: Sequences longer than this are truncated; shorter sequences
are padded.
max_predictions_per_seq: Maximum number of masked LM predictions per
sequence.
masking_rate: Proportion of tokens for masked LM predictions. Total number
of selected tokens will be at most max_predictions_per_seq.
mask_token_proportion: Proportion of masked tokens to replace with ['MASK'].
random_token_proportion: Proportion of masked tokens to replace with a
random token. Remaining 1-mask_token_proportion-random_token_proportion
fraction of selected tokens are left as is.
Yields:
Batches of examples with keys and array types/shapes:
* "input_ids": <np.int32>[batch_size, max_seq_length]
* "type_ids": <np.int32>[batch_size, max_seq_length]
* "masked_lm_positions": <np.int32>[batch_size, max_predictions_per_seq]
* "masked_lm_ids": <np.int32>[batch_size ,max_predictions_per_seq]
* "masked_lm_weights": <np.int32>[batch_size, max_predictions_per_seq]
* "next_sentence_labels": <np.int32>[batch_size]
"""
total = mask_token_proportion + random_token_proportion
if total < 0 or total > 1:
raise ValueError(
"Sum of random proportion and mask proportion must be in [0, 1] range. "
"Got random_token_proportion=%d and mask_token_proportion=%d" %
(random_token_proportion, mask_token_proportion))
pad_id = tokenizer.pad_id()
eos_id = tokenizer.eos_id()
bos_id = tokenizer.bos_id()
cls_id = tokenizer.PieceToId("[CLS]")
sep_id = tokenizer.PieceToId("[SEP]")
mask_id = tokenizer.PieceToId("[MASK]")
ignore_ids = [cls_id, sep_id, pad_id]
ignore_ids = np.array(ignore_ids, dtype=np.int32)[:, None]
special_tokens = {mask_id, cls_id, sep_id, bos_id, eos_id, pad_id}
normal_tokens = [
t for t in range(tokenizer.GetPieceSize()) if t not in special_tokens
]
it = _c4_data_unbatched(tokenizer, max_seq_length)
examples = []
while True:
example = next(it)
num_tokens = np.sum(example["input_ids"] != pad_id).item()
prediction_mask = np.all(example["input_ids"] != ignore_ids, axis=0)
cand_indexes = np.arange(
prediction_mask.shape[0], dtype=np.int32)[prediction_mask]
num_to_predict = min(max_predictions_per_seq,
max(1, int(num_tokens * masking_rate)))
masked_lm_positions = np.random.choice(
cand_indexes, num_to_predict, replace=False)
masked_lm_positions = np.sort(masked_lm_positions)
masked_lm_ids = example["input_ids"][masked_lm_positions]
masked_lm_weights = np.ones_like(masked_lm_positions, dtype=np.float32)
# Mask out tokens.
for position in masked_lm_positions:
rand = np.random.random()
if rand < mask_token_proportion:
replace_token_id = mask_id
elif rand < mask_token_proportion + random_token_proportion:
replace_token_id = np.random.choice(normal_tokens, 1).item()
else:
replace_token_id = example["input_ids"][position]
example["input_ids"][position] = replace_token_id
amount_to_pad = max_predictions_per_seq - num_to_predict
masked_lm_positions = np.pad(
masked_lm_positions, (0, amount_to_pad), mode="constant")
masked_lm_ids = np.pad(masked_lm_ids, (0, amount_to_pad), mode="constant")
masked_lm_weights = np.pad(
masked_lm_weights, (0, amount_to_pad), mode="constant")
example["masked_lm_positions"] = masked_lm_positions
example["masked_lm_ids"] = masked_lm_ids
example["masked_lm_weights"] = masked_lm_weights
examples.append(example)
if len(examples) == batch_size:
yield jax.tree_multimap(lambda *x: np.stack(x), *examples)
examples = []
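# Editor-added usage sketch (hedged): the masking proportions below mirror the
# BERT-style split described in the docstring and are illustrative, not values
# taken from this repository's configs.
#   mlm_batches = c4_masked_lm_inputs(
#       batch_size=8, tokenizer=tokenizer, max_seq_length=128,
#       max_predictions_per_seq=20, masking_rate=0.15,
#       mask_token_proportion=0.8, random_token_proportion=0.1)
#   mlm_batch = next(mlm_batches)  # arrays stacked along the leading batch axis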
|
google-research/google-research
|
f_net/input_pipeline.py
|
Python
|
apache-2.0
| 11,332
|
"""
adafruit.py
Adafruit.io views
"""
import logging
import json
import urllib.request, urllib.parse, urllib.error
from datetime import datetime, timedelta
from flask import Blueprint, abort, request, jsonify
from werkzeug.exceptions import BadRequest
from app.decorators import crossdomain
from app.views.auth import OAUTH
from app.models.adventure import Adventure
from app.models.adafruit import Adafruit
from app.models.point import Point
AIO_URL = 'https://io.adafruit.com'
MOD_ADAFRUIT = Blueprint('adafruit', __name__, url_prefix='/api/v1/adventure/<slug>/adafruit')
def get_last_point(adventure):
"""Returns the last adafruit point for the adventure."""
points = adventure.points.filter( \
source='adafruit' \
)
point = None
for doc in points:
if point is None or doc.timestamp > point.timestamp:
point = doc
return point
def load_data(base_url, username, feed, aio_key, adventure, start_time=None):
"""Load Adafruit.io data."""
request_headers = {
"X-AIO-Key": aio_key
}
if start_time is None:
last_point = get_last_point(adventure)
if last_point is not None:
start_time = last_point.timestamp
else:
start_time = datetime(2017, 1, 1)
request_params = {}
if start_time is not None:
delta = timedelta(seconds=60)
request_params['start_time'] = datetime.strftime( \
start_time + delta, \
'%Y-%m-%dT%H:%M:%SZ' \
)
req = urllib.request.Request( \
AIO_URL + base_url + '/' + username + '/feeds/' + feed + '/data' + \
'?' + urllib.parse.urlencode(request_params), \
headers=request_headers \
)
res = urllib.request.urlopen(req)
data = json.load(res)
if len(data) > 0:
for point in data:
try:
aio_id = point['id']
timestamp = datetime.strptime(point['created_at'], '%Y-%m-%dT%H:%M:%SZ')
                latitude = float(point['lat'])
                longitude = float(point['lon'])
altitude = str(point['ele'])
value = str(point['value'])
value_arr = value.split(':')
speed = value_arr[0]
battery = value_arr[1]
if aio_id is not None:
point = adventure.points.filter( \
point_type='tracker', aio_id=aio_id \
).first()
if point is None:
point = Point(
title='Adafruit.io tracker information received.',
desc=None,
altitude=altitude,
speed=speed,
direction=None,
latitude=latitude,
longitude=longitude,
resource=None,
point_type='tracker',
timestamp=timestamp,
delorme_id=None,
aio_id=aio_id,
hide=False,
thumb=None,
photo=None,
video=None,
source="adafruit",
battery=battery,
user=None
)
adventure.points.append(point)
except (ValueError, TypeError) as err:
logging.warning(err)
logging.warning(point)
adventure.save()
return jsonify({'status': 'ok'})
@MOD_ADAFRUIT.route('/', methods=['POST'])
@crossdomain(origin='*')
@OAUTH.require_oauth('email')
def add_adafruit(slug):
"""Add Adafruit.io configuration to Adventure object defined by slug."""
try:
adventure = Adventure.objects.get(slug=slug)
base_url = request.values.get('base_url', None)
username = request.values.get('username', None)
feed = request.values.get('feed', None)
aio_key = request.values.get('aio_key', None)
adventure.adafruit = Adafruit(
base_url=base_url,
username=username,
feed=feed,
aio_key=aio_key
)
adventure.save()
return jsonify({'status': 'ok'})
except TypeError as err:
logging.error(err)
abort(400)
except BadRequest:
abort(400)
return
@MOD_ADAFRUIT.route('/', methods=['GET'])
@crossdomain(origin='*')
@OAUTH.require_oauth('email')
def get_adafruit(slug):
"""Get Adafruit.io information."""
try:
adventure = Adventure.objects.get(slug=slug)
if adventure.adafruit:
return jsonify({'adafruit': adventure.adafruit.to_dict()})
return jsonify({'error': 'Adafruit.io is not configured for this adventure.'}), 400
except TypeError as err:
logging.error(err)
abort(400)
except BadRequest:
abort(400)
return
@MOD_ADAFRUIT.route('/load', methods=['GET'])
@OAUTH.require_oauth('email')
def load_tracker(slug):
"""Load Adafruit.io tracker points from configured feed URL."""
adventure = Adventure.objects().get(slug=slug)
adafruit = adventure.adafruit
if adafruit is not None:
return load_data( \
adafruit.base_url, adafruit.username, adafruit.feed, adafruit.aio_key, adventure \
)
return jsonify({'error': 'Adafruit.io is not configured for this adventure.'}), 500
|
myadventure/myadventure-api
|
app/views/adafruit.py
|
Python
|
apache-2.0
| 5,535
|
import asyncio
import json
import socket
import unittest
from aiohttp import web, request
from aiohttp_session import (Session, session_middleware,
get_session, SimpleCookieStorage)
class TestSimpleCookieStorage(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.srv = None
self.handler = None
def tearDown(self):
self.loop.run_until_complete(self.handler.finish_connections())
self.srv.close()
self.loop.close()
def find_unused_port(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 0))
port = s.getsockname()[1]
s.close()
return port
@asyncio.coroutine
def create_server(self, method, path, handler=None):
middleware = session_middleware(SimpleCookieStorage())
app = web.Application(middlewares=[middleware], loop=self.loop)
if handler:
app.router.add_route(method, path, handler)
port = self.find_unused_port()
handler = app.make_handler()
srv = yield from self.loop.create_server(
handler, '127.0.0.1', port)
url = "http://127.0.0.1:{}".format(port) + path
self.handler = handler
self.srv = srv
return app, srv, url
def make_cookie(self, data):
value = json.dumps(data)
return {'AIOHTTP_SESSION': value}
    def test_create_new_session(self):
@asyncio.coroutine
def handler(request):
session = yield from get_session(request)
self.assertIsInstance(session, Session)
self.assertTrue(session.new)
self.assertFalse(session._changed)
self.assertEqual({}, session)
return web.Response(body=b'OK')
@asyncio.coroutine
def go():
_, _, url = yield from self.create_server('GET', '/', handler)
resp = yield from request('GET', url, loop=self.loop)
self.assertEqual(200, resp.status)
self.loop.run_until_complete(go())
    def test_load_existing_session(self):
@asyncio.coroutine
def handler(request):
session = yield from get_session(request)
self.assertIsInstance(session, Session)
self.assertFalse(session.new)
self.assertFalse(session._changed)
self.assertEqual({'a': 1, 'b': 2}, session)
return web.Response(body=b'OK')
@asyncio.coroutine
def go():
_, _, url = yield from self.create_server('GET', '/', handler)
resp = yield from request(
'GET', url,
cookies=self.make_cookie({'a': 1, 'b': 2}),
loop=self.loop)
self.assertEqual(200, resp.status)
self.loop.run_until_complete(go())
    def test_change_session(self):
@asyncio.coroutine
def handler(request):
session = yield from get_session(request)
session['c'] = 3
return web.Response(body=b'OK')
@asyncio.coroutine
def go():
_, _, url = yield from self.create_server('GET', '/', handler)
resp = yield from request(
'GET', url,
cookies=self.make_cookie({'a': 1, 'b': 2}),
loop=self.loop)
self.assertEqual(200, resp.status)
morsel = resp.cookies['AIOHTTP_SESSION']
self.assertEqual({'a': 1, 'b': 2, 'c': 3}, eval(morsel.value))
self.assertTrue(morsel['httponly'])
self.assertEqual('/', morsel['path'])
self.loop.run_until_complete(go())
    def test_clear_cookie_on_session_invalidation(self):
@asyncio.coroutine
def handler(request):
session = yield from get_session(request)
session.invalidate()
return web.Response(body=b'OK')
@asyncio.coroutine
def go():
_, _, url = yield from self.create_server('GET', '/', handler)
resp = yield from request(
'GET', url,
cookies=self.make_cookie({'a': 1, 'b': 2}),
loop=self.loop)
self.assertEqual(200, resp.status)
self.assertEqual(
'Set-Cookie: AIOHTTP_SESSION="{}"; httponly; Path=/'.upper(),
resp.cookies['AIOHTTP_SESSION'].output().upper())
self.loop.run_until_complete(go())
def test_dont_save_not_requested_session(self):
@asyncio.coroutine
def handler(request):
return web.Response(body=b'OK')
@asyncio.coroutine
def go():
_, _, url = yield from self.create_server('GET', '/', handler)
resp = yield from request(
'GET', url,
cookies=self.make_cookie({'a': 1, 'b': 2}),
loop=self.loop)
self.assertEqual(200, resp.status)
self.assertNotIn('AIOHTTP_SESSION', resp.cookies)
self.loop.run_until_complete(go())
|
kolko/aiohttp_session
|
tests/test_cookie_storage.py
|
Python
|
apache-2.0
| 5,099
|
import socket
import datetime
from pymongo import MongoClient
import sys
import getpass
__author__ = "SREEJITH KOVILAKATHUVEETTIL CHANDRAN"
__copyright__ = " Copyright 2015,SREEJITH KOVILAKATHUVEETTIL CHANDRAN"
__email__ = "sreeju_kc@hotmail.com"
__license__ = "Apache License 2.0"
def fipandhost():
ip = socket.gethostbyname(socket.gethostname())#To fetch the IP address
host = socket.getfqdn()#To fetch the hostname
user = getpass.getuser()#To fetch the username
conn = MongoClient()
conn = MongoClient('mongodb://X.X.X.X:27017/')#You need to setup a mongoDB server first and provide mongoDB server IP address.
db = conn.socail_eng
collection = db.sc_feed
post = {"IP": ip,
"Hostname": host,
"Username": user}
pf = collection.find({"IP":ip,"Hostname":host}).count()
if pf > 0: #This will make sure no duplicate entry is created,even if user clicks multiple times it wont matter
sys.exit()
collection.insert(post)
if __name__ == '__main__':
fipandhost()
|
sreejithkchandran/findwhoclicks
|
click.py
|
Python
|
apache-2.0
| 1,032
|
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Dict, Union
import numpy as np
from pyquil.parser import parse
from pyquil.quilbase import (
Declare,
DefGate,
Gate as PyQuilGate,
Measurement as PyQuilMeasurement,
Pragma,
Reset,
ResetQubit,
)
from cirq import Circuit, LineQubit
from cirq.ops import (
CCNOT,
CNOT,
CSWAP,
CZ,
CZPowGate,
Gate,
H,
I,
ISWAP,
ISwapPowGate,
MatrixGate,
MeasurementGate,
S,
SWAP,
T,
TwoQubitDiagonalGate,
X,
Y,
Z,
ZPowGate,
rx,
ry,
rz,
)
class UndefinedQuilGate(Exception):
pass
class UnsupportedQuilInstruction(Exception):
pass
#
# Functions for converting supported parameterized Quil gates.
#
def cphase(param: float) -> CZPowGate:
"""Returns a controlled-phase gate as a Cirq CZPowGate with exponent
determined by the input param. The angle parameter of pyQuil's CPHASE
gate and the exponent of Cirq's CZPowGate differ by a factor of pi.
Args:
param: Gate parameter (in radians).
Returns:
A CZPowGate equivalent to a CPHASE gate of given angle.
"""
return CZPowGate(exponent=param / np.pi)
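# Editor-added sanity check (hedged; assumes `import cirq` so cirq.unitary is available):
#   np.allclose(cirq.unitary(cphase(np.pi)), cirq.unitary(CZ))  # -> True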
def cphase00(phi: float) -> TwoQubitDiagonalGate:
"""Returns a Cirq TwoQubitDiagonalGate for pyQuil's CPHASE00 gate.
In pyQuil, CPHASE00(phi) = diag([exp(1j * phi), 1, 1, 1]), and in Cirq,
a TwoQubitDiagonalGate is specified by its diagonal in radians, which
would be [phi, 0, 0, 0].
Args:
phi: Gate parameter (in radians).
Returns:
A TwoQubitDiagonalGate equivalent to a CPHASE00 gate of given angle.
"""
return TwoQubitDiagonalGate([phi, 0, 0, 0])
def cphase01(phi: float) -> TwoQubitDiagonalGate:
"""Returns a Cirq TwoQubitDiagonalGate for pyQuil's CPHASE01 gate.
    In pyQuil, CPHASE01(phi) = diag([1, exp(1j * phi), 1, 1]), and in Cirq,
a TwoQubitDiagonalGate is specified by its diagonal in radians, which
would be [0, phi, 0, 0].
Args:
phi: Gate parameter (in radians).
Returns:
A TwoQubitDiagonalGate equivalent to a CPHASE01 gate of given angle.
"""
return TwoQubitDiagonalGate([0, phi, 0, 0])
def cphase10(phi: float) -> TwoQubitDiagonalGate:
"""Returns a Cirq TwoQubitDiagonalGate for pyQuil's CPHASE10 gate.
    In pyQuil, CPHASE10(phi) = diag([1, 1, exp(1j * phi), 1]), and in Cirq,
a TwoQubitDiagonalGate is specified by its diagonal in radians, which
would be [0, 0, phi, 0].
Args:
phi: Gate parameter (in radians).
Returns:
A TwoQubitDiagonalGate equivalent to a CPHASE10 gate of given angle.
"""
return TwoQubitDiagonalGate([0, 0, phi, 0])
def phase(param: float) -> ZPowGate:
"""Returns a single-qubit phase gate as a Cirq ZPowGate with exponent
determined by the input param. The angle parameter of pyQuil's PHASE
gate and the exponent of Cirq's ZPowGate differ by a factor of pi.
Args:
param: Gate parameter (in radians).
Returns:
A ZPowGate equivalent to a PHASE gate of given angle.
"""
return ZPowGate(exponent=param / np.pi)
def pswap(phi: float) -> MatrixGate:
"""Returns a Cirq MatrixGate for pyQuil's PSWAP gate.
Args:
phi: Gate parameter (in radians).
Returns:
A MatrixGate equivalent to a PSWAP gate of given angle.
"""
pswap_matrix = np.array(
[
[1, 0, 0, 0],
[0, 0, np.exp(1j * phi), 0],
[0, np.exp(1j * phi), 0, 0],
[0, 0, 0, 1],
],
dtype=complex,
)
return MatrixGate(pswap_matrix)
def xy(param: float) -> ISwapPowGate:
"""Returns an ISWAP-family gate as a Cirq ISwapPowGate with exponent
determined by the input param. The angle parameter of pyQuil's XY gate
and the exponent of Cirq's ISwapPowGate differ by a factor of pi.
Args:
param: Gate parameter (in radians).
Returns:
An ISwapPowGate equivalent to an XY gate of given angle.
"""
return ISwapPowGate(exponent=param / np.pi)
PRAGMA_ERROR = """
Please remove PRAGMAs from your Quil program.
If you would like to add noise, do so after conversion.
"""
RESET_ERROR = """
Please remove RESETs from your Quil program.
RESET directives have special meaning on QCS, to enable active reset.
"""
# Parameterized gates map to functions that produce Gate constructors.
SUPPORTED_GATES: Dict[str, Union[Gate, Callable[..., Gate]]] = {
"CCNOT": CCNOT,
"CNOT": CNOT,
"CSWAP": CSWAP,
"CPHASE": cphase,
"CPHASE00": cphase00,
"CPHASE01": cphase01,
"CPHASE10": cphase10,
"CZ": CZ,
"PHASE": phase,
"H": H,
"I": I,
"ISWAP": ISWAP,
"PSWAP": pswap,
"RX": rx,
"RY": ry,
"RZ": rz,
"S": S,
"SWAP": SWAP,
"T": T,
"X": X,
"Y": Y,
"Z": Z,
"XY": xy,
}
def circuit_from_quil(quil: str) -> Circuit:
"""Convert a Quil program to a Cirq Circuit.
Args:
quil: The Quil program to convert.
Returns:
A Cirq Circuit generated from the Quil program.
Raises:
UnsupportedQuilInstruction: Cirq does not support the specified Quil instruction.
UndefinedQuilGate: Cirq does not support the specified Quil gate.
References:
https://github.com/rigetti/pyquil
"""
circuit = Circuit()
defined_gates = SUPPORTED_GATES.copy()
instructions = parse(quil)
for inst in instructions:
# Add DEFGATE-defined gates to defgates dict using MatrixGate.
if isinstance(inst, DefGate):
if inst.parameters:
raise UnsupportedQuilInstruction(
"Parameterized DEFGATEs are currently unsupported."
)
defined_gates[inst.name] = MatrixGate(inst.matrix)
# Pass when encountering a DECLARE.
elif isinstance(inst, Declare):
pass
# Convert pyQuil gates to Cirq operations.
elif isinstance(inst, PyQuilGate):
quil_gate_name = inst.name
quil_gate_params = inst.params
line_qubits = list(LineQubit(q.index) for q in inst.qubits)
if quil_gate_name not in defined_gates:
raise UndefinedQuilGate(f"Quil gate {quil_gate_name} not supported in Cirq.")
cirq_gate_fn = defined_gates[quil_gate_name]
if quil_gate_params:
circuit += cirq_gate_fn(*quil_gate_params)(*line_qubits)
else:
circuit += cirq_gate_fn(*line_qubits)
# Convert pyQuil MEASURE operations to Cirq MeasurementGate objects.
elif isinstance(inst, PyQuilMeasurement):
line_qubit = LineQubit(inst.qubit.index)
if inst.classical_reg is None:
raise UnsupportedQuilInstruction(
f"Quil measurement {inst} without classical register "
f"not currently supported in Cirq."
)
quil_memory_reference = inst.classical_reg.out()
circuit += MeasurementGate(1, key=quil_memory_reference)(line_qubit)
# Raise a targeted error when encountering a PRAGMA.
elif isinstance(inst, Pragma):
raise UnsupportedQuilInstruction(PRAGMA_ERROR)
# Raise a targeted error when encountering a RESET.
elif isinstance(inst, (Reset, ResetQubit)):
raise UnsupportedQuilInstruction(RESET_ERROR)
# Raise a general error when encountering an unconsidered type.
else:
raise UnsupportedQuilInstruction(
f"Quil instruction {inst} of type {type(inst)} not currently supported in Cirq."
)
return circuit
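# --- Editor's illustrative sketch (not part of the original module). A minimal
# usage example of circuit_from_quil(), assuming pyQuil is installed (it is
# already required above for parse()). PHASE(pi/2) becomes a ZPowGate with
# exponent 0.5 and CPHASE(pi/2) becomes a TwoQubitDiagonalGate, per the
# converters defined in this module.
if __name__ == "__main__":
    example_quil = "H 0\nCNOT 0 1\nPHASE(pi/2) 0\nCPHASE(pi/2) 0 1\n"
    print(circuit_from_quil(example_quil))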
|
quantumlib/Cirq
|
cirq-core/cirq/contrib/quil_import/quil.py
|
Python
|
apache-2.0
| 8,276
|
"""Helper classes for Google Assistant integration."""
from __future__ import annotations
from abc import ABC, abstractmethod
from asyncio import gather
from collections.abc import Mapping
from http import HTTPStatus
import logging
import pprint
from aiohttp.web import json_response
from homeassistant.components import webhook
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_SUPPORTED_FEATURES,
CLOUD_NEVER_EXPOSED_ENTITIES,
CONF_NAME,
STATE_UNAVAILABLE,
)
from homeassistant.core import Context, HomeAssistant, State, callback
from homeassistant.helpers import start
from homeassistant.helpers.area_registry import AreaEntry
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.entity_registry import RegistryEntry
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.network import get_url
from homeassistant.helpers.storage import Store
from . import trait
from .const import (
CONF_ALIASES,
CONF_ROOM_HINT,
DEVICE_CLASS_TO_GOOGLE_TYPES,
DOMAIN,
DOMAIN_TO_GOOGLE_TYPES,
ERR_FUNCTION_NOT_SUPPORTED,
NOT_EXPOSE_LOCAL,
SOURCE_LOCAL,
STORE_AGENT_USER_IDS,
)
from .error import SmartHomeError
SYNC_DELAY = 15
_LOGGER = logging.getLogger(__name__)
async def _get_entity_and_device(
hass: HomeAssistant, entity_id: str
) -> tuple[RegistryEntry | None, DeviceEntry | None]:
    """Fetch the entity and device entries for an entity_id."""
dev_reg, ent_reg = await gather(
hass.helpers.device_registry.async_get_registry(),
hass.helpers.entity_registry.async_get_registry(),
)
if not (entity_entry := ent_reg.async_get(entity_id)):
return None, None
device_entry = dev_reg.devices.get(entity_entry.device_id)
return entity_entry, device_entry
async def _get_area(
hass: HomeAssistant,
entity_entry: RegistryEntry | None,
device_entry: DeviceEntry | None,
) -> AreaEntry | None:
"""Calculate the area for an entity."""
if entity_entry and entity_entry.area_id:
area_id = entity_entry.area_id
elif device_entry and device_entry.area_id:
area_id = device_entry.area_id
else:
return None
area_reg = await hass.helpers.area_registry.async_get_registry()
return area_reg.areas.get(area_id)
async def _get_device_info(device_entry: DeviceEntry | None) -> dict[str, str] | None:
"""Retrieve the device info for a device."""
if not device_entry:
return None
device_info = {}
if device_entry.manufacturer:
device_info["manufacturer"] = device_entry.manufacturer
if device_entry.model:
device_info["model"] = device_entry.model
if device_entry.sw_version:
device_info["swVersion"] = device_entry.sw_version
return device_info
class AbstractConfig(ABC):
"""Hold the configuration for Google Assistant."""
_unsub_report_state = None
def __init__(self, hass):
"""Initialize abstract config."""
self.hass = hass
self._store = None
self._google_sync_unsub = {}
self._local_sdk_active = False
async def async_initialize(self):
"""Perform async initialization of config."""
self._store = GoogleConfigStore(self.hass)
await self._store.async_load()
if not self.enabled:
return
async def sync_google(_):
"""Sync entities to Google."""
await self.async_sync_entities_all()
start.async_at_start(self.hass, sync_google)
@property
def enabled(self):
"""Return if Google is enabled."""
return False
@property
def entity_config(self):
"""Return entity config."""
return {}
@property
def secure_devices_pin(self):
"""Return entity config."""
return None
@property
def is_reporting_state(self):
"""Return if we're actively reporting states."""
return self._unsub_report_state is not None
@property
def is_local_sdk_active(self):
"""Return if we're actively accepting local messages."""
return self._local_sdk_active
@property
def should_report_state(self):
"""Return if states should be proactively reported."""
return False
@property
def local_sdk_webhook_id(self):
"""Return the local SDK webhook ID.
Return None to disable the local SDK.
"""
return None
@property
def local_sdk_user_id(self):
"""Return the user ID to be used for actions received via the local SDK."""
raise NotImplementedError
@abstractmethod
def get_agent_user_id(self, context):
"""Get agent user ID from context."""
@abstractmethod
def should_expose(self, state) -> bool:
"""Return if entity should be exposed."""
def should_2fa(self, state):
"""If an entity should have 2FA checked."""
# pylint: disable=no-self-use
return True
async def async_report_state(self, message, agent_user_id: str):
"""Send a state report to Google."""
raise NotImplementedError
async def async_report_state_all(self, message):
"""Send a state report to Google for all previously synced users."""
jobs = [
self.async_report_state(message, agent_user_id)
for agent_user_id in self._store.agent_user_ids
]
await gather(*jobs)
@callback
def async_enable_report_state(self):
"""Enable proactive mode."""
# Circular dep
# pylint: disable=import-outside-toplevel
from .report_state import async_enable_report_state
if self._unsub_report_state is None:
self._unsub_report_state = async_enable_report_state(self.hass, self)
@callback
def async_disable_report_state(self):
"""Disable report state."""
if self._unsub_report_state is not None:
self._unsub_report_state()
self._unsub_report_state = None
async def async_sync_entities(self, agent_user_id: str):
"""Sync all entities to Google."""
# Remove any pending sync
self._google_sync_unsub.pop(agent_user_id, lambda: None)()
status = await self._async_request_sync_devices(agent_user_id)
if status == HTTPStatus.NOT_FOUND:
await self.async_disconnect_agent_user(agent_user_id)
return status
async def async_sync_entities_all(self):
"""Sync all entities to Google for all registered agents."""
res = await gather(
*(
self.async_sync_entities(agent_user_id)
for agent_user_id in self._store.agent_user_ids
)
)
return max(res, default=204)
@callback
def async_schedule_google_sync(self, agent_user_id: str):
"""Schedule a sync."""
async def _schedule_callback(_now):
"""Handle a scheduled sync callback."""
self._google_sync_unsub.pop(agent_user_id, None)
await self.async_sync_entities(agent_user_id)
self._google_sync_unsub.pop(agent_user_id, lambda: None)()
self._google_sync_unsub[agent_user_id] = async_call_later(
self.hass, SYNC_DELAY, _schedule_callback
)
@callback
def async_schedule_google_sync_all(self):
"""Schedule a sync for all registered agents."""
for agent_user_id in self._store.agent_user_ids:
self.async_schedule_google_sync(agent_user_id)
async def _async_request_sync_devices(self, agent_user_id: str) -> int:
"""Trigger a sync with Google.
Return value is the HTTP status code of the sync request.
"""
raise NotImplementedError
async def async_connect_agent_user(self, agent_user_id: str):
"""Add an synced and known agent_user_id.
Called when a completed sync response have been sent to Google.
"""
self._store.add_agent_user_id(agent_user_id)
async def async_disconnect_agent_user(self, agent_user_id: str):
"""Turn off report state and disable further state reporting.
Called when the user disconnects their account from Google.
"""
self._store.pop_agent_user_id(agent_user_id)
@callback
def async_enable_local_sdk(self):
"""Enable the local SDK."""
if (webhook_id := self.local_sdk_webhook_id) is None:
return
try:
webhook.async_register(
self.hass,
DOMAIN,
"Local Support",
webhook_id,
self._handle_local_webhook,
)
except ValueError:
_LOGGER.info("Webhook handler is already defined!")
return
self._local_sdk_active = True
@callback
def async_disable_local_sdk(self):
"""Disable the local SDK."""
if not self._local_sdk_active:
return
webhook.async_unregister(self.hass, self.local_sdk_webhook_id)
self._local_sdk_active = False
async def _handle_local_webhook(self, hass, webhook_id, request):
"""Handle an incoming local SDK message."""
# Circular dep
# pylint: disable=import-outside-toplevel
from . import smart_home
payload = await request.json()
if _LOGGER.isEnabledFor(logging.DEBUG):
_LOGGER.debug("Received local message:\n%s\n", pprint.pformat(payload))
if not self.enabled:
return json_response(smart_home.turned_off_response(payload))
result = await smart_home.async_handle_message(
self.hass, self, self.local_sdk_user_id, payload, SOURCE_LOCAL
)
if _LOGGER.isEnabledFor(logging.DEBUG):
_LOGGER.debug("Responding to local message:\n%s\n", pprint.pformat(result))
return json_response(result)
class GoogleConfigStore:
"""A configuration store for google assistant."""
_STORAGE_VERSION = 1
_STORAGE_KEY = DOMAIN
def __init__(self, hass):
"""Initialize a configuration store."""
self._hass = hass
self._store = Store(hass, self._STORAGE_VERSION, self._STORAGE_KEY)
self._data = {STORE_AGENT_USER_IDS: {}}
@property
def agent_user_ids(self):
"""Return a list of connected agent user_ids."""
return self._data[STORE_AGENT_USER_IDS]
@callback
def add_agent_user_id(self, agent_user_id):
"""Add an agent user id to store."""
if agent_user_id not in self._data[STORE_AGENT_USER_IDS]:
self._data[STORE_AGENT_USER_IDS][agent_user_id] = {}
self._store.async_delay_save(lambda: self._data, 1.0)
@callback
def pop_agent_user_id(self, agent_user_id):
"""Remove agent user id from store."""
if agent_user_id in self._data[STORE_AGENT_USER_IDS]:
self._data[STORE_AGENT_USER_IDS].pop(agent_user_id, None)
self._store.async_delay_save(lambda: self._data, 1.0)
async def async_load(self):
"""Store current configuration to disk."""
if data := await self._store.async_load():
self._data = data
class RequestData:
"""Hold data associated with a particular request."""
def __init__(
self,
config: AbstractConfig,
user_id: str,
source: str,
request_id: str,
devices: list[dict] | None,
) -> None:
"""Initialize the request data."""
self.config = config
self.source = source
self.request_id = request_id
self.context = Context(user_id=user_id)
self.devices = devices
@property
def is_local_request(self):
"""Return if this is a local request."""
return self.source == SOURCE_LOCAL
def get_google_type(domain, device_class):
"""Google type based on domain and device class."""
typ = DEVICE_CLASS_TO_GOOGLE_TYPES.get((domain, device_class))
return typ if typ is not None else DOMAIN_TO_GOOGLE_TYPES[domain]
class GoogleEntity:
"""Adaptation of Entity expressed in Google's terms."""
def __init__(
self, hass: HomeAssistant, config: AbstractConfig, state: State
) -> None:
"""Initialize a Google entity."""
self.hass = hass
self.config = config
self.state = state
self._traits = None
@property
def entity_id(self):
"""Return entity ID."""
return self.state.entity_id
@callback
def traits(self):
"""Return traits for entity."""
if self._traits is not None:
return self._traits
state = self.state
domain = state.domain
attributes = state.attributes
features = attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if not isinstance(features, int):
_LOGGER.warning(
"Entity %s contains invalid supported_features value %s",
self.entity_id,
features,
)
return []
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
self._traits = [
Trait(self.hass, state, self.config)
for Trait in trait.TRAITS
if Trait.supported(domain, features, device_class, attributes)
]
return self._traits
@callback
def should_expose(self):
"""If entity should be exposed."""
return self.config.should_expose(self.state)
@callback
def should_expose_local(self) -> bool:
"""Return if the entity should be exposed locally."""
return (
self.should_expose()
and get_google_type(
self.state.domain, self.state.attributes.get(ATTR_DEVICE_CLASS)
)
not in NOT_EXPOSE_LOCAL
and not self.might_2fa()
)
@callback
def is_supported(self) -> bool:
"""Return if the entity is supported by Google."""
return bool(self.traits())
@callback
def might_2fa(self) -> bool:
"""Return if the entity might encounter 2FA."""
if not self.config.should_2fa(self.state):
return False
return self.might_2fa_traits()
@callback
def might_2fa_traits(self) -> bool:
"""Return if the entity might encounter 2FA based on just traits."""
state = self.state
domain = state.domain
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
return any(
trait.might_2fa(domain, features, device_class) for trait in self.traits()
)
async def sync_serialize(self, agent_user_id):
"""Serialize entity for a SYNC response.
https://developers.google.com/actions/smarthome/create-app#actiondevicessync
"""
state = self.state
entity_config = self.config.entity_config.get(state.entity_id, {})
name = (entity_config.get(CONF_NAME) or state.name).strip()
domain = state.domain
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
entity_entry, device_entry = await _get_entity_and_device(
self.hass, state.entity_id
)
traits = self.traits()
device_type = get_google_type(domain, device_class)
device = {
"id": state.entity_id,
"name": {"name": name},
"attributes": {},
"traits": [trait.name for trait in traits],
"willReportState": self.config.should_report_state,
"type": device_type,
}
# use aliases
if aliases := entity_config.get(CONF_ALIASES):
device["name"]["nicknames"] = [name] + aliases
if self.config.is_local_sdk_active and self.should_expose_local():
device["otherDeviceIds"] = [{"deviceId": self.entity_id}]
device["customData"] = {
"webhookId": self.config.local_sdk_webhook_id,
"httpPort": self.hass.http.server_port,
"httpSSL": self.hass.config.api.use_ssl,
"uuid": await self.hass.helpers.instance_id.async_get(),
"baseUrl": get_url(self.hass, prefer_external=True),
"proxyDeviceId": agent_user_id,
}
for trt in traits:
device["attributes"].update(trt.sync_attributes())
if room := entity_config.get(CONF_ROOM_HINT):
device["roomHint"] = room
else:
area = await _get_area(self.hass, entity_entry, device_entry)
if area and area.name:
device["roomHint"] = area.name
if device_info := await _get_device_info(device_entry):
device["deviceInfo"] = device_info
return device
@callback
def query_serialize(self):
"""Serialize entity for a QUERY response.
https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
"""
state = self.state
if state.state == STATE_UNAVAILABLE:
return {"online": False}
attrs = {"online": True}
for trt in self.traits():
deep_update(attrs, trt.query_attributes())
return attrs
@callback
def reachable_device_serialize(self):
"""Serialize entity for a REACHABLE_DEVICE response."""
return {"verificationId": self.entity_id}
async def execute(self, data, command_payload):
"""Execute a command.
https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
"""
command = command_payload["command"]
params = command_payload.get("params", {})
challenge = command_payload.get("challenge", {})
executed = False
for trt in self.traits():
if trt.can_execute(command, params):
await trt.execute(command, data, params, challenge)
executed = True
break
if not executed:
raise SmartHomeError(
ERR_FUNCTION_NOT_SUPPORTED,
f"Unable to execute {command} for {self.state.entity_id}",
)
@callback
def async_update(self):
"""Update the entity with latest info from Home Assistant."""
self.state = self.hass.states.get(self.entity_id)
if self._traits is None:
return
for trt in self._traits:
trt.state = self.state
def deep_update(target, source):
"""Update a nested dictionary with another nested dictionary."""
for key, value in source.items():
if isinstance(value, Mapping):
target[key] = deep_update(target.get(key, {}), value)
else:
target[key] = value
return target
@callback
def async_get_entities(
hass: HomeAssistant, config: AbstractConfig
) -> list[GoogleEntity]:
"""Return all entities that are supported by Google."""
entities = []
for state in hass.states.async_all():
if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
continue
entity = GoogleEntity(hass, config, state)
if entity.is_supported():
entities.append(entity)
return entities
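# --- Editor's illustrative sketch (not part of the original module): how
# deep_update() merges nested trait attributes without discarding sibling keys,
# which is the behavior query_serialize() relies on when combining the
# query_attributes() of several traits. The attribute names below are made up.
if __name__ == "__main__":
    merged = {"online": True, "color": {"spectrumRgb": 16711680}}
    deep_update(merged, {"color": {"temperatureK": 2700}, "brightness": 65})
    # merged is now:
    # {'online': True,
    #  'color': {'spectrumRgb': 16711680, 'temperatureK': 2700},
    #  'brightness': 65}
    print(merged)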
|
mezz64/home-assistant
|
homeassistant/components/google_assistant/helpers.py
|
Python
|
apache-2.0
| 19,261
|
# -*- coding: utf-8 -*-
import sys
import sqlite3
from Foundation import *
from QCMsgDestoryInfo import *
from QCPicLinkInfo import *
from QCPictureInfo import *
from QCSystemMsgExtraInfoModel import *
sys.dont_write_bytecode = True
reload(sys)
sys.setdefaultencoding('utf-8')
conn = sqlite3.connect("./QCall.db")
cur = conn.cursor()
tables = cur.execute("select name from sqlite_master where type = 'table' order by name").fetchall()
file = open("./QCall.db.txt", "w+")
print >> file, tables
for table in tables:
column = cur.execute("PRAGMA table_info(%s)" % table).fetchall()
msgs = cur.execute("select * from %s" % table).fetchall()
print >> file, "**[%s][count = %d]**********************************************************" % (table, len(msgs))
for i in range(len(msgs)):
print >> file, "**[%s][%d]**********************************************************" % (table, i)
blob = 0
for j in range(len(column)):
if column[j][2] == "blob":
if msgs[i][j] is not None:
msgDestrory = NSKeyedUnarchiver.unarchiveObjectWithData_(msgs[i][j])
if msgDestrory is not None:
print >> file, column[j][1],":",msgDestrory.description()
else:
print >> file, column[j][1],":None"
else:
print >> file, column[j][1],":None"
else:
print >> file, column[j][1],"=",msgs[i][j]
file.close()
conn.close()
|
foreverwind/Script
|
QCall/QCall.db.py
|
Python
|
apache-2.0
| 1,530
|
"""Auto-generated file, do not edit by hand. SY metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_SY = PhoneMetadata(id='SY', country_code=963, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[1-39]\\d{8}|[1-5]\\d{7}', possible_length=(8, 9), possible_length_local_only=(6, 7)),
fixed_line=PhoneNumberDesc(national_number_pattern='21\\d{6,7}|(?:1(?:[14]\\d|[2356])|2[235]|3(?:[13]\\d|4)|4[134]|5[1-3])\\d{6}', example_number='112345678', possible_length=(8, 9), possible_length_local_only=(6, 7)),
mobile=PhoneNumberDesc(national_number_pattern='9(?:22|[3-689]\\d)\\d{6}', example_number='944567890', possible_length=(9,)),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['[1-5]'], national_prefix_formatting_rule='0\\1', national_prefix_optional_when_formatting=True),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['9'], national_prefix_formatting_rule='0\\1', national_prefix_optional_when_formatting=True)])
|
daviddrysdale/python-phonenumbers
|
python/phonenumbers/data/region_SY.py
|
Python
|
apache-2.0
| 1,194
|
# Generated by the windmill services transformer
from windmill.dep import functest
from windmill.authoring import WindmillTestClient
def test_recordingSuite0():
client = WindmillTestClient(__name__, assertions=False)
assert client.open(url=u'http://tutorial.getwindmill.com/windmill-unittests/unit_tester.html')['result']
assert client.waits.forPageLoad(timeout=u'8000')['result']
assert client.click(value=u'lookupByValue')['result']
assert client.click(classname=u'lookupByClassname')['result']
assert client.click(name=u'lookupByName')['result']
assert client.click(id=u'lookupById')['result']
assert client.click(jsid=u'jsNode()')['result']
assert client.click(tagname=u'hr')['result']
|
windmill/windmill
|
test/local_tests/test_lookups.py
|
Python
|
apache-2.0
| 727
|
# -*- coding: latin-1 -*-
# Copyright 2015 SICS Swedish ICT AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from threading import Thread, Event
from sh import netstat,ErrorReturnCode
import re
#import pdb
from monitor import OS_type, OS
#
# Partly based on code from the UNIFY FP7 Demo for the Y1 review 2015,
# by Pontus Sköldström, Acreo Swedish ICT AB / Per Kreuger, SICS Swedish ICT AB.
#
class Sampler(Thread):
def __init__(self, inq, outq, sample_rate, monitor, interface='eth0', name='Sampler', debug=False):
Thread.__init__(self, name=name)
self.inq = inq
self.outq = outq
self.request_event = Event()
self.set_sample_rate(sample_rate)
self.monitor = monitor
self.interface = interface
self.debug = debug
self.keep_running = True
self.tx_agg = self.rx_agg = 0.0
self.txb2 = self.rxb2 = 0.0
self.tx_file = None
self.rx_file = None
self.samples = 0
self.last_data = self.get_interface_data(self.interface)
# FIXME: These two should be inferred by checking the word
# length of the computer we're running on.
# Perhaps with platform.architecture() or sys.maxsize
# For now assume 32-bit architecture.
self.tx_byte_counter_wrap_adjustment = (2 ** 32) - 1
self.rx_byte_counter_wrap_adjustment = (2 ** 32) - 1
def run(self):
if (self.debug):
self.monitor.debugPrint("sampler.py: starting run()")
self.last_data = self.get_interface_data(self.interface)
while self.keep_running:
timestamp = self.sample()
while not self.inq.empty():
request = self.inq.get()
if request == 'rate_data':
self.inq.task_done()
self.outq.put({'ts': timestamp, 'samples': self.samples, 'txb2': self.txb2, 'rxb2': self.rxb2, 'tx_agg': self.tx_agg, 'rx_agg': self.rx_agg})
# self.samples = 0 # Don't do this!
elif request == 'stop':
self.inq.task_done()
self.keep_running = False
else:
self.inq.task_done()
pass # unknown request, FIXME: perhaps report an error?
#
# time.sleep(self.sleep_time)
self.request_event.wait(self.sleep_time)
self.request_event.clear()
if (self.debug):
# Race conditions can cause things to be pulled from under
# our feet, so catch all exceptions here.
try:
self.monitor.debugPrint("sampler.py: exit from run()")
except BaseException:
pass
# Read the traffic flow data from the network interface and accumulate the rate
def sample(self):
self.samples += 1
curr = self.get_interface_data(self.interface)
if curr[1] < self.last_data[1]:
# tx counter has wrapped
self.monitor.debugPrint("sampler.py: self.last_data[1] before wrap adjustment: " + str(self.last_data[1]))
self.last_data[1] -= self.tx_byte_counter_wrap_adjustment
tx_bytes = curr[1] - self.last_data[1]
if curr[2] < self.last_data[2]:
# rx counter has wrapped
self.monitor.debugPrint("sampler.py: self.last_data[2] before wrap adjustment: " + str(self.last_data[2]))
self.last_data[2] -= self.rx_byte_counter_wrap_adjustment
rx_bytes = curr[2] - self.last_data[2]
timestamp = curr[0] #- self.last_data[0]
obs_time = timestamp - self.last_data[0]
if self.debug:
self.monitor.debugPrint("sampler.py: curr: " + str(curr) + ", self.last_data: " + str(self.last_data))
self.monitor.debugPrint("sampler.py: tx_bytes: " + str(tx_bytes) + ", rx_bytes: " + str(rx_bytes))
self.monitor.debugPrint("sampler.py: obs_time: " + str(obs_time))
# self.last_data = curr
self.last_data = list(curr) # copy curr
tx_byte_rate = tx_bytes / obs_time
rx_byte_rate = rx_bytes / obs_time
self.tx_agg = self.tx_agg + tx_byte_rate
self.rx_agg = self.rx_agg + rx_byte_rate
self.txb2 = self.txb2 + (tx_byte_rate*tx_byte_rate)
self.rxb2 = self.rxb2 + (rx_byte_rate*rx_byte_rate)
if self.debug:
self.monitor.debugPrint("sampler.py: tx_byte_rate: " + str(tx_byte_rate) + ", rx_byte_rate: " + str(rx_byte_rate) + ", self.tx_agg: " + str(self.tx_agg) + ". self.rx_agg: " + str(self.rx_agg) + ", self.txb2: " + str(self.txb2) + ", self.rxb2: " + str(self.rxb2))
return timestamp
# Read the number of bytes received and sent to/from the network interface
def get_interface_data(self,interface):
if OS == OS_type.darwin: # OS X does not have /sys/class/net/...
# This is only for testing on OS X. Running netstat takes
# way to much time to be a practical method for reading
# the byte counters of an interface.
netstat_output = netstat("-i", "-b", "-I", interface)
for line in netstat_output:
if line.startswith(interface):
words = line.split()
rx_bytes = int(words[6])
tx_bytes = int(words[9])
                    return [time.time(), tx_bytes, rx_bytes]
            return [time.time(), 0, 0]
else:
# FIXME: Perhaps open the tx_file and the rx_file in the
# __init__ method instead. Is there really a good
# reason for doing it this way?
tx_fn = "/sys/class/net/%s/statistics/tx_bytes" % interface
rx_fn = "/sys/class/net/%s/statistics/rx_bytes" % interface
if self.tx_file is None:
self.tx_file = open(tx_fn)
tx_bytes = int(self.tx_file.read())
else:
self.tx_file.seek(0)
tx_bytes = int(self.tx_file.read())
if self.rx_file is None:
self.rx_file = open(rx_fn)
rx_bytes = int(self.rx_file.read())
else:
self.rx_file.seek(0)
rx_bytes = int(self.rx_file.read())
# return time.time(), tx_bytes, rx_bytes
return [time.time(), tx_bytes, rx_bytes]
#
def set_sample_rate(self,sample_rate):
self.sample_rate = sample_rate
self.sleep_time = 1.0 / sample_rate
def get_sample_rate(self):
return self.sample_rate
def set_interface(self,interface):
self.interface = interface
def get_interface(self):
return self.interface
def running(self):
return self.keep_running
def stopped(self):
return not self.keep_running
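# --- Editor's sketch of the FIXME in __init__ above (not part of the original
# module): the wrap adjustment could be guessed from the interpreter's word
# size instead of hard-coding 32 bits, as the FIXME suggests via sys.maxsize.
# Whether the kernel's byte counters actually match the interpreter's word size
# is an assumption; treat this as a starting point, not a definitive rule.
def _guess_counter_wrap_adjustment():
    import sys
    counter_bits = 64 if sys.maxsize > 2 ** 32 else 32
    return (2 ** counter_bits) - 1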
|
nigsics/ramon
|
sampler.py
|
Python
|
apache-2.0
| 7,385
|
# Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Aggregate admin API extension."""
from webob import exc
from nova.api.openstack import extensions
from nova import compute
from nova import exception
from nova import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'aggregates')
def _get_context(req):
return req.environ['nova.context']
def get_host_from_body(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, body, *args, **kwargs):
if len(body) == 1 and "host" in body:
host = body['host']
else:
raise exc.HTTPBadRequest
return fn(self, req, id, host, *args, **kwargs)
return wrapped
class AggregateController(object):
"""The Host Aggregates API controller for the OpenStack API."""
def __init__(self):
self.api = compute.AggregateAPI()
def index(self, req):
"""Returns a list a host aggregate's id, name, availability_zone."""
context = _get_context(req)
authorize(context)
aggregates = self.api.get_aggregate_list(context)
return {'aggregates': aggregates}
def create(self, req, body):
"""Creates an aggregate, given its name and availablity_zone."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest
try:
host_aggregate = body["aggregate"]
aggregate_name = host_aggregate["name"]
availability_zone = host_aggregate["availability_zone"]
except KeyError:
raise exc.HTTPBadRequest
if len(host_aggregate) != 2:
raise exc.HTTPBadRequest
try:
aggregate = self.api.create_aggregate(context, aggregate_name,
availability_zone)
except exception.AggregateNameExists:
raise exc.HTTPConflict
return self._marshall_aggregate(aggregate)
def show(self, req, id):
"""Shows the details of an aggregate, hosts and metadata included."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.get_aggregate(context, id)
except exception.AggregateNotFound:
raise exc.HTTPNotFound
return self._marshall_aggregate(aggregate)
def update(self, req, id, body):
"""Updates the name and/or availbility_zone of given aggregate."""
context = _get_context(req)
authorize(context)
aggregate = id
if len(body) != 1:
raise exc.HTTPBadRequest
try:
updates = body["aggregate"]
except KeyError:
raise exc.HTTPBadRequest
if len(updates) < 1:
raise exc.HTTPBadRequest
for key in updates.keys():
if not key in ["name", "availability_zone"]:
raise exc.HTTPBadRequest
try:
aggregate = self.api.update_aggregate(context, aggregate, updates)
except exception.AggregateNotFound:
raise exc.HTTPNotFound
return self._marshall_aggregate(aggregate)
def delete(self, req, id):
"""Removes an aggregate by id."""
context = _get_context(req)
authorize(context)
aggregate_id = id
try:
self.api.delete_aggregate(context, aggregate_id)
except exception.AggregateNotFound:
raise exc.HTTPNotFound
def action(self, req, id, body):
_actions = {
'add_host': self._add_host,
'remove_host': self._remove_host,
'set_metadata': self._set_metadata,
}
for action, data in body.iteritems():
try:
return _actions[action](req, id, data)
except KeyError:
msg = _("Aggregates does not have %s action") % action
raise exc.HTTPBadRequest(explanation=msg)
raise exc.HTTPBadRequest(explanation=_("Invalid request body"))
@get_host_from_body
def _add_host(self, req, id, host):
"""Adds a host to the specified aggregate."""
context = _get_context(req)
authorize(context)
aggregate = id
try:
aggregate = self.api.add_host_to_aggregate(context,
aggregate, host)
except exception.AggregateNotFound:
raise exc.HTTPNotFound
except exception.ComputeHostNotFound:
raise exc.HTTPNotFound
except exception.AggregateHostConflict:
raise exc.HTTPConflict
except exception.AggregateHostExists:
raise exc.HTTPConflict
except exception.InvalidAggregateAction:
raise exc.HTTPConflict
return self._marshall_aggregate(aggregate)
@get_host_from_body
def _remove_host(self, req, id, host):
"""Removes a host from the specified aggregate."""
context = _get_context(req)
authorize(context)
aggregate = id
try:
aggregate = self.api.remove_host_from_aggregate(context,
aggregate, host)
except exception.AggregateNotFound:
raise exc.HTTPNotFound
except exception.AggregateHostNotFound:
raise exc.HTTPNotFound
except exception.InvalidAggregateAction:
raise exc.HTTPConflict
return self._marshall_aggregate(aggregate)
def _set_metadata(self, req, id, body):
"""Replaces the aggregate's existing metadata with new metadata."""
context = _get_context(req)
authorize(context)
aggregate = id
if len(body) != 1:
raise exc.HTTPBadRequest
try:
metadata = body["metadata"]
except KeyError:
raise exc.HTTPBadRequest
try:
aggregate = self.api.update_aggregate_metadata(context,
aggregate, metadata)
except exception.AggregateNotFound:
raise exc.HTTPNotFound
return self._marshall_aggregate(aggregate)
def _marshall_aggregate(self, aggregate):
return {"aggregate": aggregate}
class Aggregates(extensions.ExtensionDescriptor):
"""Admin-only aggregate administration"""
name = "Aggregates"
alias = "os-aggregates"
namespace = "http://docs.openstack.org/compute/ext/aggregates/api/v1.1"
updated = "2012-01-12T00:00:00+00:00"
def __init__(self, ext_mgr):
ext_mgr.register(self)
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-aggregates',
AggregateController(),
member_actions={"action": "POST", })
resources.append(res)
return resources
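# --- Editor's illustrative sketch (not part of the original module): request
# bodies accepted by AggregateController.action(), keyed by sub-action name, as
# implied by get_host_from_body() and _set_metadata() above. Host and metadata
# values are made-up examples.
if __name__ == "__main__":
    example_action_bodies = [
        {"add_host": {"host": "compute-1"}},
        {"remove_host": {"host": "compute-1"}},
        {"set_metadata": {"metadata": {"ssd": "true"}}},
    ]
    print(example_action_bodies)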
|
russellb/nova
|
nova/api/openstack/compute/contrib/aggregates.py
|
Python
|
apache-2.0
| 7,501
|
# Copyright 2017 <thenakliman@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import psutil
from nirikshak.tests.unit import base
from nirikshak.workers.process import running
class RunningProcessWorkerTest(base.BaseTestCase):
@staticmethod
def _get_fake_process_jaanch():
jaanch = {
'type': 'process_running',
'input': {
'args': {
'name': 'ssh'
}
}
}
return jaanch
@staticmethod
def _get_fake_process_details():
mk1 = mock.Mock()
mk1.name = mock.Mock(return_value='ssh')
mk2 = mock.Mock()
mk2.name = mock.Mock(return_value='abssh')
return [mk1, mk2]
@mock.patch.object(psutil, 'process_iter')
def test_process_running(self, mock_psutil):
jaanch = self._get_fake_process_jaanch()
mock_psutil.return_value = self._get_fake_process_details()
result = running.RunningProcessWorker().work(**jaanch)
self.assertTrue(result['input']['result'])
@mock.patch.object(psutil, 'process_iter')
def test_process_running_invalid(self, mock_psutil):
jaanch = self._get_fake_process_jaanch()
names = self._get_fake_process_details()
names[0].name.return_value = 'test_service'
mock_psutil.return_value = names
jaanch['input']['args']['name'] = 'invalid_process'
result = running.RunningProcessWorker().work(**jaanch)
self.assertFalse(result['input']['result'])
@mock.patch.object(psutil, 'process_iter')
def test_process_raise_invalid(self, mock_psutil):
jaanch = self._get_fake_process_jaanch()
names = self._get_fake_process_details()
names[0].name.side_effect = psutil.NoSuchProcess('test')
mock_psutil.return_value = names
jaanch['input']['args']['name'] = 'ssh'
result = running.RunningProcessWorker().work(**jaanch)
self.assertFalse(result['input']['result'])
|
thenakliman/nirikshak
|
nirikshak/tests/unit/workers/process/test_process.py
|
Python
|
apache-2.0
| 2,516
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests to the conductor service."""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from nova import baserpc
from nova.conductor import manager
from nova.conductor import rpcapi
from nova.i18n import _LI, _LW
from nova import utils
conductor_opts = [
cfg.BoolOpt('use_local',
default=False,
help='Perform nova-conductor operations locally'),
cfg.StrOpt('topic',
default='conductor',
help='The topic on which conductor nodes listen'),
cfg.StrOpt('manager',
default='nova.conductor.manager.ConductorManager',
help='Full class name for the Manager for conductor'),
cfg.IntOpt('workers',
help='Number of workers for OpenStack Conductor service. '
'The default will be the number of CPUs available.')
]
conductor_group = cfg.OptGroup(name='conductor',
title='Conductor Options')
CONF = cfg.CONF
CONF.register_group(conductor_group)
CONF.register_opts(conductor_opts, conductor_group)
LOG = logging.getLogger(__name__)
class LocalAPI(object):
"""A local version of the conductor API that does database updates
locally instead of via RPC.
"""
def __init__(self):
# TODO(danms): This needs to be something more generic for
# other/future users of this sort of functionality.
self._manager = utils.ExceptionHelper(manager.ConductorManager())
def wait_until_ready(self, context, *args, **kwargs):
# nothing to wait for in the local case.
pass
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database."""
return self._manager.instance_update(context, instance_uuid,
updates, 'compute')
def instance_get_all_by_host(self, context, host, columns_to_join=None):
return self._manager.instance_get_all_by_host(
context, host, None, columns_to_join=columns_to_join)
def instance_get_all_by_host_and_node(self, context, host, node):
return self._manager.instance_get_all_by_host(context, host, node,
None)
def migration_get_in_progress_by_host_and_node(self, context, host, node):
return self._manager.migration_get_in_progress_by_host_and_node(
context, host, node)
def aggregate_metadata_get_by_host(self, context, host,
key='availability_zone'):
return self._manager.aggregate_metadata_get_by_host(context,
host,
key)
def provider_fw_rule_get_all(self, context):
return self._manager.provider_fw_rule_get_all(context)
def block_device_mapping_create(self, context, values):
return self._manager.block_device_mapping_update_or_create(context,
values,
create=True)
def block_device_mapping_update(self, context, bdm_id, values):
values = dict(values)
values['id'] = bdm_id
return self._manager.block_device_mapping_update_or_create(
context, values, create=False)
def block_device_mapping_update_or_create(self, context, values):
return self._manager.block_device_mapping_update_or_create(context,
values,
create=None)
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy=True):
return self._manager.block_device_mapping_get_all_by_instance(
context, instance, legacy)
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
return self._manager.vol_usage_update(context, vol_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance, last_refreshed,
update_totals)
def service_get_all(self, context):
return self._manager.service_get_all_by(context, host=None, topic=None,
binary=None)
def service_get_all_by_topic(self, context, topic):
return self._manager.service_get_all_by(context, topic=topic,
host=None, binary=None)
def service_get_all_by_host(self, context, host):
return self._manager.service_get_all_by(context, host=host, topic=None,
binary=None)
def service_get_by_host_and_topic(self, context, host, topic):
return self._manager.service_get_all_by(context, topic, host,
binary=None)
def service_get_by_compute_host(self, context, host):
result = self._manager.service_get_all_by(context, 'compute', host,
binary=None)
# FIXME(comstud): A major revision bump to 2.0 should return a
# single entry, so we should just return 'result' at that point.
return result[0]
def service_get_by_host_and_binary(self, context, host, binary):
return self._manager.service_get_all_by(context, host=host,
binary=binary, topic=None)
def service_create(self, context, values):
return self._manager.service_create(context, values)
def service_destroy(self, context, service_id):
return self._manager.service_destroy(context, service_id)
def compute_node_create(self, context, values):
return self._manager.compute_node_create(context, values)
def compute_node_update(self, context, node, values, prune_stats=False):
# NOTE(belliott) ignore prune_stats param, it's no longer relevant
return self._manager.compute_node_update(context, node, values)
def compute_node_delete(self, context, node):
return self._manager.compute_node_delete(context, node)
def service_update(self, context, service, values):
return self._manager.service_update(context, service, values)
def task_log_get(self, context, task_name, begin, end, host, state=None):
return self._manager.task_log_get(context, task_name, begin, end,
host, state)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
return self._manager.task_log_begin_task(context, task_name,
begin, end, host,
task_items, message)
def task_log_end_task(self, context, task_name, begin, end, host,
errors, message=None):
return self._manager.task_log_end_task(context, task_name,
begin, end, host,
errors, message)
def security_groups_trigger_handler(self, context, event, *args):
return self._manager.security_groups_trigger_handler(context,
event, args)
def security_groups_trigger_members_refresh(self, context, group_ids):
return self._manager.security_groups_trigger_members_refresh(context,
group_ids)
def object_backport(self, context, objinst, target_version):
return self._manager.object_backport(context, objinst, target_version)
class LocalComputeTaskAPI(object):
def __init__(self):
# TODO(danms): This needs to be something more generic for
# other/future users of this sort of functionality.
self._manager = utils.ExceptionHelper(
manager.ComputeTaskManager())
def resize_instance(self, context, instance, extra_instance_updates,
scheduler_hint, flavor, reservations,
clean_shutdown=True):
# NOTE(comstud): 'extra_instance_updates' is not used here but is
# needed for compatibility with the cells_rpcapi version of this
# method.
self._manager.migrate_server(
context, instance, scheduler_hint, live=False, rebuild=False,
flavor=flavor, block_migration=None, disk_over_commit=None,
reservations=reservations, clean_shutdown=clean_shutdown)
def live_migrate_instance(self, context, instance, host_name,
block_migration, disk_over_commit):
scheduler_hint = {'host': host_name}
self._manager.migrate_server(
context, instance, scheduler_hint, True, False, None,
block_migration, disk_over_commit, None)
def build_instances(self, context, instances, image,
filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
legacy_bdm=True):
utils.spawn_n(self._manager.build_instances, context,
instances=instances, image=image,
filter_properties=filter_properties,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=legacy_bdm)
def unshelve_instance(self, context, instance):
utils.spawn_n(self._manager.unshelve_instance, context,
instance=instance)
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate=False, on_shared_storage=False,
preserve_ephemeral=False, host=None, kwargs=None):
# kwargs unused but required for cell compatibility.
utils.spawn_n(self._manager.rebuild_instance, context,
instance=instance,
new_pass=new_pass,
injected_files=injected_files,
image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata,
bdms=bdms,
recreate=recreate,
on_shared_storage=on_shared_storage,
host=host,
preserve_ephemeral=preserve_ephemeral)
class API(LocalAPI):
"""Conductor API that does updates via RPC to the ConductorManager."""
def __init__(self):
self._manager = rpcapi.ConductorAPI()
self.base_rpcapi = baserpc.BaseAPI(topic=CONF.conductor.topic)
def wait_until_ready(self, context, early_timeout=10, early_attempts=10):
'''Wait until a conductor service is up and running.
This method calls the remote ping() method on the conductor topic until
it gets a response. It starts with a shorter timeout in the loop
(early_timeout) up to early_attempts number of tries. It then drops
back to the globally configured timeout for rpc calls for each retry.
'''
attempt = 0
timeout = early_timeout
# if we show the timeout message, make sure we show a similar
# message saying that everything is now working to avoid
# confusion
has_timedout = False
while True:
# NOTE(danms): Try ten times with a short timeout, and then punt
# to the configured RPC timeout after that
if attempt == early_attempts:
timeout = None
attempt += 1
# NOTE(russellb): This is running during service startup. If we
# allow an exception to be raised, the service will shut down.
# This may fail the first time around if nova-conductor wasn't
# running when this service started.
try:
self.base_rpcapi.ping(context, '1.21 GigaWatts',
timeout=timeout)
if has_timedout:
LOG.info(_LI('nova-conductor connection '
'established successfully'))
break
except messaging.MessagingTimeout:
has_timedout = True
LOG.warning(_LW('Timed out waiting for nova-conductor. '
'Is it running? Or did this service start '
'before nova-conductor? '
'Reattempting establishment of '
'nova-conductor connection...'))
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database."""
return self._manager.instance_update(context, instance_uuid,
updates, 'conductor')
class ComputeTaskAPI(object):
"""ComputeTask API that queues up compute tasks for nova-conductor."""
def __init__(self):
self.conductor_compute_rpcapi = rpcapi.ComputeTaskAPI()
def resize_instance(self, context, instance, extra_instance_updates,
scheduler_hint, flavor, reservations,
clean_shutdown=True):
# NOTE(comstud): 'extra_instance_updates' is not used here but is
# needed for compatibility with the cells_rpcapi version of this
# method.
self.conductor_compute_rpcapi.migrate_server(
context, instance, scheduler_hint, live=False, rebuild=False,
flavor=flavor, block_migration=None, disk_over_commit=None,
reservations=reservations, clean_shutdown=clean_shutdown)
def live_migrate_instance(self, context, instance, host_name,
block_migration, disk_over_commit):
scheduler_hint = {'host': host_name}
self.conductor_compute_rpcapi.migrate_server(
context, instance, scheduler_hint, True, False, None,
block_migration, disk_over_commit, None)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping, legacy_bdm=True):
self.conductor_compute_rpcapi.build_instances(context,
instances=instances, image=image,
filter_properties=filter_properties,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=legacy_bdm)
def unshelve_instance(self, context, instance):
self.conductor_compute_rpcapi.unshelve_instance(context,
instance=instance)
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate=False, on_shared_storage=False,
preserve_ephemeral=False, host=None, kwargs=None):
# kwargs unused but required for cell compatibility
self.conductor_compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
injected_files=injected_files,
image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata,
bdms=bdms,
recreate=recreate,
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
host=host)
|
yanheven/nova
|
nova/conductor/api.py
|
Python
|
apache-2.0
| 16,772
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a TD3 agent.
Implementation of TD3 - Twin Delayed Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
https://arxiv.org/pdf/1802.09477.pdf
"""
import agent
from common import replay_buffer
from common.actor_critic import ActorNetwork
from common.actor_critic import CriticNetwork
import numpy as np
class TD3(agent.Agent):
"""TD3 agent."""
def __init__(self, env, sess, config):
"""Initialize members."""
state_dim = env.observation_space.shape[0]
self.env = env
self.action_dim = env.action_space.shape[0]
self.action_high = env.action_space.high
self.action_low = env.action_space.low
self.batch_size = config.batch_size
self.warmup_size = config.warmup_size
self.gamma = config.gamma
self.sigma = config.sigma
self.sigma_tilda = config.sigma_tilda
self.noise_cap = config.c
self.train_interval = config.d
self.actor = ActorNetwork(sess=sess,
state_dim=state_dim,
action_dim=self.action_dim,
action_high=self.action_high,
action_low=self.action_low,
learning_rate=config.actor_lr,
grad_norm_clip=config.grad_norm_clip,
tau=config.tau,
batch_size=config.batch_size)
self.critic1 = CriticNetwork(sess=sess,
state_dim=state_dim,
action_dim=self.action_dim,
learning_rate=config.critic_lr,
tau=config.tau,
gamma=config.gamma,
name='critic1')
self.critic2 = CriticNetwork(sess=sess,
state_dim=state_dim,
action_dim=self.action_dim,
learning_rate=config.critic_lr,
tau=config.tau,
gamma=config.gamma,
name='critic2')
self.replay_buffer = replay_buffer.ReplayBuffer(
buffer_size=config.buffer_size)
def initialize(self):
"""Initialization before playing."""
self.update_targets()
def random_action(self, observation):
"""Return a random action."""
return self.env.action_space.sample()
def action(self, observation):
"""Return an action according to the agent's policy."""
return self.actor.get_action(observation)
def action_with_noise(self, observation):
"""Return a noisy action."""
if self.replay_buffer.size > self.warmup_size:
action = self.action(observation)
else:
action = self.random_action(observation)
noise = np.clip(np.random.randn(self.action_dim) * self.sigma,
-self.noise_cap, self.noise_cap)
action_with_noise = action + noise
return (np.clip(action_with_noise, self.action_low, self.action_high),
action, noise)
def store_experience(self, s, a, r, t, s2):
"""Save experience to replay buffer."""
self.replay_buffer.add(s, a, r, t, s2)
def train(self, global_step):
"""Train the agent's policy for 1 iteration."""
if self.replay_buffer.size > self.warmup_size:
s0, a, r, t, s1 = self.replay_buffer.sample_batch(self.batch_size)
epsilon = np.clip(np.random.randn(self.batch_size, self.action_dim),
-self.noise_cap, self.noise_cap)
target_actions = self.actor.get_target_action(s1) + epsilon
target_actions = np.clip(target_actions,
self.action_low,
self.action_high)
target_qval = self.get_target_qval(s1, target_actions)
t = t.astype(dtype=int)
y = r + self.gamma * target_qval * (1 - t)
self.critic1.train(s0, a, y)
self.critic2.train(s0, a, y)
if global_step % self.train_interval == 0:
actions = self.actor.get_action(s0)
grads = self.critic1.get_action_gradients(s0, actions)
self.actor.train(s0, grads[0])
self.update_targets()
def update_targets(self):
"""Update all target networks."""
self.actor.update_target_network()
self.critic1.update_target_network()
self.critic2.update_target_network()
def get_target_qval(self, observation, action):
"""Get target Q-val."""
target_qval1 = self.critic1.get_target_qval(observation, action)
target_qval2 = self.critic2.get_target_qval(observation, action)
return np.minimum(target_qval1, target_qval2)
def get_qval(self, observation, action):
"""Get Q-val."""
qval1 = self.critic1.get_qval(observation, action)
qval2 = self.critic2.get_qval(observation, action)
return np.minimum(qval1, qval2)
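# --- Editor's illustrative sketch (not part of the original module): the TD3
# target computed in train() above, on made-up numbers. The clipped double-Q
# trick bootstraps from the minimum of the two target critics, and terminal
# transitions (t == 1) do not bootstrap at all. Running the file directly still
# requires the agent/common modules imported at the top.
if __name__ == "__main__":
    r = np.array([[1.0], [0.5]])                 # rewards
    t = np.array([[0], [1]])                     # terminal flags
    gamma = 0.99
    q1_target = np.array([[2.0], [3.0]])
    q2_target = np.array([[1.8], [3.5]])
    y = r + gamma * np.minimum(q1_target, q2_target) * (1 - t)
    print(y)  # [[2.782], [0.5]]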
|
GoogleCloudPlatform/cloudml-samples
|
tensorflow/standard/reinforcement_learning/rl_on_gcp_demo/trainer/td3_agent.py
|
Python
|
apache-2.0
| 5,889
|
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: add_to_frame.py
# Date: Wed Jun 04 20:28:19 2014 +0800
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import cPickle as pickle
#import pickle
import gzip, numpy
import random
import argparse
from dataio import read_data, save_data  # read_data assumed for loading; the original line imported save_data twice
def get_args():
desc = 'add img into a larger frame'
parser = argparse.ArgumentParser(description = desc)
parser.add_argument('-i', '--input',
help='input file of name "*.pkl.gz" ', required=True)
parser.add_argument('-s', '--size',
help='frame size', required=True)
parser.add_argument('-p', '--place',
help='place of the image. either "random" or "(x, y)"')
ret = parser.parse_args()
return ret
def add_img_to_frame(img, frame, offset):
"""put a smaller matrix into a larger frame,
starting at a specific offset"""
img = img.reshape((orig_size, orig_size))
for x in xrange(orig_size):
frame[x + offset[0]][offset[1]: offset[1] + orig_size] = img[x]
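# --- Editor's sketch (not part of the original script): an equivalent of
# add_img_to_frame() written with numpy slice assignment instead of the
# row-by-row loop; the image size is passed in explicitly rather than read
# from the global orig_size.
def add_img_to_frame_vectorized(img, frame, offset, img_size):
    """Copy a flattened img_size x img_size image into frame at offset."""
    img = img.reshape((img_size, img_size))
    frame[offset[0]:offset[0] + img_size,
          offset[1]:offset[1] + img_size] = img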
def add_frame(dataset):
""" process a dataset consisting of a list of imgs"""
if args.place != 'random':
offset = eval(args.place)
assert type(offset) == tuple and len(offset) == 2
Xs = dataset[0]
newX = []
for (idx, k) in enumerate(Xs):
if args.place == 'random':
# generate a random offset
offset = (random.randint(0, frame_size - orig_size),
random.randint(0, frame_size - orig_size))
frame = numpy.zeros((frame_size, frame_size), dtype=numpy.float32)
add_img_to_frame(k, frame, offset)
newX.append(numpy.ndarray.flatten(frame))
return (numpy.asarray(newX), dataset[1])
# prepare params
args = get_args()
input = args.input
frame_size = int(args.size)
output_basename = input[:-6] + "frame{0}".format(frame_size)
# read data
train_set, valid_set, test_set = read_data(input)
print len(train_set[0]), len(valid_set[0]), len(test_set[0])
orig_size = int(numpy.sqrt(len(train_set[0][0])))
assert frame_size > orig_size, "frame size must be larger than original image"
# add to frame
train_set = add_frame(train_set)
valid_set = add_frame(valid_set)
test_set = add_frame(test_set)
print "Writing..."
data = (train_set, valid_set, test_set)
save_data(data, output_basename)
#usage: add_to_frame.py [-h] -i INPUT -s SIZE [-p PLACE]
#optional arguments:
#-h, --help show this help message and exit
#-i INPUT, --input INPUT
#input file of name "*.pkl.gz"
#-s SIZE, --size SIZE frame size
#-p PLACE, --place PLACE
#place of the image. either "random" or "(x, y)"
# output filename is 'input.frameXX.pkl.gz'
|
mfs6174/Deep6174
|
old-code/add_to_frame.py
|
Python
|
apache-2.0
| 2,760
|
from django.conf.urls import url, include
from .views import login, logout, register
urlpatterns = [
url(r'^login/', login, name='login'),
url(r'^logout/', logout, name='logout'),
url(r'^register/', register, name='register'),
]
|
AndreyRem/polls
|
polls/polls/apps/auth_service/urls.py
|
Python
|
apache-2.0
| 241
|
print "I am testing, TEST TEST TEST!"
|
antont/tundra
|
src/Application/PythonScriptModule/pymodules_old/runtests.py
|
Python
|
apache-2.0
| 38
|
import logging
from eclcli.common import utils
LOG = logging.getLogger(__name__)
DEFAULT_API_VERSION = '2'
API_VERSION_OPTION = 'os_storage_api_version'
API_NAME = "storage"
API_VERSIONS = {
"2": "storageclient.v2.client.Client"
}
def make_client(instance):
from .storageclient.v2 import client as storage_client
http_log_debug = utils.get_effective_log_level() <= logging.DEBUG
kwargs = utils.build_kwargs_dict('endpoint_type', instance._interface)
client = storage_client.Client(
session=instance.session,
http_log_debug=http_log_debug,
**kwargs
)
return client
def build_option_parser(parser):
# parser.add_argument(
# '--os-storage-api-version',
# metavar='<storage-api-version>',
# default=utils.env('OS_STORAGE_API_VERSION'),
# help='Storage API version, default=' +
# DEFAULT_API_VERSION +
# ' (Env: OS_STORAGE_API_VERSION)')
return parser
|
nttcom/eclcli
|
eclcli/storage/client.py
|
Python
|
apache-2.0
| 975
|
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from iptest.assert_util import *
class It:
x = 0
a = ()
def __init__(self, a):
self.x = 0
self.a = a
def __next__(self):
if self.x <= 9:
self.x = self.x+1
return self.a[self.x-1]
else:
raise StopIteration
def __iter__(self):
return self
class Iterator:
x = 0
a = (1,2,3,4,5,6,7,8,9,0)
def __iter__(self):
return It(self.a)
class Indexer:
a = (1,2,3,4,5,6,7,8,9,0)
def __getitem__(self, i):
if i < len(self.a):
return self.a[i]
else:
raise IndexError
i = Iterator()
for j in i:
Assert(j in i)
Assert(1 in i)
Assert(2 in i)
Assert(not (10 in i))
i = Indexer()
for j in i:
Assert(j in i)
Assert(1 in i)
Assert(2 in i)
Assert(not (10 in i))
# Testing the iter(o,s) function
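# Note: iter(callable, sentinel) calls `callable` repeatedly until it returns the
# sentinel value (14 below); the sentinel itself is never yielded.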
class Iter:
x = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
index = -1
it = Iter()
def f():
it.index += 1
return it.x[it.index]
y = []
for i in iter(f, 14):
y.append(i)
Assert(y == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])
y = ['1']
y += Iterator()
Assert(y == ['1', 1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
y = ['1']
y += Indexer()
Assert(y == ['1', 1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
AssertErrorWithMessages(TypeError, "iter() takes at least 1 argument (0 given)",
"iter expected at least 1 arguments, got 0", iter)
def test_itertools_same_value():
x = iter(list(range(4)))
AreEqual([(i,j) for i,j in zip(x,x)], [(0, 1), (2, 3)])
def test_itertools_islice_end():
"""islice shouldn't consume values after the limit specified by step"""
from itertools import islice
# create a zipped iterator w/ odd number of values...
it = zip([2,3,4], [4,5,6])
# slice in 2, turn that into a list...
list(islice(it, 2))
# we should still have the last value still present
for x in it:
AreEqual(x, (4,6))
@skip("silverlight")
def test_iterator_for():
"""test various iterable objects with multiple incomplete iterations"""
def generator():
yield 0
yield 1
from io import StringIO
strO = StringIO()
strO.write('abc\n')
strO.write('def')
strI = StringIO('abc\ndef')
import sys
fi = sys.float_info
d = {2:3, 3:4}
l = [2, 3]
s = set([2, 3, 4])
if not is_silverlight:
f = file('test_file.txt', 'w+')
f.write('abc\n')
f.write('def')
f.close()
f = file('test_file.txt')
import nt
stat = nt.stat(__file__)
class x(object):
abc = 2
bcd = 3
dictproxy = x.__dict__
dictlist = list(x.__dict__)
ba = bytearray(b'abc')
try:
# iterator, first Value, second Value
iterators = [
# objects which when enumerated multiple times continue
(generator(), 0, 1),
(strI, 'abc\n', 'def'),
(strO, 'abc\n', 'def'),
# objects which when enumerated multiple times reset
(range(10), 0, 0),
([0, 1], 0, 0),
((0, 1), 0, 0),
(fi, fi[0], fi[0]),
(b'abc', b'a', b'a'),
(ba, ord(b'a'), ord(b'a')),
('abc', 'a', 'a'),
(d, list(d)[0], list(d)[0]),
(l, l[0], l[0]),
(s, list(s)[0], list(s)[0]),
(dictproxy, dictlist[0], dictlist[0]),
]
if not is_silverlight:
iterators.append((f, 'abc\n', 'def'))
iterators.append((stat, stat[0], stat[0]))
for iterator, res0, res1 in iterators:
for x in iterator:
AreEqual(x, res0)
break
for x in iterator:
AreEqual(x, res1)
break
finally:
f.close()
nt.unlink('test_file.txt')
def test_iterator_closed_file():
cf = file(__file__)
cf.close()
def f():
for x in cf: pass
AssertError(ValueError, f)
def test_no_return_self_in_iter():
class A(object):
def __iter__(cls):
return 1
def __next__(cls):
return 2
a = A()
AreEqual(next(a), 2)
def test_no_iter():
class A(object):
def __next__(cls):
return 2
a = A()
AreEqual(next(a), 2)
def test_with_iter():
class A(object):
def __iter__(cls):
return cls
def __next__(self):
return 2
a = A()
AreEqual(next(a), 2)
def test_with_iter_next_in_init():
class A(object):
def __init__(cls):
AreEqual(next(cls), 2)
AreEqual(next(cls), 2)
def __iter__(cls):
return cls
def __next__(cls):
return 2
a = A()
AreEqual(next(a), 2)
def test_interacting_iterators():
"""This test is similar to how Jinga2 fails."""
class A(object):
def __iter__(cls):
return cls
def __next__(self):
return 3
class B(object):
def __iter__(cls):
return A()
def __next__(self):
return 2
b = B()
AreEqual(next(b), 2)
def test_call_to_iter_or_next():
class A(object):
def __iter__(cls):
Assert(False, "__iter__ should not be called.")
return cls
def __next__(self):
return 2
a = A()
AreEqual(next(a), 2)
run_test(__name__)
|
moto-timo/ironpython3
|
Tests/test_iterator.py
|
Python
|
apache-2.0
| 6,817
|
# -*- test-case-name: twistedcaldav.directory.test.test_util -*-
##
# Copyright (c) 2006-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Utilities.
"""
__all__ = [
"normalizeUUID",
"uuidFromName",
"NotFoundResource",
]
from twext.enterprise.ienterprise import AlreadyFinishedError
from twext.python.log import Logger
from txweb2 import responsecode
from txweb2.auth.wrapper import UnauthorizedResponse
from txweb2.dav.resource import DAVResource
from txweb2.http import StatusResponse
from twisted.internet.defer import inlineCallbacks, returnValue
from uuid import UUID, uuid5
from twisted.python.failure import Failure
from twisted.web.template import tags
log = Logger()
def uuidFromName(namespace, name):
"""
Generate a version 5 (SHA-1) UUID from a namespace UUID and a name.
See http://www.ietf.org/rfc/rfc4122.txt, section 4.3.
@param namespace: a UUID denoting the namespace of the generated UUID.
@param name: a byte string to generate the UUID from.
"""
# We don't want Unicode here; convert to UTF-8
if type(name) is unicode:
name = name.encode("utf-8")
return normalizeUUID(str(uuid5(UUID(namespace), name)))
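# Illustrative usage (the namespace below is the RFC 4122 DNS namespace, used here
# only as an example value):
#   uuidFromName("6ba7b810-9dad-11d1-80b4-00c04fd430c8", "example.com")
# returns a normalized (uppercase, hyphenated) version 5 UUID string.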
def normalizeUUID(value):
"""
Convert strings which the uuid.UUID( ) method can parse into normalized
(uppercase with hyphens) form. Any value which is not parsed by UUID( )
is returned as is.
@param value: string value to normalize
"""
try:
return str(UUID(value)).upper()
except (ValueError, TypeError):
return value
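# For example:
#   normalizeUUID("6ba7b8109dad11d180b400c04fd430c8") -> "6BA7B810-9DAD-11D1-80B4-00C04FD430C8"
#   normalizeUUID("not-a-uuid") -> "not-a-uuid" (returned unchanged)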
TRANSACTION_KEY = '_newStoreTransaction'
def transactionFromRequest(request, newStore):
"""
Return the associated transaction from the given HTTP request, creating a
new one from the given data store if none has yet been associated.
Also, if the request was not previously associated with a transaction, add
a failsafe transaction-abort response filter to abort any transaction which
has not been committed or aborted by the resource which responds to the
request.
@param request: The request to inspect.
@type request: L{IRequest}
@param newStore: The store to create a transaction from.
@type newStore: L{IDataStore}
@return: a transaction that should be used to read and write data
associated with the request.
@rtype: L{ITransaction} (and possibly L{ICalendarTransaction} and
L{IAddressBookTransaction} as well).
"""
transaction = getattr(request, TRANSACTION_KEY, None)
if transaction is None:
if hasattr(request, "authzUser") and request.authzUser is not None:
authz_uid = request.authzUser.record.uid
else:
authz_uid = None
transaction = newStore.newTransaction(repr(request), authz_uid=authz_uid)
def abortIfUncommitted(request, response):
try:
# TODO: missing 'yield' here. For formal correctness as per
# the interface, this should be allowed to be a Deferred. (The
# actual implementation still raises synchronously, so there's
# no bug currently.)
transaction.abort()
except AlreadyFinishedError:
pass
return response
abortIfUncommitted.handleErrors = True
request.addResponseFilter(abortIfUncommitted)
setattr(request, TRANSACTION_KEY, transaction)
return transaction
def splitIntoBatches(data, size):
"""
Return a generator of sets consisting of the contents of the data set
split into parts no larger than size.
"""
if not data:
yield set([])
data = list(data)
while data:
yield set(data[:size])
del data[:size]
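# Illustrative usage:
#   list(splitIntoBatches([1, 2, 3, 4, 5], 2)) yields the sets {1, 2}, {3, 4}, {5};
#   an empty input yields a single empty set.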
class NotFoundResource(DAVResource):
"""
In order to prevent unauthenticated discovery of existing users via 401/404
response codes, this resource can be returned from locateChild, and it will
perform an authentication; if the user is unauthenticated, 404 responses are
turned into 401s.
"""
@inlineCallbacks
def renderHTTP(self, request):
try:
_ignore_authnUser, authzUser = yield self.authenticate(request)
except Exception:
authzUser = None
# Turn 404 into 401
if authzUser is None:
response = (yield UnauthorizedResponse.makeResponse(
request.credentialFactories,
request.remoteAddr
))
returnValue(response)
else:
response = StatusResponse(responsecode.NOT_FOUND, "Resource not found")
returnValue(response)
def formatLink(url):
"""
Convert a URL string into some twisted.web.template DOM objects for
rendering as a link to itself.
"""
return tags.a(href=url)(url)
def formatLinks(urls):
"""
Format a list of URL strings as a list of twisted.web.template DOM links.
"""
return formatList(formatLink(link) for link in urls)
def formatPrincipals(principals):
"""
Format a list of principals into some twisted.web.template DOM objects.
"""
def recordKey(principal):
try:
record = principal.record
except AttributeError:
try:
record = principal.parent.record
except:
return None
try:
shortName = record.shortNames[0]
except AttributeError:
shortName = u""
return (record.recordType, shortName)
def describe(principal):
if hasattr(principal, "record"):
return " - %s" % (principal.record.displayName,)
else:
return ""
return formatList(
tags.a(href=principal.principalURL())(
str(principal), describe(principal)
)
for principal in sorted(principals, key=recordKey)
)
def formatList(iterable):
"""
Format a list of items as an iterable suitable for rendering.
"""
thereAreAny = False
try:
item = None
for item in iterable:
thereAreAny = True
yield " -> "
if item is None:
yield "None"
else:
yield item
yield "\n"
except Exception, e:
log.error("Exception while rendering: %s" % (e,))
Failure().printTraceback()
yield " ** %s **: %s\n" % (e.__class__.__name__, e)
if not thereAreAny:
yield " '()\n"
|
trevor/calendarserver
|
twistedcaldav/directory/util.py
|
Python
|
apache-2.0
| 7,006
|
"""The testing suite for seqtools
Testing modules live alongside the modules they cover, in test.py files.
Each of these modules points to the base data directory as needed
This script is just for executing the test run
"""
import unittest, sys
import seqtools.format.sam.test
import seqtools.format.fasta.test
import seqtools.statistics.test
import seqtools.simulation.test
import seqtools.structure.transcriptome.test
import seqtools.structure.test
if __name__ == '__main__':
loader = unittest.TestLoader()
s = []
#s.append(loader.loadTestsFromModule(seqtools.format.fasta.test))
#s.append(loader.loadTestsFromModule(seqtools.statistics.test))
#s.append(loader.loadTestsFromModule(seqtools.format.sam.test))
#s.append(loader.loadTestsFromModule(seqtools.structure.transcriptome.test))
#s.append(loader.loadTestsFromModule(seqtools.structure.test))
s.append(loader.loadTestsFromModule(seqtools.simulation.test))
unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(s))
|
jason-weirather/py-seq-tools
|
tests/runtests.py
|
Python
|
apache-2.0
| 998
|
if __name__ == '__main__' :
arr1=[1,2,3,4,7,8,9]
arr2=[1,2,5,6,7,8]
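# Count elements that appear in exactly one of the two arrays, i.e. the size of the
# symmetric difference len(set(arr1) ^ set(arr2)); for this input the answer is 5.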
uncommon_ele = len(set(arr1)-set(arr2))+len(set(arr2)-set(arr1))
print(uncommon_ele)
|
saisankargochhayat/algo_quest
|
Misc/uncommon_array.py
|
Python
|
apache-2.0
| 169
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Whole population model"""
import sys
import os.path
import tensorflow as tf
from absl import app
from absl import flags
from absl import gfile
import cPickle as pickle
import matplotlib
matplotlib.use('TkAgg')
import numpy as np, h5py
import scipy.io as sio
from scipy import ndimage
import random
FLAGS = flags.FLAGS
flags.DEFINE_float('lam_w', 0.0001, 'sparsity regularization of w')
flags.DEFINE_float('lam_a', 0.0001, 'sparsity regularization of a')
flags.DEFINE_integer('ratio_SU', 7, 'ratio of subunits/cells')
flags.DEFINE_float('su_grid_spacing', 3, 'grid spacing')
flags.DEFINE_integer('np_randseed', 23, 'numpy RNG seed')
flags.DEFINE_integer('randseed', 65, 'python RNG seed')
flags.DEFINE_float('eta_w', 1e-3, 'learning rate for optimization functions')
flags.DEFINE_float('eta_a', 1e-2, 'learning rate for optimization functions')
flags.DEFINE_float('bias_init_scale', -1, 'bias initialized at scale*std')
flags.DEFINE_string('model_id', 'relu', 'which model to learn?');
flags.DEFINE_float('step_sz', 10, 'step size for learning algorithm')
flags.DEFINE_integer('window', 3, 'size of window for each subunit in relu_window model')
flags.DEFINE_integer('stride', 3, 'stride for relu_window')
flags.DEFINE_string('folder_name', 'experiment4', 'folder where to store all the data')
flags.DEFINE_string('save_location',
'/home/bhaishahster/',
'where to store logs and outputs?');
flags.DEFINE_string('data_location',
'/home/bhaishahster/data_breakdown/',
'where to take data from?')
flags.DEFINE_integer('batchsz', 1000, 'batch size for training')
flags.DEFINE_integer('n_chunks', 216, 'number of data chunks') # should be 216
flags.DEFINE_integer('n_b_in_c', 1, 'number of batches in one chunk of data')
def hex_grid(gridx, d, n):
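# Lays out n points row by row across a grid of width gridx with spacing d, shifting
# alternate rows by d/2 (roughly hexagonal packing) and adding small Gaussian jitter.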
x_log = np.array([])
y_log = np.array([])
for i in range(n):
x_log = (np.append(x_log, (((i*d)%gridx) +
(np.floor(i*d/gridx)%2)*d/2)) +
np.random.randn(1)*0.01)
y_log = np.append(y_log, np.floor((i*d/gridx))*d/2) + np.random.randn(1)*0.01
return x_log, y_log
def gauss_su(x_log, y_log, gridx=80, gridy=40):
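# For each subunit center, places a unit impulse at its rounded (y, x) location on a
# gridy x gridx canvas, blurs it with a sigma=2 Gaussian, and stores the flattened
# image as one column of the (gridx*gridy, n_subunits) weight matrix; centers that
# fall outside the grid are left as all-zero columns.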
ns = x_log.shape[0]
wts = np.zeros((3200, ns))
for isu in range(ns):
xx = np.zeros((gridy, gridx))
if((np.round(y_log[isu]) >= gridy) |
(np.round(y_log[isu]) < 0) |
(np.round(x_log[isu]) >= gridx) | (np.round(x_log[isu]) < 0)):
continue
xx[np.round(y_log[isu]), np.round(x_log[isu])] = 1
blurred_xx = ndimage.gaussian_filter(xx, sigma=2)
wts[:,isu] = np.ndarray.flatten(blurred_xx)
return wts
def initialize_su(n_su=107*10, gridx=80, gridy=40, spacing=5.7):
spacing = FLAGS.su_grid_spacing
x_log, y_log = hex_grid(gridx, spacing, n_su)
wts = gauss_su(x_log, y_log)
return wts
def get_test_data():
# stimulus.astype('float32')[216000-1000: 216000-1, :]
# response.astype('float32')[216000-1000: 216000-1, :]
# length
test_data_chunks = [FLAGS.n_chunks];
for ichunk in test_data_chunks:
filename = FLAGS.data_location + 'Off_par_data_' + str(ichunk) + '.mat'
file_r = gfile.Open(filename, 'r')
data = sio.loadmat(file_r)
stim_part = data['maskedMovdd_part'].T
resp_part = data['Y_part'].T
test_len = stim_part.shape[0]
#logfile.write('\nReturning test data')
return stim_part, resp_part, test_len
# global stimulus variables
stim_train_part = np.array([])
resp_train_part = np.array([])
chunk_order = np.array([])
cells_choose = np.array([])
chosen_mask = np.array([])
def get_next_training_batch(iteration):
# stimulus.astype('float32')[tms[icnt: icnt+FLAGS.batchsz], :],
# response.astype('float32')[tms[icnt: icnt+FLAGS.batchsz], :]
# FLAGS.batchsz
# we will use global stimulus and response variables
global stim_train_part
global resp_train_part
global chunk_order
togo = True
while togo:
if(iteration % FLAGS.n_b_in_c == 0):
# load new chunk of data
ichunk = (iteration / FLAGS.n_b_in_c) % (FLAGS.n_chunks - 1) # the last chunk is reserved for testing
if (ichunk == 0): # shuffle training chunks at start of training data
chunk_order = np.random.permutation(np.arange(FLAGS.n_chunks-1)) # remove first chunk - weird?
# if logfile != None :
# logfile.write('\nTraining chunks shuffled')
if chunk_order[ichunk] + 1 != 1:
filename = FLAGS.data_location + 'Off_par_data_' + str(chunk_order[ichunk] + 1) + '.mat'
file_r = gfile.Open(filename, 'r')
data = sio.loadmat(file_r)
stim_train_part = data['maskedMovdd_part']
resp_train_part = data['Y_part']
ichunk = chunk_order[ichunk] + 1
while stim_train_part.shape[1] < FLAGS.batchsz:
#print('Need to add extra chunk')
if (ichunk> FLAGS.n_chunks):
ichunk = 2
filename = FLAGS.data_location + 'Off_par_data_' + str(ichunk) + '.mat'
file_r = gfile.Open(filename, 'r')
data = sio.loadmat(file_r)
stim_train_part = np.append(stim_train_part, data['maskedMovdd_part'], axis=1)
resp_train_part = np.append(resp_train_part, data['Y_part'], axis=1)
#print(np.shape(stim_train_part), np.shape(resp_train_part))
ichunk = ichunk + 1
# if logfile != None:
# logfile.write('\nNew training data chunk loaded at: '+ str(iteration) + ' chunk #: ' + str(chunk_order[ichunk]))
ibatch = iteration % FLAGS.n_b_in_c
try:
stim_train = np.array(stim_train_part[:,ibatch: ibatch + FLAGS.batchsz], dtype='float32').T
resp_train = np.array(resp_train_part[:,ibatch: ibatch + FLAGS.batchsz], dtype='float32').T
togo=False
except:
iteration = np.random.randint(1,100000)
print('Load exception iteration: ' + str(iteration) + 'chunk: ' + str(chunk_order[ichunk]) + 'batch: ' + str(ibatch) )
togo=True
return stim_train, resp_train, FLAGS.batchsz
def main(argv):
print('\nCode started')
print('Model is ' + FLAGS.model_id)
np.random.seed(FLAGS.np_randseed)
random.seed(FLAGS.randseed)
global chunk_order
chunk_order = np.random.permutation(np.arange(FLAGS.n_chunks-1))
## Load data summary
filename = FLAGS.data_location + 'data_details.mat'
summary_file = gfile.Open(filename, 'r')
data_summary = sio.loadmat(summary_file)
cells = np.squeeze(data_summary['cells'])
nCells = cells.shape[0]
stim_dim = np.squeeze(data_summary['stim_dim'])
tot_spks = np.squeeze(data_summary['tot_spks'])
total_mask = np.squeeze(data_summary['totalMaskAccept_log']).T
print(np.shape(total_mask))
print('\ndataset summary loaded')
# decide the number of subunits to fit
Nsub = FLAGS.ratio_SU*nCells
with tf.Session() as sess:
stim = tf.placeholder(tf.float32, shape=[None, stim_dim], name='stim')
resp = tf.placeholder(tf.float32, name='resp')
data_len = tf.placeholder(tf.float32, name='data_len')
if FLAGS.model_id == 'relu':
# lam_c(X) = sum_s(a_cs relu(k_s.x)) , a_cs>0
short_filename = ('data_model=' + str(FLAGS.model_id) +
'_lam_w=' + str(FLAGS.lam_w) +
'_lam_a='+str(FLAGS.lam_a) + '_ratioSU=' + str(FLAGS.ratio_SU) +
'_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_normalized_bg')
if FLAGS.model_id == 'exp':
short_filename = ('data_model=' + str(FLAGS.model_id) +
'_bias_init=' + str(FLAGS.bias_init_scale) + '_ratioSU=' + str(FLAGS.ratio_SU) +
'_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_normalized_bg')
if FLAGS.model_id == 'mel_re_pow2':
short_filename = ('data_model=' + str(FLAGS.model_id) +
'_lam_w=' + str(FLAGS.lam_w) +
'_lam_a='+str(FLAGS.lam_a) + '_ratioSU=' + str(FLAGS.ratio_SU) +
'_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_normalized_bg')
if FLAGS.model_id == 'relu_logistic':
short_filename = ('data_model=' + str(FLAGS.model_id) +
'_lam_w=' + str(FLAGS.lam_w) +
'_lam_a='+str(FLAGS.lam_a) + '_ratioSU=' + str(FLAGS.ratio_SU) +
'_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_normalized_bg')
if FLAGS.model_id == 'relu_proximal':
short_filename = ('data_model=' + str(FLAGS.model_id) +
'_lam_w=' + str(FLAGS.lam_w) +
'_lam_a='+str(FLAGS.lam_a) + '_eta_w=' + str(FLAGS.eta_w) + '_eta_a=' + str(FLAGS.eta_a) + '_ratioSU=' + str(FLAGS.ratio_SU) +
'_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_proximal_bg')
if FLAGS.model_id == 'relu_eg':
short_filename = ('data_model=' + str(FLAGS.model_id) +
'_lam_w=' + str(FLAGS.lam_w) +
'_eta_w=' + str(FLAGS.eta_w) + '_eta_a=' + str(FLAGS.eta_a) + '_ratioSU=' + str(FLAGS.ratio_SU) +
'_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_eg_bg')
if FLAGS.model_id == 'relu_window':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'relu_window_mother':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'relu_window_mother_sfm':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'relu_window_mother_sfm_exp':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'relu_window_exp':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'relu_window_mother_exp':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'relu_window_a_support':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
if FLAGS.model_id == 'exp_window_a_support':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
parent_folder = FLAGS.save_location + FLAGS.folder_name + '/'
if not gfile.IsDirectory(parent_folder):
gfile.MkDir(parent_folder)
FLAGS.save_location = parent_folder +short_filename + '/'
print(gfile.IsDirectory(FLAGS.save_location))
if not gfile.IsDirectory(FLAGS.save_location):
gfile.MkDir(FLAGS.save_location)
print(FLAGS.save_location)
save_filename = FLAGS.save_location + short_filename
'''
# load previous iteration data, if available
try:
saved_filename = save_filename + '.pkl'
saved_file = gfile.Open(saved_filename,'r')
saved_data = pickle.load(saved_file)
w_load = saved_data['w']
a_load = saved_data['a']
w_init = saved_data['w_init']
a_init = saved_data['a_init']
ls_train_log = np.squeeze(saved_data['ls_train_log'])
ls_test_log = np.squeeze(saved_data['ls_test_log'])
start_iter = np.squeeze(saved_data['last_iter'])
chunk_order = np.squeeze(saved_data['chunk_order'])
print(np.shape(w_init),np.shape(a_init))
load_prev = True
except:
# w and a initialized same for all models! (maybe should be different for exp NL?)
w_init = initialize_su(n_su=Nsub) * 0.01
if FLAGS.model_id != 'exp':
a_init = np.random.rand(Nsub, nCells) * 0.01
else:
a_init = np.random.rand(nCells,1,Nsub) * 0.01
w_load = w_init
a_load = a_init
ls_train_log = np.array([])
ls_test_log = np.array([])
start_iter=0
print(np.shape(w_init),np.shape(a_init))
load_prev = False
'''
w_init = initialize_su(n_su=Nsub) * 0.01
if FLAGS.model_id != 'exp':
a_init = np.random.rand(Nsub, nCells) * 0.01
else:
a_init = np.random.rand(nCells,1,Nsub) * 0.01
w_load = w_init
a_load = a_init
ls_train_log = np.array([])
ls_test_log = np.array([])
print(np.shape(w_init),np.shape(a_init))
load_prev = False
if FLAGS.model_id == 'relu':
# LNL model with RELU nl
w = tf.Variable(np.array(w_load, dtype='float32'))
a = tf.Variable(np.array(a_load, dtype='float32'))
lam = tf.matmul(tf.nn.relu(tf.matmul(stim, w)), tf.nn.relu(a)) + 0.0001
loss_inter = (tf.reduce_sum(lam)/120. - tf.reduce_sum(resp*tf.log(lam))) / data_len
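# loss_inter is the (scaled) negative Poisson log-likelihood: the expected spike-count
# term sum(lam)/120 minus the data term sum(resp * log(lam)), normalized by data_len.
# The 1/120 factor presumably reflects a 120 Hz bin rate (an assumption, not stated here).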
loss = (loss_inter + FLAGS.lam_w*tf.reduce_sum(tf.abs(w)) +
        FLAGS.lam_a*tf.reduce_sum(tf.abs(a)))
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss, var_list=[w, a])
a_pos = tf.assign(a, (a + tf.abs(a))/2)
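# (a + |a|)/2 equals max(a, 0), so running a_pos after each gradient step projects
# 'a' back onto the nonnegative orthant, enforcing the a_cs > 0 constraint.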
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
w_summary = tf.histogram_summary('w', w)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'exp':
# lam_c(X) = sum_s(exp(k_s.x + b_cs)) ; used in earlier models.
w = tf.Variable(np.array(w_load, dtype='float32'))
a = tf.Variable(np.array(a_load, dtype='float32'))
lam = tf.transpose(tf.reduce_sum(tf.exp(tf.matmul(stim,w) + a), 2))
loss_inter = (tf.reduce_sum(lam/tot_spks)/120. - tf.reduce_sum(resp*tf.log(lam)/tot_spks)) / data_len
loss = loss_inter
train_step = tf.train.AdamOptimizer(FLAGS.step_sz).minimize(loss, var_list=[w, a])
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
w_summary = tf.histogram_summary('w', w)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'mel_re_pow2':
# lam_c(X) = sum_s(relu(k_s.x + a_cs)^2); MEL approximation of log-likelihood
stimulus,_,_ = get_next_training_batch(10)
sigma = np.diag(np.diag(stimulus[1000:2000,:].T.dot(stimulus[1000:2000,: ])))
sig_tf = tf.Variable(sigma,dtype='float32')
w = tf.Variable(np.array(w_load, dtype='float32'))
a = tf.Variable(np.array(a_load, dtype='float32'))
a_pos = tf.assign(a, (a + tf.abs(a))/2)
lam = tf.matmul(tf.pow(tf.nn.relu(tf.matmul(stim, w)), 2), a) + 0.0001
loss_p1 = tf.reduce_sum(tf.matmul(tf.transpose(a / tot_spks),tf.expand_dims(tf.diag_part(tf.matmul(tf.transpose(w),tf.matmul(sig_tf,w))) / 2,1)))
loss_inter = (loss_p1 / 120.) - (tf.reduce_sum(resp * tf.log(lam) / tot_spks)) / data_len
loss = (loss_inter + FLAGS.lam_w*tf.reduce_sum(tf.abs(w)) +
        FLAGS.lam_a*tf.reduce_sum(tf.abs(a)))
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss, var_list=[w, a])
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
w_summary = tf.histogram_summary('w', w)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'relu_logistic':
# f(X) = sum_s(a_cs relu(k_s.x)), acs - any sign, logistic loss.
w = tf.Variable(np.array(w_load, dtype='float32'))
a = tf.Variable(np.array(a_load, dtype='float32'))
b_init = np.random.randn(nCells)#np.log((np.sum(response,0))/(response.shape[0]-np.sum(response,0)))
b = tf.Variable(b_init,dtype='float32')
f = tf.matmul(tf.nn.relu(tf.matmul(stim, w)), a) + b
loss_inter = tf.reduce_sum(tf.nn.softplus(-2 * (resp - 0.5)*f))/ data_len
loss = (loss_inter + FLAGS.lam_w*tf.reduce_sum(tf.abs(w)) +
        FLAGS.lam_a*tf.reduce_sum(tf.abs(a)))
sigmoid_input = -resp*f
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss, var_list=[w, a, b])
a_pos = tf.assign(a, (a + tf.abs(a))/2)
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
w_summary = tf.histogram_summary('w', w)
a_summary = tf.histogram_summary('a', a)
b_summary = tf.histogram_summary('b', b)
if FLAGS.model_id == 'relu_proximal':
# lnl model with regularization, with proximal updates
w = tf.Variable(np.array(w_load, dtype='float32'))
a = tf.Variable(np.array(a_load, dtype='float32'))
lam = tf.matmul(tf.nn.relu(tf.matmul(stim, w)), a) + 0.0001
loss_inter = (tf.reduce_sum(lam/tot_spks)/120. - tf.reduce_sum(resp*tf.log(lam)/tot_spks)) / data_len
loss = loss_inter + FLAGS.lam_w*tf.reduce_sum(tf.abs(w)) + FLAGS.lam_a*tf.reduce_sum(tf.abs(a))
# training steps for a.
train_step_a = tf.train.AdagradOptimizer(FLAGS.eta_a).minimize(loss_inter, var_list=[a])
# as 'a' is positive, this is op soft-thresholding for L1 and projecting to feasible set
soft_th_a = tf.assign(a, tf.nn.relu(a - FLAGS.eta_a * FLAGS.lam_a))
# training steps for w
train_step_w = tf.train.AdagradOptimizer(FLAGS.eta_w).minimize(loss_inter, var_list=[w])
# do soft thresholding for 'w'
soft_th_w = tf.assign(w, tf.nn.relu(w - FLAGS.eta_w * FLAGS.lam_w) - tf.nn.relu(- w - FLAGS.eta_w * FLAGS.lam_w))
def training(inp_dict):
# gradient step for 'w'
sess.run(train_step_w, feed_dict=inp_dict)
# soft thresholding for w
sess.run(soft_th_w, feed_dict=inp_dict)
# gradient step for 'a'
sess.run(train_step_a, feed_dict=inp_dict)
# soft thresholding for a, and project in constraint set
sess.run(soft_th_a, feed_dict=inp_dict)
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
if FLAGS.model_id == 'relu_eg':
a_load = a_load / np.sum(a_load, axis=0)# normalize initial a
w = tf.Variable(np.array(w_load, dtype='float32'))
a = tf.Variable(np.array(a_load, dtype='float32'))
lam = tf.matmul(tf.nn.relu(tf.matmul(stim, w)), a) + 0.0001
loss_inter = (tf.reduce_sum(lam/tot_spks)/120. - tf.reduce_sum(resp*tf.log(lam)/tot_spks)) / data_len
loss = loss_inter + FLAGS.lam_w*tf.reduce_sum(tf.abs(w))
# steps to update a
# as 'a' is positive, this is op soft-thresholding for L1 and projecting to feasible set
eta_a_tf = tf.constant(np.squeeze(FLAGS.eta_a),dtype='float32')
grads_a = tf.gradients(loss_inter, a)
exp_grad_a = tf.squeeze(tf.mul(a,tf.exp(-eta_a_tf * grads_a)))
a_update = tf.assign(a,exp_grad_a/tf.reduce_sum(exp_grad_a,0))
# steps to update w
# gradient update of 'w'..
train_step_w = tf.train.AdagradOptimizer(FLAGS.eta_w).minimize(loss_inter, var_list=[w])
# do soft thresholding for 'w'
soft_th_w = tf.assign(w, tf.nn.relu(w - FLAGS.eta_w * FLAGS.lam_w) - tf.nn.relu(- w - FLAGS.eta_w * FLAGS.lam_w))
def training(inp_dict):
# gradient step for 'a' and 'w'
sess.run(train_step_w, feed_dict=inp_dict)
# soft thresholding for w
sess.run(soft_th_w)
# update a
sess.run(a_update, feed_dict=inp_dict)
print('EG training step done')
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
if FLAGS.model_id == 'relu_window':
# convolution weights, each layer is delta(x,y) - basically take window of stimulus.
window = FLAGS.window
n_pix = (2* window + 1) ** 2
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] =1
icnt = icnt + 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# set weight and other variables
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
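# With the default window=3 and stride=3 this gives dimx = floor(1 + (40 - 7)/3) = 12
# and dimy = floor(1 + (80 - 7)/3) = 25, i.e. dimx*dimy = 300 subunit windows.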
w = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32')) # exp 5
#w = tf.Variable(np.array(np.random.randn(dimx, dimy, n_pix),dtype='float32')) # exp 4
a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
a_pos = tf.assign(a, (a + tf.abs(a))/2)
# get firing rate
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
stim_masked = tf.nn.conv2d(stim4D, mask_tf, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID" )
stim_wts = tf.nn.relu(tf.reduce_sum(tf.mul(stim_masked, w), 3))
lam = tf.matmul(tf.reshape(stim_wts, [-1,dimx*dimy]),a) + 0.00001
loss_inter = (tf.reduce_sum(lam)/120. - tf.reduce_sum(resp*tf.log(lam)))/data_len
loss = loss_inter + FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w))
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss,var_list=[w,a])
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss, feed_dict=inp_dict)
return ls
w_summary = tf.histogram_summary('w', w)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'relu_window_mother':
# convolution weights, each layer is delta(x,y) - basically take window of stimulus.
window = FLAGS.window
n_pix = (2* window + 1) ** 2
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] =1
icnt = icnt + 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# set weight and other variables
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
w_del = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
w_mother = tf.Variable(np.array(np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
a_pos = tf.assign(a, (a + tf.abs(a))/2)
# get firing rate
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
# mother weight convolution
stim_convolved = tf.reduce_sum( tf.nn.conv2d(stim4D, w_mother, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID"),3)
#
stim_masked = tf.nn.conv2d(stim4D, mask_tf, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID" )
stim_del = tf.reduce_sum(tf.mul(stim_masked, w_del), 3)
su_act = tf.nn.relu(stim_del + stim_convolved)
lam = tf.matmul(tf.reshape(su_act, [-1, dimx*dimy]),a) + 0.00001
loss_inter = (tf.reduce_sum(lam)/120. - tf.reduce_sum(resp*tf.log(lam)))/data_len
loss = loss_inter + FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w_del))
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss,var_list=[w_mother, w_del, a])
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss, feed_dict=inp_dict)
return ls
w_del_summary = tf.histogram_summary('w_del', w_del)
w_mother_summary = tf.histogram_summary('w_mother', w_mother)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'relu_window_mother_sfm':
# softmax weights used!
# convolution weights, each layer is delta(x,y) - basically take window of stimulus.
window = FLAGS.window
n_pix = (2* window + 1) ** 2
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] =1
icnt = icnt + 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# set weight and other variables
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
w_del = tf.Variable(np.array(0.1 + 0.05*np.random.randn(dimx, dimy, n_pix),dtype='float32'))
w_mother = tf.Variable(np.array(np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
a = tf.Variable(np.array(np.random.randn(dimx*dimy, nCells),dtype='float32'))
b = tf.transpose(tf.nn.softmax(tf.transpose(a)))
# get firing rate
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
# mother weight convolution
stim_convolved = tf.reduce_sum( tf.nn.conv2d(stim4D, w_mother, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID"),3)
#
stim_masked = tf.nn.conv2d(stim4D, mask_tf, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID" )
stim_del = tf.reduce_sum(tf.mul(stim_masked, w_del), 3)
su_act = tf.nn.relu(stim_del + stim_convolved)
lam = tf.matmul(tf.reshape(su_act, [-1, dimx*dimy]), b) + 0.00001
loss_inter = (tf.reduce_sum(lam)/120. - tf.reduce_sum(resp*tf.log(lam)))/data_len
loss = loss_inter + FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w_del))
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss,var_list=[w_mother, w_del, a])
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
def get_loss(inp_dict):
ls = sess.run(loss, feed_dict=inp_dict)
return ls
w_del_summary = tf.histogram_summary('w_del', w_del)
w_mother_summary = tf.histogram_summary('w_mother', w_mother)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'relu_window_mother_sfm_exp':
# softmax weights used!
# convolution weights, each layer is delta(x,y) - basically take window of stimulus.
window = FLAGS.window
n_pix = (2* window + 1) ** 2
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] =1
icnt = icnt + 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# set weight and other variables
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
w_del = tf.Variable(np.array( 0.05*np.random.randn(dimx, dimy, n_pix),dtype='float32'))
w_mother = tf.Variable(np.array(np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
a = tf.Variable(np.array(np.random.randn(dimx*dimy, nCells),dtype='float32'))
b = tf.transpose(tf.nn.softmax(tf.transpose(a)))
# get firing rate
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
# mother weight convolution
stim_convolved = tf.reduce_sum( tf.nn.conv2d(stim4D, w_mother, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID"),3)
#
stim_masked = tf.nn.conv2d(stim4D, mask_tf, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID" )
stim_del = tf.reduce_sum(tf.mul(stim_masked, w_del), 3)
su_act = tf.nn.relu(stim_del + stim_convolved)
lam = tf.exp(tf.matmul(tf.reshape(su_act, [-1, dimx*dimy]), b)) + 0.00001
loss_inter = (tf.reduce_sum(lam)/120. - tf.reduce_sum(resp*tf.log(lam)))/data_len
loss = loss_inter + FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w_del))
# version 0
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss,var_list=[w_mother, w_del, a])
# version 1
'''
optimizer = tf.train.AdagradOptimizer(..)
grads_and_vars = optimizer.compute_gradients(...)
manipulated_grads_and_vars = []
clip = 1.0
for g, v in grads_and_vars:
  if g is not None:
    tf.histogram_summary(g.name + "/histogram", g)
    with tf.get_default_graph().colocate_with(g):
      [clipped_g], _ = tf.clip_by_global_norm([g], clip)
  else:
    clipped_g = g
  manipulated_grads_and_vars.append([clipped_g, v])
train_step = optimizer.apply_gradients(manipulated_grads_and_vars, global_step)
'''
# optimizer = tf.train.AdagradOptimizer(..)
# train_step = optimizer.minimize(..)
# -- or --
# def minimize(self, ..):
#   grads_and_vars = self.compute_gradients(loss, variables) # returning a list of tuples, not an Op
#   train_step = self.apply_gradients(grads_and_vars, global_step) # returns an Op
#   return train_step
#
# grads_and_vars = optimizer.compute_gradients(loss, variables)
# grads_and_vars <-- [[gradient0, variable0], [gradient1, variable1], ...]
# for g, v in grads_and_vars:
#   # manipulate g
# train_step = optimizer.apply_gradients(manipulated_grads_and_vars, global_step)
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
def get_loss(inp_dict):
ls = sess.run(loss, feed_dict=inp_dict)
return ls
w_del_summary = tf.histogram_summary('w_del', w_del)
w_mother_summary = tf.histogram_summary('w_mother', w_mother)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'relu_window_exp':
# convolution weights, each layer is delta(x,y) - basically take window of stimulus.
window = FLAGS.window
n_pix = (2* window + 1) ** 2
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] =1
icnt = icnt + 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# set weight and other variables
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
w = tf.Variable(np.array(0.01+ 0.005*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
a = tf.Variable(np.array(0.02+np.random.rand(dimx*dimy, nCells),dtype='float32'))
# get firing rate
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
stim_masked = tf.nn.conv2d(stim4D, mask_tf, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID" )
stim_wts = tf.nn.relu(tf.reduce_sum(tf.mul(stim_masked, w), 3))
a_pos = tf.assign(a, (a + tf.abs(a))/2)
lam = tf.exp(tf.matmul(tf.reshape(stim_wts, [-1,dimx*dimy]),a))
loss_inter = (tf.reduce_sum(lam)/120. - tf.reduce_sum(resp*tf.log(lam)))/data_len
loss = loss_inter + FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w))
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss,var_list=[w,a])
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss, feed_dict=inp_dict)
return ls
w_summary = tf.histogram_summary('w', w)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'relu_window_mother_exp':
# convolution weights, each layer is delta(x,y) - basically take window of stimulus.
window = FLAGS.window
n_pix = (2* window + 1) ** 2
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] =1
icnt = icnt + 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# set weight and other variables
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
w_del = tf.Variable(np.array(0.005*np.random.randn(dimx, dimy, n_pix),dtype='float32'))
w_mother = tf.Variable(np.array(0.01*np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
a = tf.Variable(0.02+np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
# get firing rate
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
# mother weight convolution
stim_convolved = tf.reduce_sum( tf.nn.conv2d(stim4D, w_mother, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID"),3)
#
a_pos = tf.assign(a, (a + tf.abs(a))/2)
stim_masked = tf.nn.conv2d(stim4D, mask_tf, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID" )
stim_del = tf.reduce_sum(tf.mul(stim_masked, w_del), 3)
su_act = tf.nn.relu(stim_del + stim_convolved)
lam = tf.exp(tf.matmul(tf.reshape(su_act, [-1, dimx*dimy]),a))
loss_inter = (tf.reduce_sum(lam)/120. - tf.reduce_sum(resp*tf.log(lam)))/data_len
loss = loss_inter + FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w_del))
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss,var_list=[w_mother, w_del, a])
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss, feed_dict=inp_dict)
return ls
w_del_summary = tf.histogram_summary('w_del', w_del)
w_mother_summary = tf.histogram_summary('w_mother', w_mother)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'relu_window_a_support':
# convolution weights, each layer is delta(x,y) - basically take window of stimulus.
window = FLAGS.window
n_pix = (2* window + 1) ** 2
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] =1
icnt = icnt + 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# set weight and other variables
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
w = tf.Variable(np.array(0.001+ 0.0005*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
a = tf.Variable(np.array(0.002*np.random.rand(dimx*dimy, nCells),dtype='float32'))
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
stim_masked = tf.nn.conv2d(stim4D, mask_tf, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID" )
stim_wts = tf.nn.relu(tf.reduce_sum(tf.mul(stim_masked, w), 3))
lam = tf.matmul(tf.reshape(stim_wts, [-1,dimx*dimy]),a)+0.0001
loss_inter = (tf.reduce_sum(lam)/120. - tf.reduce_sum(resp*tf.log(lam)))/data_len
loss = loss_inter + FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w))
a_pos = tf.assign(a, (a + tf.abs(a))/2)
# mask a to only relevant pixels
w_mother = tf.Variable(np.array(0.01*np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
# mother weight convolution
stim_convolved = tf.reduce_sum( tf.nn.conv2d(stim4D, w_mother, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID"),3)
sess.run(tf.initialize_all_variables())
mask_conv = sess.run(stim_convolved,feed_dict = {stim: total_mask})
mask_a_flat = np.array(np.reshape(mask_conv, [-1,dimx * dimy]).T >0, dtype='float32')
a_proj = tf.assign(a, a * mask_a_flat)
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss,var_list=[w,a])
def training(inp_dict):
sess.run([train_step, a_proj], feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss, feed_dict=inp_dict)
return ls
w_summary = tf.histogram_summary('w', w)
a_summary = tf.histogram_summary('a', a)
if FLAGS.model_id == 'exp_window_a_support':
# convolution weights, each layer is delta(x,y) - basically take window of stimulus.
window = FLAGS.window
n_pix = (2* window + 1) ** 2
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] =1
icnt = icnt + 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# set weight and other variables
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
w = tf.Variable(np.array(0.001+ 0.0005*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
a = tf.Variable(np.array(0.002*np.random.rand(dimx*dimy, nCells),dtype='float32'))
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
stim_masked = tf.nn.conv2d(stim4D, mask_tf, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID" )
stim_wts = tf.exp(tf.reduce_sum(tf.mul(stim_masked, w), 3))
lam = tf.matmul(tf.reshape(stim_wts, [-1,dimx*dimy]),a)
loss_inter = (tf.reduce_sum(lam)/120. - tf.reduce_sum(resp*tf.log(lam)))/data_len
loss = loss_inter + FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w))
a_pos = tf.assign(a, (a + tf.abs(a))/2)
# mask a to only relevant pixels
w_mother = tf.Variable(np.array(0.01*np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
# mother weight convolution
stim_convolved = tf.reduce_sum( tf.nn.conv2d(stim4D, w_mother, strides=[1, FLAGS.stride, FLAGS.stride, 1], padding="VALID"),3)
sess.run(tf.initialize_all_variables())
mask_conv = sess.run(stim_convolved,feed_dict = {stim: total_mask})
mask_a_flat = np.array(np.reshape(mask_conv, [-1,dimx * dimy]).T >0, dtype='float32')
a_proj = tf.assign(a, a * mask_a_flat)
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss,var_list=[w,a])
def training(inp_dict):
sess.run([train_step, a_proj], feed_dict=inp_dict)
sess.run(a_pos)
def get_loss(inp_dict):
ls = sess.run(loss, feed_dict=inp_dict)
return ls
w_summary = tf.histogram_summary('w', w)
a_summary = tf.histogram_summary('a', a)
# initialize the model
# make summary writers
#logfile = gfile.Open(save_filename + '.txt', "a")
# make summary writers
l_summary = tf.scalar_summary('loss',loss)
l_inter_summary = tf.scalar_summary('loss_inter',loss_inter)
#tf.image_summary('a_image',tf.expand_dims(tf.expand_dims(tf.squeeze(tf.nn.relu(a)),0),-1))
#tf.image_summary('w_image',tf.expand_dims(tf.transpose(tf.reshape(w, [40, 80, Nsub]), [2, 0, 1]), -1), max_images=50)
# Merge all the summaries and write them out to /tmp/mnist_logs (by default)
merged = tf.merge_all_summaries()
train_writer = tf.train.SummaryWriter(FLAGS.save_location + 'train',
sess.graph)
test_writer = tf.train.SummaryWriter(FLAGS.save_location + 'test')
print('\nStarting new code')
print('\nModel:' + FLAGS.model_id)
sess.run(tf.initialize_all_variables())
saver_var = tf.train.Saver(tf.all_variables(), keep_checkpoint_every_n_hours=0.05)
load_prev = False
start_iter=0
try:
latest_filename = short_filename + '_latest_fn'
restore_file = tf.train.latest_checkpoint(FLAGS.save_location, latest_filename)
start_iter = int(restore_file.split('/')[-1].split('-')[-1])
saver_var.restore(sess, restore_file)
load_prev = True
except:
print('No previous dataset')
if load_prev:
#logfile.write('\nPrevious results loaded')
print('\nPrevious results loaded')
else:
#logfile.write('\nVariables initialized')
print('\nVariables initialized')
#logfile.flush()
# Do the fitting
icnt = 0
stim_test,resp_test,test_length = get_test_data()
fd_test = {stim: stim_test,
resp: resp_test,
data_len: test_length}
#logfile.close()
for istep in np.arange(start_iter,400000):
# get training data
stim_train, resp_train, train_len = get_next_training_batch(istep)
fd_train = {stim: stim_train,
resp: resp_train,
data_len: train_len}
# take training step
training(fd_train)
if istep%10 == 0:
# compute training and testing losses
ls_train = get_loss(fd_train)
ls_test = get_loss(fd_test)
ls_train_log = np.append(ls_train_log, ls_train)
ls_test_log = np.append(ls_test_log, ls_test)
latest_filename = short_filename + '_latest_fn'
saver_var.save(sess, save_filename, global_step=istep, latest_filename = latest_filename)
# add training summary
summary = sess.run(merged, feed_dict=fd_train)
train_writer.add_summary(summary,istep)
# add testing summary
summary = sess.run(merged, feed_dict=fd_test)
test_writer.add_summary(summary,istep)
# log results
#logfile = gfile.Open(save_filename + '.txt', "a")
#logfile.write('\nIterations: ' + str(istep) + ' Training error: '
# + str(ls_train) + ' Testing error: ' + str(ls_test) +
# ' w_l1_norm: ' + str(np.sum(np.abs(w.eval()))) +
# ' a_l1_norm: ' + str(np.sum(np.abs(a.eval()))))
#logfile.close()
#logfile.flush()
icnt += FLAGS.batchsz
if icnt > 216000-1000:
icnt = 0
tms = np.random.permutation(np.arange(216000-1000))
# write_filename = save_filename + '.pkl'
# write_file = gfile.Open(write_filename, 'wb')
# save_data = {'w': w.eval(), 'a': a.eval(), 'w_init': w_init,
# 'a_init': a_init, 'w_load': w_load, 'a_load': a_load,
# 'ls_train_log': ls_train_log,
# 'ls_test_log': ls_test_log, 'last_iter': istep, 'chunk_order': chunk_order}
# pickle.dump(save_data,write_file)
# write_file.close()
#logfile.close()
if __name__ == '__main__':
app.run()
|
googlearchive/rgc-models
|
response_model/python/population_subunits/coarse/fitting/whole_population_fixed_tf_2.py
|
Python
|
apache-2.0
| 43,300
|
'''
Created on July 26, 2013
Example service created for a weather sensor. An Arduino POSTs simple JSON value-only updates to the
REST endpoints defined by the Observable Property created for each sensor output. An example graph is
created to demonstrate how endpoints can be discovered by reading the graph metadata.
@author: mjkoster
'''
from core.SmartObject import SmartObject
from core.Description import Description
from core.ObservableProperty import ObservableProperty
from core.Observers import Observers
from core.PropertyOfInterest import PropertyOfInterest
from rdflib.term import Literal, URIRef
from rdflib.namespace import RDF, RDFS, XSD, OWL
from interfaces.HttpObjectService import HttpObjectService
from interfaces.CoapObjectService import CoapObjectService
from time import sleep
import sys
#workaround to register rdf JSON plugins
import rdflib
from rdflib.plugin import Serializer, Parser
rdflib.plugin.register('json-ld', Serializer, 'rdflib_jsonld.serializer', 'JsonLDSerializer')
rdflib.plugin.register('json-ld', Parser, 'rdflib_jsonld.parser', 'JsonLDParser')
rdflib.plugin.register('rdf-json', Serializer, 'rdflib_rdfjson.rdfjson_serializer', 'RdfJsonSerializer')
rdflib.plugin.register('rdf-json', Parser, 'rdflib_rdfjson.rdfjson_parser', 'RdfJsonParser')
if __name__ == '__main__' :
baseObject = HttpObjectService().baseObject # make an instance of the service, default object root and default port 8000
coapService = CoapObjectService(baseObject)
# create the weather station resource template
# emulate the .well-known/core interface
baseObject.create({'resourceName': '.well-known','resourceClass': 'SmartObject'},\
).create({'resourceName': 'core','resourceClass': 'LinkFormatProxy'})
# sensors resource under the baseObject for all sensors
# top level object container for sensors, default class is SmartObject
sensors = baseObject.create({'resourceName': 'sensors', 'resourceClass': 'SmartObject'})
#weather resource under sensors for the weather sensor
# create a default class SmartObject for the weather sensor cluster
weather = sensors.create({'resourceName': 'rhvWeather-01', 'resourceClass': 'SmartObject'})
# example description in simple link-format like concepts
baseObject.Description.set((URIRef('sensors/rhvWeather-01'), RDFS.Class, Literal('SmartObject')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01'), RDF.type, Literal('SensorSystem')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01'), RDFS.Resource, Literal('Weather')))
#
baseObject.Description.set((URIRef('sensors/rhvWeather-01/outdoor_temperature'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/outdoor_temperature'), RDFS.Resource, Literal('temperature')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/outdoor_humidity'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/outdoor_humidity'), RDFS.Resource, Literal('humidity')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/sealevel_pressure'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/sealevel_pressure'), RDFS.Resource, Literal('pressure')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/indoor_temperature'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/indoor_temperature'), RDFS.Resource, Literal('temperature')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/indoor_humidity'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/indoor_humidity'), RDFS.Resource, Literal('humidity')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/wind_gust'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/wind_gust'), RDFS.Resource, Literal('speed')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/wind_speed'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/wind_speed'), RDFS.Resource, Literal('speed')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/wind_direction'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/wind_direction'), RDFS.Resource, Literal('direction')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/current_rain'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/current_rain'), RDFS.Resource, Literal('depth')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/hourly_rain'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/hourly_rain'), RDFS.Resource, Literal('depth')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/daily_rain'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/daily_rain'), RDFS.Resource, Literal('depth')))
# now create an Observable Property for each sensor output
pushInterval = 10 # number of samples to delay each push to Xively
outdoor_temperature = weather.create({'resourceName': 'outdoor_temperature',\
'resourceClass': 'ObservableProperty'})
outdoor_temperature.Observers.create({'resourceName': 'mqttTestObserver',\
'resourceClass': 'mqttObserver',\
'connection': 'smartobjectservice.com',\
'pubTopic': ''})
outdoor_humidity = weather.create({'resourceName': 'outdoor_humidity',\
'resourceClass': 'ObservableProperty'})
outdoor_humidity.Observers.create({'resourceName': 'mqttTestObserver',\
'resourceClass': 'mqttObserver',\
'connection': 'smartobjectservice.com',\
'pubTopic': ''})
sealevel_pressure = weather.create({'resourceName': 'sealevel_pressure',\
'resourceClass': 'ObservableProperty'})
sealevel_pressure.Observers.create({'resourceName': 'mqttTestObserver',\
'resourceClass': 'mqttObserver',\
'connection': 'smartobjectservice.com',\
'pubTopic': ''})
indoor_temperature = weather.create({'resourceName': 'indoor_temperature',\
'resourceClass': 'ObservableProperty'})
indoor_temperature.Observers.create({'resourceName': 'mqttTestObserver',\
'resourceClass': 'mqttObserver',\
'connection': 'smartobjectservice.com',\
'pubTopic': ''})
indoor_humidity = weather.create({'resourceName': 'indoor_humidity',\
'resourceClass': 'ObservableProperty'})
indoor_humidity.Observers.create({'resourceName': 'mqttTestObserver',\
'resourceClass': 'mqttObserver',\
'connection': 'smartobjectservice.com',\
'pubTopic': ''})
wind_gust = weather.create({'resourceName': 'wind_gust',\
'resourceClass': 'ObservableProperty'})
wind_gust.Observers.create({'resourceName': 'mqttTestObserver',\
'resourceClass': 'mqttObserver',\
'connection': 'smartobjectservice.com',\
'pubTopic': ''})
wind_speed = weather.create({'resourceName': 'wind_speed',\
'resourceClass': 'ObservableProperty'})
wind_speed.Observers.create({'resourceName': 'mqttTestObserver',\
'resourceClass': 'mqttObserver',\
'connection': 'smartobjectservice.com',\
'pubTopic': ''})
wind_direction = weather.create({'resourceName': 'wind_direction',\
'resourceClass': 'ObservableProperty'})
wind_direction.Observers.create({'resourceName': 'mqttTestObserver',\
'resourceClass': 'mqttObserver',\
'connection': 'smartobjectservice.com',\
'pubTopic': ''})
current_rain = weather.create({'resourceName': 'current_rain',\
'resourceClass': 'ObservableProperty'})
current_rain.Observers.create({'resourceName': 'mqttTestObserver',\
'resourceClass': 'mqttObserver',\
'connection': 'smartobjectservice.com',\
'pubTopic': ''})
hourly_rain = weather.create({'resourceName': 'hourly_rain',\
'resourceClass': 'ObservableProperty'})
hourly_rain.Observers.create({'resourceName': 'mqttTestObserver',\
'resourceClass': 'mqttObserver',\
'connection': 'smartobjectservice.com',\
'pubTopic': ''})
daily_rain = weather.create({'resourceName': 'daily_rain',\
'resourceClass': 'ObservableProperty'})
daily_rain.Observers.create({'resourceName': 'mqttTestObserver',\
'resourceClass': 'mqttObserver',\
'connection': 'smartobjectservice.com',\
'pubTopic': ''})
try:
# register handlers etc.
while 1: sleep(1)
except KeyboardInterrupt: pass
print 'got KeyboardInterrupt'
|
connectIOT/iottoolkit
|
old/WeatherSensorMQTTSubscriber.py
|
Python
|
apache-2.0
| 10,322
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAC wrapper.
"""
from typing import Type
from absl import logging
from tink.proto import tink_pb2
from tink import core
from tink.mac import _mac
class _WrappedMac(_mac.Mac):
"""Implements Mac for a set of Mac primitives."""
def __init__(self, pset: core.PrimitiveSet):
self._primitive_set = pset
def compute_mac(self, data: bytes) -> bytes:
primary = self._primitive_set.primary()
if primary.output_prefix_type == tink_pb2.LEGACY:
return primary.identifier + primary.primitive.compute_mac(
data + core.crypto_format.LEGACY_START_BYTE)
else:
return primary.identifier + primary.primitive.compute_mac(data)
def verify_mac(self, mac_value: bytes, data: bytes) -> None:
if len(mac_value) <= core.crypto_format.NON_RAW_PREFIX_SIZE:
# This also rejects raw MAC with size of 4 bytes or fewer. Those MACs are
# clearly insecure, thus should be discouraged.
raise core.TinkError('tag too short')
prefix = mac_value[:core.crypto_format.NON_RAW_PREFIX_SIZE]
mac_no_prefix = mac_value[core.crypto_format.NON_RAW_PREFIX_SIZE:]
for entry in self._primitive_set.primitive_from_identifier(prefix):
try:
if entry.output_prefix_type == tink_pb2.LEGACY:
entry.primitive.verify_mac(mac_no_prefix, data + b'\x00')
else:
entry.primitive.verify_mac(mac_no_prefix, data)
# If there is no exception, the MAC is valid and we can return.
return
except core.TinkError as e:
logging.info('tag prefix matches a key, but cannot verify: %s', e)
# No 'non-raw' key matched, so let's try the raw keys (if any exist).
for entry in self._primitive_set.raw_primitives():
try:
entry.primitive.verify_mac(mac_value, data)
# If there is no exception, the MAC is valid and we can return.
return
except core.TinkError as e:
pass
raise core.TinkError('invalid MAC')
class MacWrapper(core.PrimitiveWrapper[_mac.Mac, _mac.Mac]):
"""MacWrapper is the implementation of PrimitiveWrapper for the Mac primitive.
The returned primitive works with a keyset (rather than a single key). To
compute a MAC tag, it uses the primary key in the keyset, and prepends to the
tag a certain prefix associated with the primary key. To verify a tag, the
primitive uses the prefix of the tag to efficiently select the right key in
the set. If the keys associated with the prefix do not validate the tag, the
primitive tries all keys with tink_pb2.OutputPrefixType = tink_pb2.RAW.
"""
def wrap(self, pset: core.PrimitiveSet) -> _mac.Mac:
return _WrappedMac(pset)
def primitive_class(self) -> Type[_mac.Mac]:
return _mac.Mac
def input_primitive_class(self) -> Type[_mac.Mac]:
return _mac.Mac
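# --- Illustrative usage sketch (not part of the original module) ---
# The wrapper above is normally reached through the public tink-py API rather
# than instantiated directly. A minimal sketch, assuming the standard
# registration flow and a stock HMAC key template:
#
#     import tink
#     from tink import mac
#
#     mac.register()
#     handle = tink.new_keyset_handle(mac.mac_key_templates.HMAC_SHA256_128BITTAG)
#     primitive = handle.primitive(mac.Mac)  # the registry applies MacWrapper
#     tag = primitive.compute_mac(b'data')
#     primitive.verify_mac(tag, b'data')     # raises TinkError on mismatch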
|
google/tink
|
python/tink/mac/_mac_wrapper.py
|
Python
|
apache-2.0
| 3,361
|
from evdev import InputDevice, ecodes, InputEvent
from select import select
mouse = InputDevice('/dev/input/event4') # mouse
keyboard = InputDevice('/dev/input/event5') # keyboard
while True:
r, w, x = select([mouse], [], [])
for dev in r:
for event in dev.read():
print(event.__class__, event)
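# Illustrative variation (not in the original script): to react only to key
# events one might filter on the event type, e.g.
#     from evdev import categorize
#     if event.type == ecodes.EV_KEY:
#         print(categorize(event))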
|
Morgaroth/events_manager
|
py/handle.py
|
Python
|
apache-2.0
| 327
|
from app import app, db
import sys
if sys.version_info >= (3, 0):
enable_search = False
else:
enable_search = True
import flask.ext.whooshalchemy as whooshalchemy
class Doctor(db.Model):
"""To indicate who is whose family doctor"""
__tablename__ = 'family_doctor'
# Fields
doctor_id = db.Column(db.Integer, db.ForeignKey('persons.person_id'), primary_key=True)
patient_id = db.Column(db.Integer, db.ForeignKey('persons.person_id'), primary_key=True)
# Relationships
doctor = db.relationship("Person",
foreign_keys=[doctor_id],
backref=db.backref("doctor_doctors", lazy='dynamic'))
patient = db.relationship("Person", foreign_keys=[patient_id], backref="doctor_patients")
def __repr__(self):
return '<FamilyDoctor %r %r>' % (self.doctor_id, self.patient_id)
class Person(db.Model):
"""To Store Personal Information"""
__tablename__ = 'persons'
# Fields
person_id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.VARCHAR(24))
last_name = db.Column(db.VARCHAR(24))
address = db.Column(db.VARCHAR(128))
email = db.Column(db.VARCHAR(128), unique=True)
phone = db.Column(db.CHAR(10))
# Relationships
users = db.relationship('User', backref='person')
doctors = db.relationship('Person', secondary='family_doctor',
primaryjoin=person_id == Doctor.patient_id,
secondaryjoin=person_id == Doctor.doctor_id,
backref=db.backref('patients', lazy='dynamic'))
def __repr__(self):
return '<Person %r>' % (self.person_id)
class User(db.Model):
"""To store the log-in information
Note that a person may have been assigned different
user_name(s), depending on his/her role in the log-in"""
__tablename__ = 'users'
# Fields
user_name = db.Column(db.VARCHAR(24), primary_key=True)
password = db.Column(db.VARCHAR(24))
user_class = db.Column('class', db.Enum('a', 'p', 'd', 'r'))
date_registered = db.Column(db.Date)
person_id = db.Column(db.Integer, db.ForeignKey('persons.person_id'))
# Required for Flask-Login to use this as a User class
def is_authenticated(self):
return True
# Required for Flask-Login to use this as a User class
def is_active(self):
return True
# Required for Flask-Login to use this as a User class
def is_anonymous(self):
return False
# Required for Flask-Login to use this as a User class
def get_id(self):
return str(self.user_name)
def __repr__(self):
return '<User %r>' % (self.user_name)
class Record(db.Model):
"""To store the radiology records"""
__tablename__ = 'radiology_record'
    __searchable__ = ['test_type', 'prescribing_date', 'test_date', 'diagnosis', 'description']
# Fields
record_id = db.Column(db.Integer, primary_key=True)
patient_id = db.Column(db.Integer, db.ForeignKey('persons.person_id'))
doctor_id = db.Column(db.Integer, db.ForeignKey('persons.person_id'))
radiologist_id = db.Column(db.Integer, db.ForeignKey('persons.person_id'))
test_type = db.Column(db.VARCHAR(24))
prescribing_date = db.Column(db.Date)
test_date = db.Column(db.Date)
diagnosis = db.Column(db.VARCHAR(128))
description = db.Column(db.VARCHAR(1024))
# Relationships
images = db.relationship('Image', backref="record", lazy='dynamic')
doctor = db.relationship("Person", foreign_keys=[doctor_id], backref="record_doctor")
patient = db.relationship("Person", foreign_keys=[patient_id], backref="record_patient")
radiologist = db.relationship("Person", foreign_keys=[radiologist_id], backref="record_radiologist")
def __repr__(self):
return '<Record %r>' % (self.record_id)
class Image(db.Model):
"""To store the pacs images"""
__tablename__ = 'pacs_images'
image_id = db.Column(db.Integer, primary_key=True)
record_id = db.Column(db.Integer, db.ForeignKey('radiology_record.record_id'))
thumbnail = db.Column(db.BLOB)
regular_size = db.Column(db.BLOB)
full_size = db.Column(db.BLOB)
def __repr__(self):
return '<Pacs Image %r %r>' % (self.image_id, self.record_id)
if enable_search:
whooshalchemy.whoosh_index(app, Record)
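# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of the self-referential family-doctor relationship,
# assuming an application context and two hypothetical Person rows:
#
#     alice = Person(first_name='Alice', last_name='A', email='a@example.com')
#     bob = Person(first_name='Bob', last_name='B', email='b@example.com')
#     db.session.add_all([alice, bob, Doctor(doctor=alice, patient=bob)])
#     db.session.commit()
#
#     bob.doctors           # -> [alice], via the secondary 'family_doctor' join
#     alice.patients.all()  # -> [bob], through the dynamic backref above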
|
MarkGalloway/RIS
|
app/models.py
|
Python
|
apache-2.0
| 4,370
|
from rest_framework.test import APITestCase, APITransactionTestCase, APIClient
from django.contrib.auth.models import User # AnonymousUser,
from os import environ as env
# https://stackoverflow.com/questions/44450533/difference-between-testcase-and-transactiontestcase-classes-in-django-test
# ^APITestCase vs APITransactionTestCase
# https://www.django-rest-framework.org/api-guide/testing/
# ^DRF testing guide
# https://django-testing-docs.readthedocs.io/en/latest/fixtures.html
# ^testing with fixtures
# https://stackoverflow.com/questions/5875111/running-a-specific-test-case-in-django-when-your-app-has-a-tests-directory
# ^running the tests
client = APIClient()
user = User.objects.get(username=env["WOPEN_SUPERUSER"] + env["WOPEN_EMAIL_DOMAIN"])
client.force_authenticate(user=user)
class AlertsTests(APITestCase):
def test_read(self):
response = client.get('/api/alerts/')
assert response.status_code == 200
def test_ordering(self):
response = client.get('/api/alerts/?ordering=urgency_tag')
assert response.status_code == 200
def test_search(self):
response = client.get('/api/alerts/?search=srct&format=json')
assert response.status_code == 200
def test_filtering(self):
response = client.get('/api/alerts/?urgency_tag=major&format=json')
assert response.status_code == 200
class CategoriesTests(APITestCase):
def test_read(self):
response = client.get('/api/categories/')
assert response.status_code == 200
def test_ordering(self):
response = client.get('/api/categories/?ordering=name')
assert response.status_code == 200
def test_search(self):
response = client.get('/api/categories/?search=din&format=json')
assert response.status_code == 200
def test_filtering(self):
response = client.get('/api/categories/?name=dining&format=json')
assert response.status_code == 200
class FacilitiesTests(APITestCase):
def test_read(self):
response = client.get('/api/facilities/')
assert response.status_code == 200
def test_ordering(self):
response = client.get('/api/facilities/?ordering=-facility_classifier')
assert response.status_code == 200
def test_search(self):
response = client.get('/api/facilities/?search=south&format=json')
assert response.status_code == 200
def test_filtering(self):
response = client.get('/api/facilities/?facility_name=Southside')
assert response.status_code == 200
class LocationsTests(APITestCase):
def test_read(self):
response = client.get('/api/locations/')
assert response.status_code == 200
def test_ordering(self):
response = client.get('/api/locations/?ordering=-address')
assert response.status_code == 200
def test_search(self):
response = client.get('/api/locations/?search=johnson&format=json')
assert response.status_code == 200
def test_filtering(self):
response = client.get('/api/locations/?building=Johnson+Center&format=json')
assert response.status_code == 200
class ScheduleTests(APITestCase):
def test_read(self):
response = client.get('/api/schedules/')
assert response.status_code == 200
def test_ordering(self):
response = client.get('/api/schedules/?ordering=name')
assert response.status_code == 200
"""Invalid value south?"""
def test_search(self):
response = client.get('/api/schedules/?search=Southside+[Fall+%2FSpring+Hours]')
#print(dir(response))
assert response.status_code == 200
def test_filtering(self):
response = client.get('/api/schedules/?name=&valid_start=&valid_end=&twenty_four_hours=true')
self.assertTrue(response.status_code == 200)
def test_post(self):
response = client.post('/api/schedules/', {
"name": "hi",
"valid_start": None,
"valid_end": None,
"twenty_four_hours": False
}, format='json')
assert response.status_code == 201
# class OpenTimeTests(APITestCase):
# def test_read(self):
# response = client.get('/api/categories/')
# assert response.status_code == 200
# def test_ordering(self):
# self.assertTrue(True)
# def test_search(self):
# self.assertTrue(True)
# def test_filtering(self):
# self.assertTrue(True)
# def test_post(self):
# client.post('/notes/', {'title': 'new idea'}, format='json')
# self.assertTrue(True)
|
srct/whats-open
|
whats-open/api/tests/APIClientTests.py
|
Python
|
apache-2.0
| 4,803
|
# -*- coding: utf-8 -*-
"""
Package containing a data validation schema for the machine class register.
---
type:
python_package
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
from good import (Extra,
Reject,
Schema)
import da.check.schema.common
# -----------------------------------------------------------------------------
def get(idclass_tab):
"""
Return the data validation schema for the machine register.
"""
common = da.check.schema.common
machine_id = idclass_tab['machine']
return Schema({
'title': common.TITLE_TEXT,
'introduction': common.PARAGRAPH_TEXT,
'register': {
machine_id: {
'hostname': common.LOWERCASE_NAME,
'desc': common.TITLE_TEXT
}
},
Extra: Reject
})
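# --- Illustrative usage sketch (not part of the original module) ---
# good.Schema instances are callable and raise on invalid input, so the
# register schema might be applied roughly as below. The idclass_tab contents
# and the exact field formats accepted by common.* are assumptions here:
#
#     schema = get(idclass_tab)
#     schema({'title':        'Machine register',
#             'introduction': 'Machines available to the build system.',
#             'register':     {'some-machine-id': {'hostname': 'buildbox',
#                                                  'desc': 'Primary build machine'}}})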
|
wtpayne/hiai
|
a3_src/h70_internal/da/check/schema/machine_register.py
|
Python
|
apache-2.0
| 1,618
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from magnumclient.common.apiclient import exceptions
from magnumclient.tests.v1 import shell_test_base
from magnumclient.v1.baymodels import BayModel
class FakeBayModel(BayModel):
def __init__(self, manager=None, info={}, **kwargs):
BayModel.__init__(self, manager=manager, info=info)
self.apiserver_port = kwargs.get('apiserver_port', None)
self.uuid = kwargs.get('uuid', 'x')
self.links = kwargs.get('links', [])
self.server_type = kwargs.get('server_type', 'vm')
self.image_id = kwargs.get('image_id', 'x')
self.tls_disabled = kwargs.get('tls_disabled', False)
self.registry_enabled = kwargs.get('registry_enabled', False)
self.coe = kwargs.get('coe', 'x')
self.public = kwargs.get('public', False)
self.name = kwargs.get('name', 'x')
class ShellTest(shell_test_base.TestCommandLineArgument):
def _get_expected_args_list(self, limit=None, sort_dir=None,
sort_key=None, detail=False):
expected_args = {}
expected_args['limit'] = limit
expected_args['sort_dir'] = sort_dir
expected_args['sort_key'] = sort_key
expected_args['detail'] = detail
return expected_args
def _get_expected_args(self, image_id, external_network_id, coe,
master_flavor_id=None, name=None,
keypair_id=None, fixed_network=None,
fixed_subnet=None, network_driver=None,
volume_driver=None, dns_nameserver='8.8.8.8',
flavor_id='m1.medium',
docker_storage_driver='devicemapper',
docker_volume_size=None, http_proxy=None,
https_proxy=None, no_proxy=None, labels={},
tls_disabled=False, public=False,
master_lb_enabled=False, server_type='vm',
registry_enabled=False, floating_ip_enabled=None):
expected_args = {}
expected_args['image_id'] = image_id
expected_args['external_network_id'] = external_network_id
expected_args['coe'] = coe
expected_args['master_flavor_id'] = master_flavor_id
expected_args['name'] = name
expected_args['keypair_id'] = keypair_id
expected_args['fixed_network'] = fixed_network
expected_args['fixed_subnet'] = fixed_subnet
expected_args['network_driver'] = network_driver
expected_args['volume_driver'] = volume_driver
expected_args['dns_nameserver'] = dns_nameserver
expected_args['flavor_id'] = flavor_id
expected_args['docker_volume_size'] = docker_volume_size
expected_args['docker_storage_driver'] = docker_storage_driver
expected_args['http_proxy'] = http_proxy
expected_args['https_proxy'] = https_proxy
expected_args['no_proxy'] = no_proxy
expected_args['labels'] = labels
expected_args['tls_disabled'] = tls_disabled
expected_args['public'] = public
expected_args['master_lb_enabled'] = master_lb_enabled
expected_args['server_type'] = server_type
expected_args['registry_enabled'] = registry_enabled
return expected_args
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--image-id test_image '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--coe swarm '
'--dns-nameserver test_dns '
'--flavor-id test_flavor '
'--fixed-network private '
'--fixed-subnet private-subnet '
'--volume-driver test_volume '
'--network-driver test_driver '
'--labels key=val '
'--master-flavor-id test_flavor '
'--docker-volume-size 10 '
'--docker-storage-driver devicemapper '
'--public '
'--server-type vm '
'--master-lb-enabled '
'--floating-ip-enabled ')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
dns_nameserver='test_dns', public=True,
flavor_id='test_flavor',
master_flavor_id='test_flavor',
fixed_network='private',
fixed_subnet='private-subnet',
server_type='vm',
network_driver='test_driver',
volume_driver='test_volume',
docker_storage_driver='devicemapper',
docker_volume_size=10,
master_lb_enabled=True,
labels={'key': 'val'})
expected_args['floating_ip_enabled'] = True
mock_create.assert_called_with(**expected_args)
self._test_arg_success('baymodel-create '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe kubernetes '
'--name test '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair',
coe='kubernetes',
external_network_id='test_net',
server_type='vm')
mock_create.assert_called_with(**expected_args)
self._test_arg_success('baymodel-create '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe kubernetes '
'--name test '
'--server-type vm '
'--floating-ip-disabled ')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair',
coe='kubernetes',
external_network_id='test_net',
server_type='vm',
floating_ip_enabled=False)
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_success_no_servertype(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--image-id test_image '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--coe swarm '
'--dns-nameserver test_dns '
'--flavor-id test_flavor '
'--fixed-network public '
'--network-driver test_driver '
'--labels key=val '
'--master-flavor-id test_flavor '
'--docker-volume-size 10 '
'--docker-storage-driver devicemapper '
'--public ')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
dns_nameserver='test_dns', public=True,
flavor_id='test_flavor',
master_flavor_id='test_flavor',
fixed_network='public',
network_driver='test_driver',
docker_storage_driver='devicemapper',
docker_volume_size=10,
labels={'key': 'val'})
mock_create.assert_called_with(**expected_args)
self._test_arg_success('baymodel-create '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe kubernetes '
'--name test ')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair',
coe='kubernetes',
external_network_id='test_net')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_success_with_registry_enabled(
self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--network-driver test_driver '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--registry-enabled')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
network_driver='test_driver',
registry_enabled=True)
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_public_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --network-driver test_driver '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--public '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
public=True, server_type='vm',
network_driver='test_driver')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_success_with_master_flavor(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--image-id test_image '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--coe swarm '
'--dns-nameserver test_dns '
'--master-flavor-id test_flavor')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
dns_nameserver='test_dns',
master_flavor_id='test_flavor')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_docker_vol_size_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --docker-volume-size 4514 '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
server_type='vm',
docker_volume_size=4514)
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_docker_storage_driver_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--docker-storage-driver devicemapper '
'--coe swarm'
)
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
docker_storage_driver='devicemapper')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_fixed_network_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --fixed-network private '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
fixed_network='private',
external_network_id='test_net',
server_type='vm')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_network_driver_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --network-driver test_driver '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
server_type='vm',
network_driver='test_driver')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_volume_driver_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --volume-driver test_volume '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
server_type='vm',
volume_driver='test_volume')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_http_proxy_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --fixed-network private '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--http-proxy http_proxy '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
fixed_network='private',
server_type='vm',
http_proxy='http_proxy')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_https_proxy_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --fixed-network private '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--https-proxy https_proxy '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
fixed_network='private',
server_type='vm',
https_proxy='https_proxy')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_no_proxy_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test --fixed-network private '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--no-proxy no_proxy '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
fixed_network='private',
server_type='vm',
no_proxy='no_proxy')
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_labels_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--labels key=val '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
server_type='vm',
labels={'key': 'val'})
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_separate_labels_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--labels key1=val1 '
'--labels key2=val2 '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
server_type='vm',
labels={'key1': 'val1', 'key2': 'val2'})
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_combined_labels_success(self, mock_create):
self._test_arg_success('baymodel-create '
'--name test '
'--labels key1=val1,key2=val2 '
'--keypair-id test_keypair '
'--external-network-id test_net '
'--image-id test_image '
'--coe swarm '
'--server-type vm')
expected_args = \
self._get_expected_args(name='test', image_id='test_image',
keypair_id='test_keypair', coe='swarm',
external_network_id='test_net',
server_type='vm',
labels={'key1': 'val1', 'key2': 'val2'})
mock_create.assert_called_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.create')
def test_baymodel_create_failure_few_arg(self, mock_create):
self._test_arg_failure('baymodel-create '
'--name test', self._mandatory_arg_error)
mock_create.assert_not_called()
self._test_arg_failure('baymodel-create '
'--image-id test', self._mandatory_arg_error)
mock_create.assert_not_called()
self._test_arg_failure('baymodel-create '
'--keypair-id test', self._mandatory_arg_error)
mock_create.assert_not_called()
self._test_arg_failure('baymodel-create '
'--external-network-id test',
self._mandatory_arg_error)
mock_create.assert_not_called()
self._test_arg_failure('baymodel-create '
'--coe test', self._mandatory_arg_error)
mock_create.assert_not_called()
self._test_arg_failure('baymodel-create '
'--server-type test', self._mandatory_arg_error)
mock_create.assert_not_called()
self._test_arg_failure('baymodel-create', self._mandatory_arg_error)
mock_create.assert_not_called()
@mock.patch('magnumclient.v1.baymodels.BayModelManager.get')
def test_baymodel_show_success(self, mock_show):
self._test_arg_success('baymodel-show xxx')
mock_show.assert_called_once_with('xxx')
@mock.patch('magnumclient.v1.baymodels.BayModelManager.get')
def test_baymodel_show_failure_no_arg(self, mock_show):
self._test_arg_failure('baymodel-show', self._few_argument_error)
mock_show.assert_not_called()
@mock.patch('magnumclient.v1.baymodels.BayModelManager.delete')
def test_baymodel_delete_success(self, mock_delete):
self._test_arg_success('baymodel-delete xxx')
mock_delete.assert_called_once_with('xxx')
@mock.patch('magnumclient.v1.baymodels.BayModelManager.delete')
def test_baymodel_delete_multiple_id_success(self, mock_delete):
self._test_arg_success('baymodel-delete xxx xyz')
calls = [mock.call('xxx'), mock.call('xyz')]
mock_delete.assert_has_calls(calls)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.delete')
def test_baymodel_delete_failure_no_arg(self, mock_delete):
self._test_arg_failure('baymodel-delete', self._few_argument_error)
mock_delete.assert_not_called()
@mock.patch('magnumclient.v1.baymodels.BayModelManager.update')
def test_baymodel_update_success(self, mock_update):
self._test_arg_success('baymodel-update test add test=test')
patch = [{'op': 'add', 'path': '/test', 'value': 'test'}]
mock_update.assert_called_once_with('test', patch)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.update')
def test_baymodel_update_success_many_attribute(self, mock_update):
self._test_arg_success('baymodel-update test '
'add test=test test1=test1')
patch = [{'op': 'add', 'path': '/test', 'value': 'test'},
{'op': 'add', 'path': '/test1', 'value': 'test1'}]
mock_update.assert_called_once_with('test', patch)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.update')
def test_baymodel_update_failure_wrong_op(self, mock_update):
_error_msg = [
'.*?^usage: magnum baymodel-update ',
'.*?^error: argument <op>: invalid choice: ',
".*?^Try 'magnum help baymodel-update' for more information."
]
self._test_arg_failure('baymodel-update test wrong test=test',
_error_msg)
mock_update.assert_not_called()
@mock.patch('magnumclient.v1.baymodels.BayModelManager.update')
def test_baymodel_update_failure_few_args(self, mock_update):
_error_msg = [
'.*?^usage: magnum baymodel-update ',
'.*?^error: (the following arguments|too few arguments)',
".*?^Try 'magnum help baymodel-update' for more information."
]
self._test_arg_failure('baymodel-update', _error_msg)
mock_update.assert_not_called()
self._test_arg_failure('baymodel-update test', _error_msg)
mock_update.assert_not_called()
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_success(self, mock_list):
self._test_arg_success('baymodel-list')
expected_args = self._get_expected_args_list()
mock_list.assert_called_once_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_success_with_arg(self, mock_list):
self._test_arg_success('baymodel-list '
'--limit 1 '
'--sort-dir asc '
'--sort-key uuid')
expected_args = self._get_expected_args_list(1, 'asc', 'uuid')
mock_list.assert_called_once_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_success_detailed(self, mock_list):
self._test_arg_success('baymodel-list '
'--detail')
expected_args = self._get_expected_args_list(detail=True)
mock_list.assert_called_once_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_ignored_duplicated_field(self, mock_list):
mock_list.return_value = [FakeBayModel()]
self._test_arg_success('baymodel-list --fields coe,coe,coe,name,name',
keyword='\n| uuid | name | Coe |\n')
# Output should be
# +------+------+-----+
# | uuid | name | Coe |
# +------+------+-----+
# | x | x | x |
# +------+------+-----+
expected_args = self._get_expected_args_list()
mock_list.assert_called_once_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_failure_with_invalid_field(self, mock_list):
mock_list.return_value = [FakeBayModel()]
_error_msg = [".*?^Non-existent fields are specified: ['xxx','zzz']"]
self.assertRaises(exceptions.CommandError,
self._test_arg_failure,
'baymodel-list --fields xxx,coe,zzz',
_error_msg)
expected_args = self._get_expected_args_list()
mock_list.assert_called_once_with(**expected_args)
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_failure_invalid_arg(self, mock_list):
_error_msg = [
'.*?^usage: magnum baymodel-list ',
'.*?^error: argument --sort-dir: invalid choice: ',
".*?^Try 'magnum help baymodel-list' for more information."
]
self._test_arg_failure('baymodel-list --sort-dir aaa', _error_msg)
mock_list.assert_not_called()
@mock.patch('magnumclient.v1.baymodels.BayModelManager.list')
def test_baymodel_list_failure(self, mock_list):
self._test_arg_failure('baymodel-list --wrong',
self._unrecognized_arg_error)
mock_list.assert_not_called()
|
openstack/python-magnumclient
|
magnumclient/tests/v1/test_baymodels_shell.py
|
Python
|
apache-2.0
| 31,969
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The nPMI visualization plugin."""
import math
from werkzeug import wrappers
from tensorboard import plugin_util
from tensorboard.plugins import base_plugin
from tensorboard.backend import http_util
from tensorboard.data import provider
from tensorboard.plugins.npmi import metadata
_DEFAULT_DOWNSAMPLING = 1 # nPMI tensors per time series
def _error_response(request, error_message):
return http_util.Respond(
request,
{"error": error_message},
"application/json",
code=400,
)
def _missing_run_error_response(request):
return _error_response(request, "run parameter is not provided")
# Convert all NaNs in a multidimensional list to None
def convert_nan_none(arr):
return [
convert_nan_none(e)
if isinstance(e, list)
else None
if math.isnan(e)
else e
for e in arr
]
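# For example (illustrative only): convert_nan_none([[1.0, float("nan")], [0.5]])
# returns [[1.0, None], [0.5]]; non-NaN entries are passed through unchanged.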
class NpmiPlugin(base_plugin.TBPlugin):
"""nPMI Plugin for Tensorboard."""
plugin_name = metadata.PLUGIN_NAME
def __init__(self, context):
"""Instantiates the nPMI Plugin via Tensorboard core.
Args:
context: A base_plugin.TBContext instance.
"""
super(NpmiPlugin, self).__init__(context)
self._logdir = context.logdir
self._downsample_to = (context.sampling_hints or {}).get(
self.plugin_name, _DEFAULT_DOWNSAMPLING
)
self._data_provider = context.data_provider
self._version_checker = plugin_util._MetadataVersionChecker(
data_kind="nPMI",
latest_known_version=0,
)
def get_plugin_apps(self):
return {
"/tags": self.serve_tags,
"/annotations": self.serve_annotations,
"/metrics": self.serve_metrics,
"/values": self.serve_values,
"/embeddings": self.serve_embeddings,
}
def is_active(self):
"""Determines whether this plugin is active.
This plugin is only active if TensorBoard sampled any npmi summaries.
Returns:
Whether this plugin is active.
"""
return False # `list_plugins` as called by TB core suffices
def frontend_metadata(self):
return base_plugin.FrontendMetadata(
is_ng_component=True, tab_name="npmi", disable_reload=True
)
def tags_impl(self, ctx, experiment):
mapping = self._data_provider.list_tensors(
ctx, experiment_id=experiment, plugin_name=self.plugin_name
)
result = {run: {} for run in mapping}
for (run, tag_to_content) in mapping.items():
result[run] = []
for (tag, metadatum) in tag_to_content.items():
md = metadata.parse_plugin_metadata(metadatum.plugin_content)
if not self._version_checker.ok(md.version, run, tag):
continue
result[run].append(tag)
return result
def annotations_impl(self, ctx, experiment):
mapping = self._data_provider.list_tensors(
ctx,
experiment_id=experiment,
plugin_name=self.plugin_name,
run_tag_filter=provider.RunTagFilter(
tags=[metadata.ANNOTATIONS_TAG]
),
)
result = {run: {} for run in mapping}
for (run, _) in mapping.items():
all_annotations = self._data_provider.read_tensors(
ctx,
experiment_id=experiment,
plugin_name=self.plugin_name,
run_tag_filter=provider.RunTagFilter(
runs=[run], tags=[metadata.ANNOTATIONS_TAG]
),
downsample=self._downsample_to,
)
annotations = all_annotations.get(run, {}).get(
metadata.ANNOTATIONS_TAG, {}
)
event_data = [
annotation.decode("utf-8")
for annotation in annotations[0].numpy
]
result[run] = event_data
return result
def metrics_impl(self, ctx, experiment):
mapping = self._data_provider.list_tensors(
ctx,
experiment_id=experiment,
plugin_name=self.plugin_name,
run_tag_filter=provider.RunTagFilter(tags=[metadata.METRICS_TAG]),
)
result = {run: {} for run in mapping}
for (run, _) in mapping.items():
all_metrics = self._data_provider.read_tensors(
ctx,
experiment_id=experiment,
plugin_name=self.plugin_name,
run_tag_filter=provider.RunTagFilter(
runs=[run], tags=[metadata.METRICS_TAG]
),
downsample=self._downsample_to,
)
metrics = all_metrics.get(run, {}).get(metadata.METRICS_TAG, {})
event_data = [metric.decode("utf-8") for metric in metrics[0].numpy]
result[run] = event_data
return result
def values_impl(self, ctx, experiment):
mapping = self._data_provider.list_tensors(
ctx,
experiment_id=experiment,
plugin_name=self.plugin_name,
run_tag_filter=provider.RunTagFilter(tags=[metadata.VALUES_TAG]),
)
result = {run: {} for run in mapping}
for (run, _) in mapping.items():
all_values = self._data_provider.read_tensors(
ctx,
experiment_id=experiment,
plugin_name=self.plugin_name,
run_tag_filter=provider.RunTagFilter(
runs=[run], tags=[metadata.VALUES_TAG]
),
downsample=self._downsample_to,
)
values = all_values.get(run, {}).get(metadata.VALUES_TAG, {})
event_data = values[0].numpy.tolist()
event_data = convert_nan_none(event_data)
result[run] = event_data
return result
def embeddings_impl(self, ctx, experiment):
mapping = self._data_provider.list_tensors(
ctx,
experiment_id=experiment,
plugin_name=self.plugin_name,
run_tag_filter=provider.RunTagFilter(
tags=[metadata.EMBEDDINGS_TAG]
),
)
result = {run: {} for run in mapping}
for (run, _) in mapping.items():
all_embeddings = self._data_provider.read_tensors(
ctx,
experiment_id=experiment,
plugin_name=self.plugin_name,
run_tag_filter=provider.RunTagFilter(
runs=[run], tags=[metadata.EMBEDDINGS_TAG]
),
downsample=self._downsample_to,
)
embeddings = all_embeddings.get(run, {}).get(
metadata.EMBEDDINGS_TAG, {}
)
event_data = embeddings[0].numpy.tolist()
result[run] = event_data
return result
@wrappers.Request.application
def serve_tags(self, request):
ctx = plugin_util.context(request.environ)
experiment = plugin_util.experiment_id(request.environ)
contents = self.tags_impl(ctx, experiment=experiment)
return http_util.Respond(request, contents, "application/json")
@wrappers.Request.application
def serve_annotations(self, request):
ctx = plugin_util.context(request.environ)
experiment = plugin_util.experiment_id(request.environ)
contents = self.annotations_impl(ctx, experiment=experiment)
return http_util.Respond(request, contents, "application/json")
@wrappers.Request.application
def serve_metrics(self, request):
ctx = plugin_util.context(request.environ)
experiment = plugin_util.experiment_id(request.environ)
contents = self.metrics_impl(ctx, experiment=experiment)
return http_util.Respond(request, contents, "application/json")
@wrappers.Request.application
def serve_values(self, request):
ctx = plugin_util.context(request.environ)
experiment = plugin_util.experiment_id(request.environ)
contents = self.values_impl(ctx, experiment=experiment)
return http_util.Respond(request, contents, "application/json")
@wrappers.Request.application
def serve_embeddings(self, request):
ctx = plugin_util.context(request.environ)
experiment = plugin_util.experiment_id(request.environ)
contents = self.embeddings_impl(ctx, experiment=experiment)
return http_util.Respond(request, contents, "application/json")
|
tensorflow/tensorboard
|
tensorboard/plugins/npmi/npmi_plugin.py
|
Python
|
apache-2.0
| 9,407
|
#!/usr/bin/env python
class Config(object):
DEBUG=False
WRITE_PICTURE_DEBUG=False
WRITE_PICTURE_DEBUG_PATH='./debug_picture/'
FACE_MAX_DRIFT_PERCENT=0.5
MAX_IMAGE_WIDTH=1024
    # dlib tracking latency varies widely from frame to frame,
    # roughly 20 ms to 100+ ms
DLIB_TRACKING=False
# whether detector should upsample
# detection with upsample = 1 on a 640x480 image took around 200ms
# detection with upsample = 0 on a 640x480 image took around 70ms
DLIB_DETECTOR_UPSAMPLE_TIMES=0
    # adjust the face detector threshold; a negative number lowers the threshold
DLIB_DETECTOR_ADJUST_THRESHOLD=0
# profile face detection
DETECT_PROFILE_FACE=False
# profile face cascade opencv xml path
OPENCV_PROFILE_FACE_CASCADE_PATH=None
# blurry detection
IMAGE_CLEAR_THRESHOLD=40
# return data format
RETURN_FACE_DATA=True
    # probability cut-off for the OpenFace recognition true/false decision
RECOG_PROB_THRESHOLD=0.5
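# --- Illustrative usage sketch (not part of the original config) ---
# The dlib knobs above are typically passed to dlib's frontal face detector,
# whose `run` method accepts an upsample count and a threshold adjustment
# (hypothetical wiring; `image` is assumed to be a loaded frame):
#
#     import dlib
#     detector = dlib.get_frontal_face_detector()
#     dets, scores, idx = detector.run(
#         image,
#         Config.DLIB_DETECTOR_UPSAMPLE_TIMES,
#         Config.DLIB_DETECTOR_ADJUST_THRESHOLD)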
|
cmusatyalab/faceswap
|
server/demo_config.py
|
Python
|
apache-2.0
| 991
|
from typing import List
class Solution:
def gameOfLife(self, board: List[List[int]]) -> None:
"""
Do not return anything, modify board in-place instead.
"""
# Handling empty board.
if not board or len(board[0]) == 0:
return
# Gets dimensions of board.
m, n = len(board), len(board[0])
        # Iterate over the board, marking state transitions in place (see encoding below).
for i,row in enumerate(board):
for j,ele in enumerate(row):
                # Count the live neighbours of this cell.
count = 0
for r in range(max(0,i-1),min(m, i+2)):
for c in range(max(0,j-1), min(n, j+2)):
if (r,c) != (i,j) and (1 <= board[r][c] <= 2):
count += 1
# Previously 0 element turned to be 1 is represented as 3 here. [Point 4]
# Previously 1 element turned to 0 is represented as 2 here. [Point 2 or 3]
# Processing that particular point!
if board[i][j] == 0:
if count == 3:
board[i][j] = 3
elif count < 2 or count > 3:
board[i][j] = 2
# Final formatting
for i in range(m):
for j in range(n):
if board[i][j] == 3:
board[i][j] = 1
elif board[i][j] == 2:
board[i][j] = 0
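if __name__ == "__main__":
    # Quick illustrative check (not part of the original solution): the classic
    # "blinker" flips from a vertical bar to a horizontal bar in one step.
    grid = [[0, 1, 0],
            [0, 1, 0],
            [0, 1, 0]]
    Solution().gameOfLife(grid)
    print(grid)  # [[0, 0, 0], [1, 1, 1], [0, 0, 0]]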
|
saisankargochhayat/algo_quest
|
leetcode/289. Game of Life/soln.py
|
Python
|
apache-2.0
| 1,474
|
"""
Manage OpenStack configuration file settings.
:maintainer: Jeffrey C. Ollie <jeff@ocjtech.us>
:maturity: new
:depends:
:platform: linux
"""
from salt.exceptions import CommandExecutionError
def __virtual__():
"""
Only load if the openstack_config module is in __salt__
"""
if "openstack_config.get" not in __salt__:
return (False, "openstack_config module could not be loaded")
if "openstack_config.set" not in __salt__:
return False
if "openstack_config.delete" not in __salt__:
return False
return True
def present(name, filename, section, value, parameter=None):
"""
Ensure a value is set in an OpenStack configuration file.
filename
The full path to the configuration file
section
The section in which the parameter will be set
parameter (optional)
The parameter to change. If the parameter is not supplied, the name will be used as the parameter.
value
The value to set
"""
if parameter is None:
parameter = name
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
try:
old_value = __salt__["openstack_config.get"](
filename=filename, section=section, parameter=parameter
)
if old_value == value:
ret["result"] = True
ret["comment"] = "The value is already set to the correct value"
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Value '{}' is set to be changed to '{}'.".format(
old_value, value
)
return ret
except CommandExecutionError as err:
if not str(err).lower().startswith("parameter not found:"):
raise
__salt__["openstack_config.set"](
filename=filename, section=section, parameter=parameter, value=value
)
ret["changes"] = {"Value": "Updated"}
ret["result"] = True
ret["comment"] = "The value has been updated"
return ret
def absent(name, filename, section, parameter=None):
"""
Ensure a value is not set in an OpenStack configuration file.
filename
The full path to the configuration file
section
The section in which the parameter will be set
parameter (optional)
The parameter to change. If the parameter is not supplied, the name will be used as the parameter.
"""
if parameter is None:
parameter = name
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
try:
old_value = __salt__["openstack_config.get"](
filename=filename, section=section, parameter=parameter
)
except CommandExecutionError as err:
if str(err).lower().startswith("parameter not found:"):
ret["result"] = True
ret["comment"] = "The value is already absent"
return ret
raise
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Value '{}' is set to be deleted.".format(old_value)
return ret
__salt__["openstack_config.delete"](
filename=filename, section=section, parameter=parameter
)
ret["changes"] = {"Value": "Deleted"}
ret["result"] = True
ret["comment"] = "The value has been deleted"
return ret
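# --- Illustrative usage sketch (not part of the original module) ---
# These state functions are normally driven from SLS data rather than called
# directly; a hypothetical state file using them might look like:
#
#     keystone-bind-host:
#       openstack_config.present:
#         - filename: /etc/keystone/keystone.conf
#         - section: DEFAULT
#         - parameter: bind_host
#         - value: 0.0.0.0
#
#     keystone-verbose-flag:
#       openstack_config.absent:
#         - filename: /etc/keystone/keystone.conf
#         - section: DEFAULT
#         - parameter: verbose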
|
saltstack/salt
|
salt/states/openstack_config.py
|
Python
|
apache-2.0
| 3,339
|
#!/usr/bin/env python
"""Module containing functions for converting messages to dataframe."""
import collections
import datetime
import stat
from typing import Text, Sequence, List, Any, Dict, Optional
import pandas as pd
from google.protobuf import descriptor
from google.protobuf import message
from grr_response_proto import osquery_pb2
from grr_response_proto import semantic_pb2
def from_sequence(seq: Sequence[Any]) -> pd.DataFrame:
"""Converts sequence of objects to a dataframe.
Args:
seq: Sequence of objects to convert.
Returns:
Pandas dataframe representing given sequence of objects.
"""
dframes = [from_object(obj) for obj in seq]
if not dframes:
return pd.DataFrame()
return pd.concat(dframes, ignore_index=True, sort=False)
def from_object(obj: Any) -> pd.DataFrame:
"""Converts object to a dataframe.
Args:
obj: Object to convert.
Returns:
Pandas dataframe representing given object.
"""
if isinstance(obj, message.Message):
return from_message(obj)
return pd.DataFrame(data=[obj])
def from_message(msg: message.Message,
components: Optional[List[Text]] = None) -> pd.DataFrame:
"""Converts protobuf message to a dataframe.
Args:
msg: Protobuf message to convert.
components: Prefixes for column names.
Returns:
Pandas dataframe representing given message.
"""
if components is None:
components = []
data = {}
for desc, value in msg.ListFields():
if isinstance(value, message.Message):
data.update(from_message(value, components + [desc.name]))
else:
data.update(_get_pretty_value(value, desc, components))
return pd.DataFrame(data=data)
def from_osquery_table(table: osquery_pb2.OsqueryTable) -> pd.DataFrame:
"""Converts osquery table to a dataframe.
Args:
table: Table to convert.
Returns:
Pandas dataframe representing given osquery table.
"""
columns = [column.name for column in table.header.columns]
data = {column: [] for column in columns}
for row in table.rows:
for column, value in zip(columns, row.values):
data[column].append(value)
return pd.DataFrame(data=data)
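# Illustrative only: assuming the OsqueryTable proto exposes the fields used
# above (header.columns[].name and rows[].values), a tiny table could be built
# and converted roughly like this:
#
#     table = osquery_pb2.OsqueryTable()
#     table.header.columns.add(name='pid')
#     table.header.columns.add(name='name')
#     table.rows.add(values=['1', 'init'])
#     table.rows.add(values=['42', 'sshd'])
#     from_osquery_table(table)  # -> DataFrame with columns 'pid' and 'name'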
def _get_pretty_value(value: Any, desc: descriptor.FieldDescriptor,
components: List[Text]) -> Dict[Text, List[Any]]:
"""Converts value to the object easier to work with or more representative.
Args:
value: Object to transform.
desc: Field descriptor of a value.
components: Prefixes for column names.
Returns:
Data dictionary representing the given value.
"""
data = {}
column_name = '.'.join(components + [desc.name])
sem_type = semantic_pb2.sem_type
if desc.label == desc.LABEL_REPEATED:
data[column_name] = [from_sequence(value)]
elif desc.type == desc.TYPE_ENUM:
char_name = next(_.name for _ in desc.enum_type.values if _.number == value)
data[column_name] = [char_name]
elif desc.type == desc.TYPE_BYTES:
data[column_name] = [value]
data[column_name + '.pretty'] = [repr(value)]
elif desc.GetOptions().Extensions[sem_type].type == 'RDFDatetime':
data[column_name] = [value]
pretty_value = datetime.datetime.utcfromtimestamp(value / (10**6))
data[column_name + '.pretty'] = [pretty_value]
elif desc.GetOptions().Extensions[sem_type].type == 'StatMode':
data[column_name] = [value]
data[column_name + '.pretty'] = [stat.filemode(value)]
else:
data[column_name] = [value]
return data
def reindex_dataframe(
df: pd.DataFrame,
priority_columns: Optional[List[Text]] = None,
ignore_columns: Optional[List[Text]] = None) -> pd.DataFrame:
"""Reorders and removes dataframe columns according to the given priorities.
Args:
df: Dataframe to reorder columns in.
priority_columns: List of first columns in a new dataframe.
ignore_columns: List of columns to remove from a dataframe.
Returns:
Reordered dataframe.
"""
if priority_columns is None:
priority_columns = []
if ignore_columns is None:
ignore_columns = []
priorities = collections.defaultdict(lambda: len(priority_columns))
for idx, column in enumerate(priority_columns):
priorities[column] = idx
ignore_columns = set(ignore_columns)
columns = [_ for _ in df.columns if _ not in ignore_columns]
columns = sorted(columns, key=lambda _: priorities[_])
return df.reindex(columns=columns)
def add_pretty_column(df: pd.DataFrame, col_name: Text,
values: Sequence[Any]) -> pd.DataFrame:
"""Adds pretty column for the specified column name with values provided.
Args:
df: Dataframe to add column to.
col_name: Name of the original column.
values: Values of the pretty column to add.
Returns:
Dataframe with the pretty column added.
"""
if col_name not in df.columns:
return df
pretty_col_name = '{}.pretty'.format(col_name)
if pretty_col_name in df.columns:
df[pretty_col_name] = values
else:
df.insert(
df.columns.get_loc(col_name) + 1, pretty_col_name, pd.Series(values))
return df
|
google/grr
|
colab/grr_colab/convert.py
|
Python
|
apache-2.0
| 5,072
|
"""Provides checkpoint download helpers."""
import hashlib
import os
from typing import Optional
import urllib
from absl import logging
from tensorflow.io import gfile
import tqdm
DEFAULT_DOWNLOAD_DIR = os.path.expanduser('~/.cache/scenic/clip')
def hash_file(path):
return hashlib.sha256(gfile.GFile(path, 'rb').read()).hexdigest()
def download(
url: str,
root: str = DEFAULT_DOWNLOAD_DIR,
expected_sha256: Optional[str] = None
):
"""Download a file if it does not exist, with a progress bar.
Based on https://github.com/openai/CLIP/blob/main/clip/clip.py#L4
Args:
url (str): URL of file to download.
root (str): Directory to place the downloaded file.
expected_sha256: Optional sha256 sum. If provided, checks downloaded file.
Raises:
    RuntimeError: Downloaded file existed as a directory, or sha256 of download
does not match expected_sha256.
Returns:
download_target (str): path to downloaded file
"""
gfile.makedirs(root)
filename = os.path.basename(url)
if '?' in filename:
# strip trailing HTTP GET arguments
filename = filename[:filename.rindex('?')]
download_target = os.path.join(root, filename)
if gfile.exists(download_target):
if gfile.isdir(download_target):
raise RuntimeError(f'{download_target} exists and is not a regular file')
elif expected_sha256:
if hash_file(download_target) == expected_sha256:
return download_target
logging.warning('%s exists, but the SHA256 checksum does not match;'
're-downloading the file', download_target)
with gfile.GFile(download_target, 'wb') as output:
with urllib.request.urlopen(url) as source:
loop = tqdm.tqdm(total=int(source.info().get('Content-Length')),
ncols=80, unit='iB', unit_scale=True, unit_divisor=1024)
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if expected_sha256 and hash_file(download_target) != expected_sha256:
raise RuntimeError(
        'Model has been downloaded but the SHA256 checksum does not match')
return download_target
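# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical URL and placeholder checksum; the real checkpoint URLs are
# expected to be defined elsewhere in the project:
#
#     path = download(
#         'https://example.com/clip_vit_b16.npy',
#         expected_sha256='0123...abcd')   # placeholder, not a real digest
#     # Files are cached under ~/.cache/scenic/clip unless `root` is overridden.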
|
google-research/scenic
|
scenic/projects/baselines/clip/download.py
|
Python
|
apache-2.0
| 2,213
|
"""
.. module: security_monkey.watcher
:platform: Unix
:synopsis: Slurps the current config from AWS and compares it to what has previously
been recorded in the database to find any changes.
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
from botocore.exceptions import ClientError
from common.PolicyDiff import PolicyDiff
from common.utils import sub_dict
from security_monkey import app
from security_monkey.datastore import Account, IgnoreListEntry, Technology, store_exception
from security_monkey.common.jinja import get_jinja_env
from boto.exception import BotoServerError
import time
import datastore
from copy import deepcopy
import dpath.util
from dpath.exceptions import PathNotFound
class Watcher(object):
"""Slurps the current config from AWS and compares it to what has previously
been recorded in the database to find any changes."""
index = 'abstract'
i_am_singular = 'Abstract'
i_am_plural = 'Abstracts'
rate_limit_delay = 0
ignore_list = []
interval = 15 # in minutes
def __init__(self, accounts=None, debug=False):
"""Initializes the Watcher"""
self.datastore = datastore.Datastore()
if not accounts:
accounts = Account.query.filter(Account.third_party==False).filter(Account.active==True).all()
self.accounts = [account.name for account in accounts]
else:
self.accounts = accounts
self.debug = debug
self.created_items = []
self.deleted_items = []
self.changed_items = []
self.ephemeral_items = []
# TODO: grab these from DB, keyed on account
self.rate_limit_delay = 0
self.interval = 15
self.honor_ephemerals = False
self.ephemeral_paths = []
def prep_for_slurp(self):
"""
Should be run before slurp is run to grab the IgnoreList.
"""
query = IgnoreListEntry.query
query = query.join((Technology, Technology.id == IgnoreListEntry.tech_id))
self.ignore_list = query.filter(Technology.name == self.index).all()
def check_ignore_list(self, name):
"""
See if the given item has a name flagging it to be ignored by security_monkey.
"""
for result in self.ignore_list:
# Empty prefix comes back as None instead of an empty string ...
prefix = result.prefix or ""
if name.lower().startswith(prefix.lower()):
app.logger.warn("Ignoring {}/{} because of IGNORELIST prefix {}".format(self.index, name, result.prefix))
return True
return False
def wrap_aws_rate_limited_call(self, awsfunc, *args, **nargs):
attempts = 0
def increase_delay():
if self.rate_limit_delay == 0:
self.rate_limit_delay = 1
app.logger.warn(('Being rate-limited by AWS. Increasing delay on tech {} ' +
'in account {} from 0 to 1 second. Attempt {}')
.format(self.index, self.accounts, attempts))
elif self.rate_limit_delay < 4:
self.rate_limit_delay = self.rate_limit_delay * 2
app.logger.warn(('Still being rate-limited by AWS. Increasing delay on tech {} ' +
'in account {} to {} seconds. Attempt {}')
.format(self.index, self.accounts, self.rate_limit_delay, attempts))
else:
app.logger.warn(('Still being rate-limited by AWS. Keeping delay on tech {} ' +
'in account {} at {} seconds. Attempt {}')
.format(self.index, self.accounts, self.rate_limit_delay, attempts))
while True:
attempts = attempts + 1
try:
if self.rate_limit_delay > 0:
time.sleep(self.rate_limit_delay)
retval = awsfunc(*args, **nargs)
if self.rate_limit_delay > 0:
app.logger.warn("Successfully Executed Rate-Limited Function. "
"Tech: {} Account: {}. Removing sleep period."
.format(self.index, self.accounts))
self.rate_limit_delay = 0
return retval
except BotoServerError as e: # Boto
if not e.error_code == 'Throttling':
raise e
increase_delay()
except ClientError as e: # Botocore
if not e.response["Error"]["Code"] == "Throttling":
raise e
increase_delay()
def created(self):
"""
Used by the Jinja templates
:returns: True if created_items is not empty
:returns: False otherwise.
"""
return len(self.created_items) > 0
def deleted(self):
"""
Used by the Jinja templates
:returns: True if deleted_items is not empty
:returns: False otherwise.
"""
return len(self.deleted_items) > 0
def changed(self):
"""
Used by the Jinja templates
:returns: True if changed_items is not empty
:returns: False otherwise.
"""
return len(self.changed_items) > 0
def slurp(self):
"""
method to slurp configuration from AWS for whatever it is that I'm
interested in. This will be overridden for each technology.
"""
raise NotImplementedError()
def slurp_exception(self, location=None, exception=None, exception_map={}, source="watcher"):
"""
Logs any exceptions that happen in slurp and adds them to the exception_map
using their location as the key. The location is a tuple in the form:
(technology, account, region, item_name) that describes the object where the exception occurred.
Location can also exclude an item_name if the exception is region wide.
"""
if location in exception_map:
app.logger.debug("Exception map already has location {}. This should not happen.".format(location))
exception_map[location] = exception
app.logger.debug("Adding {} to the exceptions list. Exception was: {}".format(location, str(exception)))
# Store it to the database:
store_exception(source, location, exception)
def location_in_exception_map(self, item_location, exception_map={}):
"""
Determines whether a given location is covered by an exception already in the
exception map.
Item location: (self.index, self.account, self.region, self.name)
exception Maps: (index, account, region, name)
(index, account, region)
(index, account)
:returns: True if location is covered by an entry in the exception map.
:returns: False if location is not covered by an entry in the exception map.
"""
# Exact Match
if item_location in exception_map:
app.logger.debug("Skipping {} due to an item-level exception {}.".format(item_location, exception_map[item_location]))
return True
# (index, account, region)
if item_location[0:3] in exception_map:
app.logger.debug("Skipping {} due to a region-level exception {}.".format(item_location, exception_map[item_location[0:3]]))
return True
# (index, account)
if item_location[0:2] in exception_map:
app.logger.debug("Skipping {} due to an account-level exception {}.".format(item_location, exception_map[item_location[0:2]]))
return True
# (index)
if item_location[0:1] in exception_map:
app.logger.debug("Skipping {} due to a technology-level exception {}.".format(item_location, exception_map[item_location[0:1]]))
return True
return False
def find_deleted(self, previous=[], current=[], exception_map={}):
"""
Find any items that have been deleted since the last run of the watcher.
Add these items to the deleted_items list.
"""
prev_map = {item.location(): item for item in previous}
curr_map = {item.location(): item for item in current}
item_locations = list(set(prev_map).difference(set(curr_map)))
item_locations = [item_location for item_location in item_locations if not self.location_in_exception_map(item_location, exception_map)]
list_deleted_items = [prev_map[item] for item in item_locations]
for item in list_deleted_items:
deleted_change_item = ChangeItem.from_items(old_item=item, new_item=None)
app.logger.debug("%s: %s/%s/%s deleted" % (self.i_am_singular, item.account, item.region, item.name))
self.deleted_items.append(deleted_change_item)
def find_new(self, previous=[], current=[]):
"""
Find any new objects that have been created since the last run of the watcher.
Add these items to the created_items list.
"""
prev_map = {item.location(): item for item in previous}
curr_map = {item.location(): item for item in current}
item_locations = list(set(curr_map).difference(set(prev_map)))
list_new_items = [curr_map[item] for item in item_locations]
for item in list_new_items:
new_change_item = ChangeItem.from_items(old_item=None, new_item=item)
self.created_items.append(new_change_item)
app.logger.debug("%s: %s/%s/%s created" % (self.i_am_singular, item.account, item.region, item.name))
def find_modified(self, previous=[], current=[], exception_map={}):
"""
Find any objects that have been changed since the last run of the watcher.
Add these items to the changed_items list.
"""
prev_map = {item.location(): item for item in previous}
curr_map = {item.location(): item for item in current}
item_locations = list(set(curr_map).intersection(set(prev_map)))
item_locations = [item_location for item_location in item_locations if not self.location_in_exception_map(item_location, exception_map)]
for location in item_locations:
prev_item = prev_map[location]
curr_item = curr_map[location]
# ChangeItem with and without ephemeral changes
eph_change_item = None
dur_change_item = None
if not sub_dict(prev_item.config) == sub_dict(curr_item.config):
eph_change_item = ChangeItem.from_items(old_item=prev_item, new_item=curr_item)
if self.ephemerals_skipped():
# deepcopy configs before filtering
dur_prev_item = deepcopy(prev_item)
dur_curr_item = deepcopy(curr_item)
# filter-out ephemeral paths in both old and new config dicts
for path in self.ephemeral_paths:
for cfg in [dur_prev_item.config, dur_curr_item.config]:
try:
dpath.util.delete(cfg, path, separator='$')
except PathNotFound:
pass
# now, compare only non-ephemeral paths
if not sub_dict(dur_prev_item.config) == sub_dict(dur_curr_item.config):
dur_change_item = ChangeItem.from_items(old_item=dur_prev_item, new_item=dur_curr_item)
# store all changes, divided in specific categories
if eph_change_item:
self.ephemeral_items.append(eph_change_item)
app.logger.debug("%s: ephemeral changes in item %s/%s/%s" % (self.i_am_singular, eph_change_item.account, eph_change_item.region, eph_change_item.name))
if dur_change_item:
self.changed_items.append(dur_change_item)
app.logger.debug("%s: durable changes in item %s/%s/%s" % (self.i_am_singular, dur_change_item.account, dur_change_item.region, dur_change_item.name))
elif eph_change_item is not None:
# store all changes, handle them all equally
self.changed_items.append(eph_change_item)
app.logger.debug("%s: changes in item %s/%s/%s" % (self.i_am_singular, eph_change_item.account, eph_change_item.region, eph_change_item.name))
def find_changes(self, current=[], exception_map={}):
"""
Identify changes between the configuration I have and what I had
last time the watcher ran.
This ignores any account/region which caused an exception during slurp.
"""
prev = self.read_previous_items()
self.find_deleted(previous=prev, current=current, exception_map=exception_map)
self.find_new(previous=prev, current=current)
self.find_modified(previous=prev, current=current, exception_map=exception_map)
def read_previous_items(self):
"""
Pulls the last-recorded configuration from the database.
:return: List of all items for the given technology and the given account.
"""
prev_list = []
for account in self.accounts:
prev = self.datastore.get_all_ctype_filtered(tech=self.index, account=account, include_inactive=False)
# Returns a map of {Item: ItemRevision}
for item in prev:
item_revision = prev[item]
new_item = ChangeItem(index=self.index,
region=item.region,
account=item.account.name,
name=item.name,
new_config=item_revision.config)
prev_list.append(new_item)
return prev_list
def is_changed(self):
"""
Note: It is intentional that self.ephemeral_items is not included here
so that emails will not go out about those changes.
Those changes will still be recorded in the database and visible in the UI.
:return: boolean whether or not we've found any changes
"""
return self.deleted_items or self.created_items or self.changed_items
def issues_found(self):
"""
Runs through any changed items to see if any have issues.
:return: boolean whether any changed items have issues
"""
has_issues = False
has_new_issue = False
has_unjustified_issue = False
for item in self.created_items + self.changed_items:
if item.audit_issues:
has_issues = True
if item.found_new_issue:
has_new_issue = True
has_unjustified_issue = True
break
for issue in item.confirmed_existing_issues:
if not issue.justified:
has_unjustified_issue = True
break
return has_issues, has_new_issue, has_unjustified_issue
def save(self):
"""
save new configs, if necessary
"""
app.logger.info("{} deleted {} in {}".format(len(self.deleted_items), self.i_am_plural, self.accounts))
app.logger.info("{} created {} in {}".format(len(self.created_items), self.i_am_plural, self.accounts))
for item in self.created_items + self.deleted_items:
item.save(self.datastore)
if self.ephemerals_skipped():
changed_locations = [item.location() for item in self.changed_items]
new_item_revisions = [item for item in self.ephemeral_items if item.location() in changed_locations]
app.logger.info("{} changed {} in {}".format(len(new_item_revisions), self.i_am_plural, self.accounts))
for item in new_item_revisions:
item.save(self.datastore)
edit_item_revisions = [item for item in self.ephemeral_items if item.location() not in changed_locations]
app.logger.info("{} ephemerally changed {} in {}".format(len(edit_item_revisions), self.i_am_plural, self.accounts))
for item in edit_item_revisions:
item.save(self.datastore, ephemeral=True)
else:
app.logger.info("{} changed {} in {}".format(len(self.changed_items), self.i_am_plural, self.accounts))
for item in self.changed_items:
item.save(self.datastore)
def plural_name(self):
"""
Used for Jinja Template
:return: i_am_plural
"""
return self.i_am_plural
def singular_name(self):
"""
Used for Jinja Template
:return: i_am_singular
"""
return self.i_am_singular
def get_interval(self):
""" Returns interval time (in minutes) """
return self.interval
def ephemerals_skipped(self):
""" Returns whether ephemerals locations are ignored """
return self.honor_ephemerals
class ChangeItem(object):
"""
Object tracks two different revisions of a given item.
"""
def __init__(self, index=None, region=None, account=None, name=None, arn=None, old_config={}, new_config={}, active=False, audit_issues=None):
self.index = index
self.region = region
self.account = account
self.name = name
self.arn = arn
self.old_config = old_config
self.new_config = new_config
self.active = active
self.audit_issues = audit_issues or []
self.confirmed_new_issues = []
self.confirmed_fixed_issues = []
self.confirmed_existing_issues = []
self.found_new_issue = False
@classmethod
def from_items(cls, old_item=None, new_item=None):
"""
Create ChangeItem from two separate items.
:return: An instance of ChangeItem
"""
if not old_item and not new_item:
return
valid_item = new_item if new_item else old_item
active = True if new_item else False
old_config = old_item.config if old_item else {}
new_config = new_item.config if new_item else {}
return cls(index=valid_item.index,
region=valid_item.region,
account=valid_item.account,
name=valid_item.name,
arn=valid_item.arn,
old_config=old_config,
new_config=new_config,
active=active,
audit_issues=valid_item.audit_issues)
@property
def config(self):
return self.new_config
def location(self):
"""
Construct a location from the object.
:return: tuple containing index, account, region, and name.
"""
return (self.index, self.account, self.region, self.name)
def get_pdiff_html(self):
pdiff = PolicyDiff(self.new_config, self.old_config)
return pdiff.produceDiffHTML()
def _dict_for_template(self):
return {
'account': self.account,
'region': self.region,
'name': self.name,
'confirmed_new_issues': self.confirmed_new_issues,
'confirmed_fixed_issues': self.confirmed_fixed_issues,
'confirmed_existing_issues': self.confirmed_existing_issues,
'pdiff_html': self.get_pdiff_html()
}
def description(self):
"""
Provide an HTML description of the object for change emails and the Jinja templates.
        :return: string of HTML describing the object.
"""
jenv = get_jinja_env()
template = jenv.get_template('jinja_change_item.html')
body = template.render(self._dict_for_template())
# app.logger.info(body)
return body
def save(self, datastore, ephemeral=False):
"""
Save the item
"""
app.logger.debug("Saving {}/{}/{}/{}\n\t{}".format(self.index, self.account, self.region, self.name, self.new_config))
datastore.store(
self.index,
self.region,
self.account,
self.name,
self.active,
self.new_config,
arn=self.arn,
new_issues=self.audit_issues,
ephemeral=ephemeral)
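# A hedged sketch of how a concrete technology watcher might build on the Watcher
# base class above. The boto3 S3 client, the dict payload appended to `items`, and
# the (items, exception_map) return shape are illustrative assumptions; real
# watchers wrap results in item classes that expose location() and config so that
# find_changes()/ChangeItem can consume them.
class ExampleS3Watcher(Watcher):
    index = 's3example'
    i_am_singular = 'S3 Example'
    i_am_plural = 'S3 Examples'

    def slurp(self):
        import boto3  # assumed available; illustration only
        self.prep_for_slurp()  # load the ignore list before fetching config
        items = []
        exception_map = {}
        client = boto3.client('s3')
        for account in self.accounts:
            try:
                # retried with exponential backoff if AWS throttles the call
                response = self.wrap_aws_rate_limited_call(client.list_buckets)
            except Exception as error:
                self.slurp_exception((self.index, account), error, exception_map)
                continue
            for bucket in response.get('Buckets', []):
                if not self.check_ignore_list(bucket['Name']):
                    items.append({'account': account, 'name': bucket['Name']})
        return items, exception_map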
|
bunjiboys/security_monkey
|
security_monkey/watcher.py
|
Python
|
apache-2.0
| 20,454
|
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from abc import ABCMeta
from six.moves import configparser
from intern.service.mesh.service import MeshService, VoxelUnits
import os
CONFIG_FILE = '~/.intern/intern.cfg'
@six.add_metaclass(ABCMeta)
class Remote(object):
"""Base class for communicating with remote data stores.
Attributes:
_volume (intern.service.Service): Class that communicates with the volume service.
_metadata (intern.service.Service): Class that communicates with the metadata service.
_project (intern.service.Service): Class that communicates with the project service.
_object (intern.service.Service): Class that communicates with the object service.
"""
def __init__(self, cfg_file_or_dict=None):
"""Constructor.
Loads credentials in order from user provided dictionary > user provided file > default file > environment vars
Args:
cfg_file_or_dict (optional[str|dict]): Path to config file in INI format or a dict of config parameters.
"""
# Service Objects
self._volume = None
self._metadata = None
self._project = None
self._object = None
self._mesh = None
# Configuration data loaded from file or passed directly to the constructor
# Is available for children Remote classes to use as needed
self._config = None
# Tokens for Services
self._token_project = None
self._token_metadata = None
self._token_volume = None
self._token_object = None
if cfg_file_or_dict is None:
# Default to the config file in the user directory if no config file was provided
cfg_file_or_dict = os.path.expanduser(CONFIG_FILE)
if isinstance(cfg_file_or_dict, dict):
# A config dictionary was provided directly. Keep things consistent by creating an INI string.
cfg_str = "[Default]\n"
for key in cfg_file_or_dict:
cfg_str = "{}{} = {}\n".format(cfg_str, key, cfg_file_or_dict[key])
self._config = self.load_config_file(six.StringIO(cfg_str))
else:
# A file path was provided by the user
if os.path.isfile(os.path.expanduser(cfg_file_or_dict)):
with open(os.path.expanduser(cfg_file_or_dict), 'r') as cfg_file_handle:
self._config = self.load_config_file(cfg_file_handle)
else:
# Provided file or default file do not exist. Try loading from env variables
if "INTERN_PROTOCOL" in os.environ and "INTERN_HOST" in os.environ and "INTERN_TOKEN" in os.environ:
cfg_str = "[Default]\n"
cfg_str = "{}{} = {}\n".format(cfg_str, "protocol", os.environ['INTERN_PROTOCOL'])
cfg_str = "{}{} = {}\n".format(cfg_str, "host", os.environ['INTERN_HOST'])
cfg_str = "{}{} = {}\n".format(cfg_str, "token", os.environ['INTERN_TOKEN'])
self._config = self.load_config_file(six.StringIO(cfg_str))
else:
raise IOError("Configuration file not found: {}. Please provide credential file or set environment variables".format(cfg_file_or_dict))
self._init_mesh_service()
def load_config_file(self, config_handle):
"""Load config data for the Remote.
Args:
config_handle (io.StringIO): Config data encoded in a string.
Returns:
(configparser.ConfigParser)
"""
cfg_parser = configparser.ConfigParser()
cfg_parser.read_file(config_handle)
return cfg_parser
def _init_mesh_service(self):
"""
Method to initialize the Mesh Service
Args:
None
Returns:
None
Raises:
(KeyError): if given invalid version.
"""
self._mesh = MeshService()
@property
def volume_service(self):
return self._volume
@property
def project_service(self):
return self._project
@property
def metadata_service(self):
return self._metadata
@property
def object_service(self):
return self._object
@property
def mesh_service(self):
return self._mesh
def list_project(self, **kwargs):
"""Perform list operation on the project.
What this does is highly dependent on project's data model.
Args:
(**kwargs): Args are implementation dependent.
Returns:
(list)
"""
return self._project.list(**kwargs)
    def get_cutout(self, resource, resolution, x_range, y_range, z_range, time_range=None, id_list=[], parallel: bool = True, **kwargs):
"""Get a cutout from the volume service.
Args:
resource (intern.resource.Resource): Resource compatible with cutout operations.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
time_range (optional [list[int]]): time range such as [30, 40] which means t>=30 and t<40.
id_list (optional [list]): list of object ids to filter the cutout by.
parallel (bool: True): Whether downloads should be parallelized using multiprocessing
Returns:
(): Return type depends on volume service's implementation.
Raises:
RuntimeError when given invalid resource.
Other exceptions may be raised depending on the volume service's implementation.
"""
if not resource.valid_volume():
raise RuntimeError('Resource incompatible with the volume service.')
return self._volume.get_cutout(
resource, resolution,
x_range, y_range, z_range, time_range,
id_list, parallel = parallel, **kwargs
)
def create_cutout(self, resource, resolution, x_range, y_range, z_range, data, time_range=None):
"""Upload a cutout to the volume service.
Args:
resource (intern.resource.Resource): Resource compatible with cutout operations.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
data (object): Type depends on implementation.
time_range (optional [list[int]]): time range such as [30, 40] which means t>=30 and t<40.
Returns:
(): Return type depends on volume service's implementation.
Raises:
RuntimeError when given invalid resource.
Other exceptions may be raised depending on the volume service's implementation.
"""
if not resource.valid_volume():
raise RuntimeError('Resource incompatible with the volume service.')
return self._volume.create_cutout(
resource, resolution, x_range, y_range, z_range, data, time_range)
def reserve_ids(self, resource, num_ids):
"""Reserve a block of unique, sequential ids for annotations.
Args:
resource (intern.resource.Resource): Resource compatible with annotation operations.
num_ids (int): Number of ids to reserve.
Returns:
(int): First id reserved.
"""
if not resource.valid_volume():
raise RuntimeError('Resource incompatible with the volume service.')
return self._volume.reserve_ids(resource, num_ids)
def get_extents(self, resource):
"""Get extents of data volume
Args:
resource (intern.resource.Resource): Data platform resource.
Returns:
extents (array): [[x-min, max-x], [y-min, max-y], [z-min, max-z]]
"""
return self._metadata.get_extents(resource)
def get_bounding_box(self, resource, resolution, id, bb_type='loose'):
"""Get bounding box containing object specified by id.
Currently only supports 'loose' bounding boxes. The bounding box
returned is cuboid aligned.
Args:
resource (intern.resource.Resource): Resource compatible with annotation operations.
resolution (int): 0 indicates native resolution.
id (int): Id of object of interest.
bb_type (optional[string]): Defaults to 'loose'.
Returns:
(dict): {'x_range': [0, 10], 'y_range': [0, 10], 'z_range': [0, 10], 't_range': [0, 10]}
"""
if not resource.valid_volume():
raise RuntimeError('Resource incompatible with the volume service.')
if bb_type != 'loose' and bb_type != 'tight':
raise RuntimeError("bb_type must be either 'loose' or 'tight'.")
return self._volume.get_bounding_box(resource, resolution, id, bb_type)
def get_ids_in_region(
self, resource, resolution,
x_range, y_range, z_range, time_range=[0, 1]):
"""Get all ids in the region defined by x_range, y_range, z_range.
Args:
resource (intern.resource.Resource): Resource compatible with annotation operations.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
time_range (optional [list[int]]): time range such as [30, 40] which means t>=30 and t<40. Defaults to [0, 1].
Returns:
(list[int]): Example: [1, 2, 25].
Raises:
requests.HTTPError
TypeError: if resource is not an annotation channel.
"""
return self._volume.get_ids_in_region(
resource, resolution, x_range, y_range, z_range, time_range)
def mesh(self, resource, resolution,
x_range, y_range, z_range, time_range=None,
id_list=[], voxel_unit=VoxelUnits.nm,
voxel_size=[4,4,40], simp_fact = 0, max_simplification_error=60,
normals=False, **kwargs):
"""Generate a mesh of the specified IDs
Args:
resource (intern.resource.Resource): Resource compatible with cutout operations.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
time_range (optional [list[int]]): time range such as [30, 40] which means t>=30 and t<40.
id_list (optional [list]): list of object ids to filter the volume by.
voxel_unit (optional VoxelUnit): voxel unit of measurement to derive conversion factor.
voxel_size (optional [list]): list in form [x,y,z] of voxel size. Defaults to 4x4x40nm
simp_fact (optional int): mesh simplification factor, reduces triangles by given factor
max_simplification_error (optional int): Max tolerable error in physical distance
normals (optional bool): if true will calculate normals
Returns:
mesh (intern.service.mesh.Mesh): mesh class
Raises:
RuntimeError when given invalid resource.
Other exceptions may be raised depending on the volume service's implementation.
"""
if not resource.valid_volume():
raise RuntimeError('Resource incompatible with the volume service.')
volume = self._volume.get_cutout(
resource, resolution, x_range, y_range, z_range, time_range, id_list, **kwargs)
mesh = self._mesh.create(
volume, x_range, y_range, z_range, time_range, id_list, voxel_unit, voxel_size,
simp_fact, max_simplification_error, normals)
return mesh
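# A hedged usage sketch of the credential sources described in the constructor
# docstring, in precedence order (dict > explicit file > default file > environment
# variables). Constructing the base Remote directly is for illustration only; real
# code would use a concrete subclass, and the host/token values are placeholders.
if __name__ == '__main__':
    # 1. Credentials passed as a dict (converted internally to an INI string).
    rmt = Remote({'protocol': 'https', 'host': 'example.host', 'token': 'secret'})
    print(rmt._config.get('Default', 'host'))
    # 2. Credentials read from an INI file path; falls back to ~/.intern/intern.cfg
    #    when no argument is given:
    #        rmt = Remote('/path/to/intern.cfg')
    # 3. Credentials from INTERN_PROTOCOL / INTERN_HOST / INTERN_TOKEN environment
    #    variables when neither a dict nor an existing file is available.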
|
jhuapl-boss/intern
|
intern/remote/remote.py
|
Python
|
apache-2.0
| 13,084
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements the recall metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable, List, Optional, Tuple, Union
import tensorflow as tf
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import safe_ops
from tensorflow_graphics.util import shape
from tensorflow_graphics.util import type_alias
def _cast_to_int(prediction):
return tf.cast(x=prediction, dtype=tf.int32)
def evaluate(ground_truth: type_alias.TensorLike,
prediction: type_alias.TensorLike,
classes: Optional[Union[int, List[int], Tuple[int]]] = None,
reduce_average: bool = True,
prediction_to_category_function: Callable[..., Any] = _cast_to_int,
name: str = "recall_evaluate") -> tf.Tensor:
"""Computes the recall metric for the given ground truth and predictions.
Note:
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
Args:
ground_truth: A tensor of shape `[A1, ..., An, N]`, where the last axis
represents the ground truth labels. Will be cast to int32.
prediction: A tensor of shape `[A1, ..., An, N]`, where the last axis
represents the predictions (which can be continuous).
classes: An integer or a list/tuple of integers representing the classes for
which the recall will be evaluated. In case 'classes' is 'None', the
number of classes will be inferred from the given values and the recall
will be calculated for each of the classes. Defaults to 'None'.
reduce_average: Whether to calculate the average of the recall for each
class and return a single recall value. Defaults to true.
prediction_to_category_function: A function to associate a `prediction` to a
      category. Defaults to truncating the prediction to an integer by casting
      it to int32.
name: A name for this op. Defaults to "recall_evaluate".
Returns:
A tensor of shape `[A1, ..., An, C]`, where the last axis represents the
recall calculated for each of the requested classes.
Raises:
ValueError: if the shape of `ground_truth`, `prediction` is not supported.
"""
with tf.name_scope(name):
ground_truth = tf.cast(
x=tf.convert_to_tensor(value=ground_truth), dtype=tf.int32)
prediction = tf.convert_to_tensor(value=prediction)
shape.compare_batch_dimensions(
tensors=(ground_truth, prediction),
tensor_names=("ground_truth", "prediction"),
last_axes=-1,
broadcast_compatible=True)
prediction = prediction_to_category_function(prediction)
if classes is None:
num_classes = tf.math.maximum(
tf.math.reduce_max(input_tensor=ground_truth),
tf.math.reduce_max(input_tensor=prediction)) + 1
classes = tf.range(num_classes)
else:
classes = tf.convert_to_tensor(value=classes)
# Make sure classes is a tensor of rank 1.
classes = tf.reshape(classes, [1]) if tf.rank(classes) == 0 else classes
# Create a confusion matrix for each of the classes (with dimensions
# [A1, ..., An, C, N]).
classes = tf.expand_dims(classes, -1)
ground_truth_per_class = tf.equal(tf.expand_dims(ground_truth, -2), classes)
prediction_per_class = tf.equal(tf.expand_dims(prediction, -2), classes)
    # Calculate the recall for each of the classes.
true_positives = tf.math.reduce_sum(
input_tensor=tf.cast(
x=tf.math.logical_and(ground_truth_per_class, prediction_per_class),
dtype=tf.float32),
axis=-1)
total_ground_truth_positives = tf.math.reduce_sum(
input_tensor=tf.cast(x=ground_truth_per_class, dtype=tf.float32),
axis=-1)
recall_per_class = safe_ops.safe_signed_div(true_positives,
total_ground_truth_positives)
if reduce_average:
return tf.math.reduce_mean(input_tensor=recall_per_class, axis=-1)
else:
return recall_per_class
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
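# A hedged usage sketch with toy labels: class 0 is recalled perfectly (1.0) while
# class 1 recovers 2 of its 3 positives (2/3), so the averaged recall is 5/6. The
# inputs below are illustrative only.
if __name__ == "__main__":
    ground_truth = [1, 0, 1, 1]
    prediction = [1, 0, 0, 1]
    print(evaluate(ground_truth, prediction))  # ~0.833, mean recall over classes
    print(evaluate(ground_truth, prediction, reduce_average=False))  # ~[1.0, 0.667]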
|
tensorflow/graphics
|
tensorflow_graphics/nn/metric/recall.py
|
Python
|
apache-2.0
| 4,776
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of Unicode strings that are particularly likely to trip up the unwary."""
UNICODE_TEST_STRINGS = [
"common-unicode-ascii-safe-ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"common-unicode-ascii-safe-abcdefghijklmnopqrstuvwxyz",
"common-unicode-ascii-safe-0123456789",
"common-unicode-ascii-safe-:@$-_.!*()',~",
"common-unicode-ascii-safe-unreserved-._~",
"common-unicode-ascii-safe-sub-delims-$!*()',",
"common-unicode-ascii-safe-gen-delims-:@",
'common-unicode-ascii-escaped-"#<>[]^`{}|',
"common-unicode-ascii-escaped-tomcatBlocked-\\",
"common-unicode-ascii-escaped-tomcatBlocked-%5C",
"common-unicode-ascii-semi-colon-test-%3B",
"common-unicode-ascii-escaped-%",
"common-unicode-ascii-escaped-space x x",
"common-unicode-ascii-escape-anyway-+",
"common-unicode-ascii-escape-space-v-plus-+ +%20 %20+",
"path-unicode-ascii-safe-&=&=",
"path-unicode-ascii-escaped-;",
"path-unicode-ascii-escaped-?",
"path-unicode-ascii-escaped-/",
"path-unicode-ascii-escaped-%3F",
"path-unicode-ascii-escaped-%2F",
"path-unicode-ascii-escaped-double-//case",
"path-unicode-ascii-escaped-double-trailing//",
"path-unicode-ascii-escaped-double-%2F%2Fcase",
"path-unicode-ascii-escaped-double-trailing%2F%2F",
"query-unicode-ascii-safe-;",
"query-unicode-ascii-safe-/?",
"query-unicode-ascii-escaped-&=&=",
"fragment-unicode-ascii-safe-;",
"fragment-unicode-ascii-safe-&=&=/?",
"common-unicode-bmp-1byte-escaped-¡¢£",
"common-unicode-bmp-2byte-escaped-䦹䦺",
"common-unicode-supplementary-escaped-𐌔𐌕𐌖𐌗",
"query-unicode-ascii-escaped-this&that=theOther",
"common-ascii-doc-example-urn:lsid:ubio.org:namebank:11815",
"path-ascii-doc-example-10.1000/182",
"query-ascii-doc-example-10.1000/182",
"fragment-ascii-doc-example-10.1000/182",
"path-ascii-doc-example-http://example.com/data/mydata?row=24",
"query-ascii-doc-example-http://example.com/data/mydata?row=24",
"fragment-ascii-doc-example-http://example.com/data/mydata?row=24",
"path-ascii-doc-example-ldap://ldap1.example.net:6666/"
"o=University%20of%20Michigan, c=US??sub?(cn=Babs%20Jensen)",
"query-ascii-doc-example-ldap://ldap1.example.net:6666/"
"o=University%20of%20Michigan, c=US??sub?(cn=Babs%20Jensen)",
"fragment-ascii-doc-example-ldap://ldap1.example.net:6666/"
"o=University%20of%20Michigan, c=US??sub?(cn=Babs%20Jensen)",
"common-bmp-doc-example-ฉันกินกระจกได้",
"common-bmp-doc-example-Is féidir liom ithe gloine",
"decode-space-potential-error-unescaped-plus-+",
]
|
DataONEorg/d1_python
|
test_utilities/src/d1_test/instance_generator/unicode_test_strings.py
|
Python
|
apache-2.0
| 3,468
|
"""Unit tests for Port"""
import unittest
from faucet.port import Port
class MockVLAN(object): # pylint: disable=too-few-public-methods
"""Mock class for VLAN so we can inject into Port"""
def __init__(self, name):
self.name = name
class FaucetPortConfigTest(unittest.TestCase): # pytype: disable=module-attr
"""Test that Port serialises config as it receives it"""
def setUp(self):
"""Defines the default config - this should match the documentation"""
self.default_config = {
'acl_in': None,
'acls_in': None,
'description': None,
'enabled': True,
'hairpin': False,
'lacp': 0,
'lldp_beacon': {},
'loop_protect': False,
'max_hosts': 255,
'mirror': None,
# .to_conf() doesn't export name
'native_vlan': None,
'number': None,
'opstatus_reconf': True,
'output_only': False,
'override_output_port': None,
'permanent_learn': False,
'receive_lldp': False,
'stack': None,
'tagged_vlans': [],
'unicast_flood': True
}
def test_basic_config(self):
"""Tests the minimal config"""
port_number = 1
port_key = 1
input_config = {}
output_config = {
'description': str(port_key),
'number': port_number
}
expected_config = self.default_config
expected_config.update(input_config)
expected_config.update(output_config)
port = Port(port_key, 1, input_config)
output_config = port.to_conf()
self.assertEqual(output_config, expected_config)
key_exceptions = [
'name',
'_id',
'dp_id',
'dyn_phys_up'
]
dict_keys = set(port.__dict__.keys())
conf_keys = set(port.to_conf().keys())
for exception in key_exceptions:
dict_keys.remove(exception)
self.assertEqual(dict_keys, conf_keys)
def test_config_with_port_number(self):
"""Tests the minimal config"""
port_number = 1
port_key = 'port_1'
input_config = {
'number': port_number
}
output_config = {
'description': str(port_key),
'number': port_number
}
expected_config = self.default_config
expected_config.update(input_config)
expected_config.update(output_config)
port = Port(port_key, 1, input_config)
output_config = port.to_conf()
self.assertEqual(output_config, expected_config)
def test_config_with_vlans(self):
"""Tests the config with tagged and native vlans"""
vlan100 = MockVLAN('v100')
vlan200 = MockVLAN('v200')
vlan300 = MockVLAN('v300')
tagged_vlans = [vlan200, vlan300]
native_vlan = vlan100
port_number = 1
port_key = 'port_1'
input_config = {
'number': port_number
}
output_config = {
'description': str(port_key),
'number': port_number,
'native_vlan': vlan100.name,
'tagged_vlans': [vlan200.name, vlan300.name]
}
expected_config = self.default_config
expected_config.update(input_config)
expected_config.update(output_config)
port = Port(port_key, 1, input_config)
port.native_vlan = native_vlan
port.tagged_vlans = tagged_vlans
output_config = port.to_conf()
self.assertEqual(output_config, expected_config)
class FaucetPortMethodTest(unittest.TestCase): # pytype: disable=module-attr
"""Test a range of methods on Port"""
def test_vlans(self):
"""Test that the vlans() method behaves correctly"""
vlan100 = MockVLAN('v100')
vlan200 = MockVLAN('v200')
vlan300 = MockVLAN('v300')
tagged_vlans = [vlan200, vlan300]
native_vlan = vlan100
port = Port(1, 1, {})
port.native_vlan = native_vlan
self.assertEqual(port.vlans(), [native_vlan])
port.tagged_vlans = tagged_vlans
self.assertEqual(set(port.vlans()), set([native_vlan] + tagged_vlans))
port.native_vlan = None
self.assertEqual(set(port.vlans()), set(tagged_vlans))
|
wackerly/faucet
|
tests/test_port.py
|
Python
|
apache-2.0
| 4,410
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.enums.types import extension_setting_device
from google.ads.googleads.v8.enums.types import (
extension_type as gage_extension_type,
)
__protobuf__ = proto.module(
package="google.ads.googleads.v8.resources",
marshal="google.ads.googleads.v8",
manifest={"AdGroupExtensionSetting",},
)
class AdGroupExtensionSetting(proto.Message):
r"""An ad group extension setting.
Attributes:
resource_name (str):
Immutable. The resource name of the ad group extension
setting. AdGroupExtensionSetting resource names have the
form:
``customers/{customer_id}/adGroupExtensionSettings/{ad_group_id}~{extension_type}``
extension_type (google.ads.googleads.v8.enums.types.ExtensionTypeEnum.ExtensionType):
Immutable. The extension type of the ad group
extension setting.
ad_group (str):
Immutable. The resource name of the ad group. The linked
extension feed items will serve under this ad group. AdGroup
resource names have the form:
``customers/{customer_id}/adGroups/{ad_group_id}``
extension_feed_items (Sequence[str]):
The resource names of the extension feed items to serve
under the ad group. ExtensionFeedItem resource names have
the form:
``customers/{customer_id}/extensionFeedItems/{feed_item_id}``
device (google.ads.googleads.v8.enums.types.ExtensionSettingDeviceEnum.ExtensionSettingDevice):
The device for which the extensions will
serve. Optional.
"""
resource_name = proto.Field(proto.STRING, number=1,)
extension_type = proto.Field(
proto.ENUM,
number=2,
enum=gage_extension_type.ExtensionTypeEnum.ExtensionType,
)
ad_group = proto.Field(proto.STRING, number=6, optional=True,)
extension_feed_items = proto.RepeatedField(proto.STRING, number=7,)
device = proto.Field(
proto.ENUM,
number=5,
enum=extension_setting_device.ExtensionSettingDeviceEnum.ExtensionSettingDevice,
)
__all__ = tuple(sorted(__protobuf__.manifest))
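# A hedged construction sketch: proto-plus messages accept keyword arguments for
# the fields declared above. The customer/ad group/feed item ids in the resource
# names below are placeholders.
if __name__ == "__main__":
    setting = AdGroupExtensionSetting(
        resource_name="customers/123/adGroupExtensionSettings/456~SITELINK",
        ad_group="customers/123/adGroups/456",
        extension_feed_items=["customers/123/extensionFeedItems/789"],
    )
    print(setting)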
|
googleads/google-ads-python
|
google/ads/googleads/v8/resources/types/ad_group_extension_setting.py
|
Python
|
apache-2.0
| 2,824
|
# coding: utf-8
'''
Author: Sean Luo
Email: Sean.S.Luo@gmail.com
Version: module
Date: 2011-12-1
'''
import sqlite3
import xlwt
import os
from mod_utils import get_cities
def make_xls(city_name):
db_path = '../data/database/' + city_name + '.db'
xls_path = '../result_data_folder_has_a_long_name/' + city_name + '.xls'
conn = sqlite3.connect(db_path)
conn.text_factory = str
file = xlwt.Workbook(encoding='utf-8')
table = file.add_sheet('average')
print ('Average')
cur = conn.cursor()
cur.execute('SELECT * FROM tbl_average')
rs = cur.fetchall()
rowcnt = 0
for result in rs:
date = result[0]
time = result[1]
so2 = result[2]
no2 = result[3]
pm10 = result[4]
table.write(rowcnt, 0, date)
table.write(rowcnt, 1, time)
table.write(rowcnt, 2, so2)
table.write(rowcnt, 3, no2)
table.write(rowcnt, 4, pm10)
rowcnt += 1
file.save(xls_path)
cur.execute('SELECT * FROM tbl_station')
rs = cur.fetchall()
for result in rs:
id = result[0]
name = result[1]
print id, name.decode('utf-8')
cur.execute('SELECT * FROM ' + name)
rset = cur.fetchall()
table = file.add_sheet(name)
rowcnt = 0
for r in rset:
date = r[0]
time = r[1]
so2 = r[2]
no2 = r[3]
pm10 = r[4]
table.write(rowcnt, 0, date)
table.write(rowcnt, 1, time)
table.write(rowcnt, 2, so2)
table.write(rowcnt, 3, no2)
table.write(rowcnt, 4, pm10)
rowcnt += 1
file.save(xls_path)
cur.close()
conn.close()
def main():
city = get_cities()
num = len(city)
cnt = 0
for one_city in city:
cnt += 1
print (str(cnt) + '/' + str(num) + '\tMake XLS for: ' + one_city)
try:
make_xls(one_city)
except:
print('Make XLS failed for:' + one_city)
print ('Job\'s done!')
    raw_input('Press ENTER to continue...')
if __name__ == '__main__':
try:
os.mkdir('../result_data_folder_has_a_long_name')
main()
except:
answer = raw_input('Excel files exist, overwrite? (y/N)')
if answer != 'y':
pass
else:
main()
|
seanluo/air-pollution
|
bin/make_xls.py
|
Python
|
apache-2.0
| 2,373
|
#!/usr/bin/env python
import webapp2
from google.appengine.api import mail
from model.model import UserData
class EmailTaskHandler(webapp2.RequestHandler):
def post(self):
owner_id = self.request.get('owner_id')
owner_email = UserData.get_by_id(owner_id).email
if mail.is_email_valid(owner_email):
nickname = self.request.get('nickname')
            sender_address = ('Lunch Mate'
                              ' <lunchmates@lunch--mates.appspotmail.com>')
subject = 'Request to join your meeting'
body = '%s has requested to join your meeting!' % nickname
mail.send_mail(sender_address, owner_email, subject, body)
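# A hedged sketch of how a caller might enqueue this handler via the App Engine
# task queue; the '/tasks/email' route is an assumption about how EmailTaskHandler
# is mapped in the WSGI application, not something defined in this file.
#
#     from google.appengine.api import taskqueue
#     taskqueue.add(url='/tasks/email',
#                   params={'owner_id': owner_id, 'nickname': nickname})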
|
GoogleCloudPlatformBook/lunchmates-api
|
tasks/emails.py
|
Python
|
apache-2.0
| 683
|
#
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import glob
import uuid
import click
import logging
import unicodedata
from PIL import Image
from rich.traceback import install
from bidi.algorithm import get_display
from typing import cast, Set, List, IO, Any, Dict
from kraken.lib import log
from kraken.lib.progress import KrakenProgressBar
from kraken.lib.exceptions import KrakenCairoSurfaceException
from kraken.lib.exceptions import KrakenInputException
from kraken.lib.default_specs import (SEGMENTATION_HYPER_PARAMS,
RECOGNITION_HYPER_PARAMS,
SEGMENTATION_SPEC,
RECOGNITION_SPEC)
APP_NAME = 'kraken'
logging.captureWarnings(True)
logger = logging.getLogger('kraken')
# install rich traceback handler
install(suppress=[click])
# raise default max image size to 20k * 20k pixels
Image.MAX_IMAGE_PIXELS = 20000 ** 2
def message(msg, **styles):
if logger.getEffectiveLevel() >= 30:
click.secho(msg, **styles)
@click.group()
@click.version_option()
@click.pass_context
@click.option('-v', '--verbose', default=0, count=True)
@click.option('-s', '--seed', default=None, type=click.INT,
help='Seed for numpy\'s and torch\'s RNG. Set to a fixed value to '
'ensure reproducible random splits of data')
def cli(ctx, verbose, seed):
if seed:
import numpy.random
numpy.random.seed(seed)
from torch import manual_seed
manual_seed(seed)
ctx.meta['verbose'] = verbose
log.set_logger(logger, level=30 - min(10 * verbose, 20))
def _validate_manifests(ctx, param, value):
images = []
for manifest in value:
for entry in manifest.readlines():
im_p = entry.rstrip('\r\n')
if os.path.isfile(im_p):
images.append(im_p)
else:
logger.warning('Invalid entry "{}" in {}'.format(im_p, manifest.name))
return images
def _expand_gt(ctx, param, value):
images = []
for expression in value:
images.extend([x for x in glob.iglob(expression, recursive=True) if os.path.isfile(x)])
return images
def _validate_merging(ctx, param, value):
"""
Maps baseline/region merging to a dict of merge structures.
"""
if not value:
return None
merge_dict = {} # type: Dict[str, str]
try:
for m in value:
k, v = m.split(':')
merge_dict[v] = k # type: ignore
except Exception:
raise click.BadParameter('Mappings must be in format target:src')
return merge_dict
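# A hedged illustration of the mapping direction: values arrive as `target:src` on
# the command line but are stored keyed by source, e.g.
#     _validate_merging(None, None, ['text:paragraph', 'text:heading'])
#     returns {'paragraph': 'text', 'heading': 'text'}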
@cli.command('segtrain')
@click.pass_context
@click.option('-o', '--output', show_default=True, type=click.Path(), default='model', help='Output model file')
@click.option('-s', '--spec', show_default=True,
default=SEGMENTATION_SPEC,
help='VGSL spec of the baseline labeling network')
@click.option('--line-width',
show_default=True,
default=SEGMENTATION_HYPER_PARAMS['line_width'],
help='The height of each baseline in the target after scaling')
@click.option('-i', '--load', show_default=True, type=click.Path(exists=True,
readable=True), help='Load existing file to continue training')
@click.option('-F', '--freq', show_default=True, default=SEGMENTATION_HYPER_PARAMS['freq'], type=click.FLOAT,
help='Model saving and report generation frequency in epochs '
'during training. If frequency is >1 it must be an integer, '
'i.e. running validation every n-th epoch.')
@click.option('-q',
'--quit',
show_default=True,
default=SEGMENTATION_HYPER_PARAMS['quit'],
type=click.Choice(['early',
'dumb']),
help='Stop condition for training. Set to `early` for early stopping or `dumb` for fixed number of epochs')
@click.option('-N',
'--epochs',
show_default=True,
default=SEGMENTATION_HYPER_PARAMS['epochs'],
help='Number of epochs to train for')
@click.option('--min-epochs',
show_default=True,
default=SEGMENTATION_HYPER_PARAMS['min_epochs'],
help='Minimal number of epochs to train for when using early stopping.')
@click.option('--lag',
show_default=True,
default=SEGMENTATION_HYPER_PARAMS['lag'],
              help='Number of evaluations (--report frequency) to wait before stopping training without improvement')
@click.option('--min-delta',
show_default=True,
default=SEGMENTATION_HYPER_PARAMS['min_delta'],
type=click.FLOAT,
help='Minimum improvement between epochs to reset early stopping. By default it scales the delta by the best loss')
@click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('--optimizer',
show_default=True,
default=SEGMENTATION_HYPER_PARAMS['optimizer'],
type=click.Choice(['Adam',
'SGD',
'RMSprop',
'Lamb']),
help='Select optimizer')
@click.option('-r', '--lrate', show_default=True, default=SEGMENTATION_HYPER_PARAMS['lrate'], help='Learning rate')
@click.option('-m', '--momentum', show_default=True, default=SEGMENTATION_HYPER_PARAMS['momentum'], help='Momentum')
@click.option('-w', '--weight-decay', show_default=True,
default=SEGMENTATION_HYPER_PARAMS['weight_decay'], help='Weight decay')
@click.option('--schedule',
show_default=True,
type=click.Choice(['constant',
'1cycle',
'exponential',
'cosine',
'step',
'reduceonplateau']),
default=SEGMENTATION_HYPER_PARAMS['schedule'],
help='Set learning rate scheduler. For 1cycle, cycle length is determined by the `--step-size` option.')
@click.option('-g',
'--gamma',
show_default=True,
default=SEGMENTATION_HYPER_PARAMS['gamma'],
help='Decay factor for exponential, step, and reduceonplateau learning rate schedules')
@click.option('-ss',
'--step-size',
show_default=True,
default=SEGMENTATION_HYPER_PARAMS['step_size'],
help='Number of validation runs between learning rate decay for exponential and step LR schedules')
@click.option('--sched-patience',
show_default=True,
default=SEGMENTATION_HYPER_PARAMS['rop_patience'],
help='Minimal number of validation runs between LR reduction for reduceonplateau LR schedule.')
@click.option('--cos-max',
show_default=True,
default=SEGMENTATION_HYPER_PARAMS['cos_t_max'],
help='Epoch of minimal learning rate for cosine LR scheduler.')
@click.option('-p', '--partition', show_default=True, default=0.9,
help='Ground truth data partition ratio between train/validation set')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with additional paths to training data')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with paths to evaluation data. Overrides the `-p` parameter')
@click.option('--workers', show_default=True, default=1, help='Number of OpenMP threads and workers when running on CPU.')
@click.option('--load-hyper-parameters/--no-load-hyper-parameters', show_default=True, default=False,
help='When loading an existing model, retrieve hyper-parameters from the model')
@click.option('--force-binarization/--no-binarization', show_default=True,
default=False, help='Forces input images to be binary, otherwise '
'the appropriate color format will be auto-determined through the '
'network specification. Will be ignored in `path` mode.')
@click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page']), default='xml',
help='Sets the training data format. In ALTO and PageXML mode all '
'data is extracted from xml files containing both baselines and a '
'link to source images. In `path` mode arguments are image files '
'sharing a prefix up to the last extension with JSON `.path` files '
'containing the baseline information.')
@click.option('--suppress-regions/--no-suppress-regions', show_default=True,
default=False, help='Disables region segmentation training.')
@click.option('--suppress-baselines/--no-suppress-baselines', show_default=True,
default=False, help='Disables baseline segmentation training.')
@click.option('-vr', '--valid-regions', show_default=True, default=None, multiple=True,
help='Valid region types in training data. May be used multiple times.')
@click.option('-vb', '--valid-baselines', show_default=True, default=None, multiple=True,
help='Valid baseline types in training data. May be used multiple times.')
@click.option('-mr',
'--merge-regions',
show_default=True,
default=None,
help='Region merge mapping. One or more mappings of the form `$target:$src` where $src is merged into $target.',
multiple=True,
callback=_validate_merging)
@click.option('-mb',
'--merge-baselines',
show_default=True,
default=None,
help='Baseline type merge mapping. Same syntax as `--merge-regions`',
multiple=True,
callback=_validate_merging)
@click.option('-br', '--bounding-regions', show_default=True, default=None, multiple=True,
help='Regions treated as boundaries for polygonization purposes. May be used multiple times.')
@click.option('--augment/--no-augment',
show_default=True,
default=SEGMENTATION_HYPER_PARAMS['augment'],
help='Enable image augmentation')
@click.option('--resize', show_default=True, default='fail', type=click.Choice(['add', 'both', 'fail']),
help='Output layer resizing option. If set to `add` new classes will be '
'added, `both` will set the layer to match exactly '
'the training data classes, `fail` will abort if training data and model '
'classes do not match.')
@click.option('-tl', '--topline', 'topline', show_default=True, flag_value='topline',
help='Switch for the baseline location in the scripts. '
'Set to topline if the data is annotated with a hanging baseline, as is '
'common with Hebrew, Bengali, Devanagari, etc. Set to '
              'centerline for scripts annotated with a central line.')
@click.option('-cl', '--centerline', 'topline', flag_value='centerline')
@click.option('-bl', '--baseline', 'topline', flag_value='baseline', default='baseline')
@click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def segtrain(ctx, output, spec, line_width, load, freq, quit, epochs, min_epochs,
lag, min_delta, device, optimizer, lrate, momentum, weight_decay,
schedule, gamma, step_size, sched_patience, cos_max, partition,
training_files, evaluation_files, workers, load_hyper_parameters,
force_binarization, format_type, suppress_regions,
suppress_baselines, valid_regions, valid_baselines, merge_regions,
merge_baselines, bounding_regions, augment, resize, topline, ground_truth):
"""
Trains a baseline labeling model for layout analysis
"""
import shutil
from kraken.lib.train import SegmentationModel, KrakenTrainer
if resize != 'fail' and not load:
raise click.BadOptionUsage('resize', 'resize option requires loading an existing model')
if not (0 <= freq <= 1) and freq % 1.0 != 0:
raise click.BadOptionUsage('freq', 'freq needs to be either in the interval [0,1.0] or a positive integer.')
logger.info('Building ground truth set from {} document images'.format(len(ground_truth) + len(training_files)))
# populate hyperparameters from command line args
hyper_params = SEGMENTATION_HYPER_PARAMS.copy()
hyper_params.update({'line_width': line_width,
'freq': freq,
'quit': quit,
'epochs': epochs,
'min_epochs': min_epochs,
'lag': lag,
'min_delta': min_delta,
'optimizer': optimizer,
'lrate': lrate,
'momentum': momentum,
'weight_decay': weight_decay,
'schedule': schedule,
'augment': augment,
'gamma': gamma,
'step_size': step_size,
'rop_patience': sched_patience,
'cos_t_max': cos_max})
# disable automatic partition when given evaluation set explicitly
if evaluation_files:
partition = 1
ground_truth = list(ground_truth)
# merge training_files into ground_truth list
if training_files:
ground_truth.extend(training_files)
if len(ground_truth) == 0:
raise click.UsageError('No training data was provided to the train command. Use `-t` or the `ground_truth` argument.')
loc = {'topline': True,
'baseline': False,
'centerline': None}
topline = loc[topline]
if device == 'cpu':
device = None
elif device.startswith('cuda'):
device = [int(device.split(':')[-1])]
if hyper_params['freq'] > 1:
val_check_interval = {'check_val_every_n_epoch': int(hyper_params['freq'])}
else:
val_check_interval = {'val_check_interval': hyper_params['freq']}
model = SegmentationModel(hyper_params,
output=output,
spec=spec,
model=load,
training_data=ground_truth,
evaluation_data=evaluation_files,
partition=partition,
num_workers=workers,
load_hyper_parameters=load_hyper_parameters,
force_binarization=force_binarization,
format_type=format_type,
suppress_regions=suppress_regions,
suppress_baselines=suppress_baselines,
valid_regions=valid_regions,
valid_baselines=valid_baselines,
merge_regions=merge_regions,
merge_baselines=merge_baselines,
bounding_regions=bounding_regions,
resize=resize,
topline=topline)
message('Training line types:')
for k, v in model.train_set.dataset.class_mapping['baselines'].items():
message(f' {k}\t{v}\t{model.train_set.dataset.class_stats["baselines"][k]}')
message('Training region types:')
for k, v in model.train_set.dataset.class_mapping['regions'].items():
message(f' {k}\t{v}\t{model.train_set.dataset.class_stats["regions"][k]}')
if len(model.train_set) == 0:
raise click.UsageError('No valid training data was provided to the train command. Use `-t` or the `ground_truth` argument.')
trainer = KrakenTrainer(gpus=device,
max_epochs=hyper_params['epochs'] if hyper_params['quit'] == 'dumb' else -1,
min_epochs=hyper_params['min_epochs'],
enable_progress_bar=True if not ctx.meta['verbose'] else False,
**val_check_interval)
trainer.fit(model)
if quit == 'early':
message('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(
output, trainer.stopper.best_epoch, trainer.stopper.best_loss))
logger.info('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(
output, trainer.stopper.best_epoch, trainer.stopper.best_loss))
shutil.copy(f'{output}_{trainer.stopper.best_epoch}.mlmodel', f'{output}_best.mlmodel')
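# A hedged command-line sketch, assuming this click group is exposed as kraken's
# `ketos` console script; it trains a segmentation model from PageXML ground truth
# on the first GPU:
#     ketos segtrain -f page -o seg_model --device cuda:0 data/*.xml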
@cli.command('train')
@click.pass_context
@click.option('-B', '--batch-size', show_default=True, type=click.INT,
default=RECOGNITION_HYPER_PARAMS['batch_size'], help='batch sample size')
@click.option('--pad', show_default=True, type=click.INT, default=16, help='Left and right '
'padding around lines')
@click.option('-o', '--output', show_default=True, type=click.Path(), default='model', help='Output model file')
@click.option('-s', '--spec', show_default=True, default=RECOGNITION_SPEC,
help='VGSL spec of the network to train. CTC layer will be added automatically.')
@click.option('-a', '--append', show_default=True, default=None, type=click.INT,
help='Removes layers before argument and then appends spec. Only works when loading an existing model')
@click.option('-i', '--load', show_default=True, type=click.Path(exists=True,
readable=True), help='Load existing file to continue training')
@click.option('-F', '--freq', show_default=True, default=RECOGNITION_HYPER_PARAMS['freq'], type=click.FLOAT,
help='Model saving and report generation frequency in epochs '
'during training. If frequency is >1 it must be an integer, '
'i.e. running validation every n-th epoch.')
@click.option('-q',
'--quit',
show_default=True,
default=RECOGNITION_HYPER_PARAMS['quit'],
type=click.Choice(['early',
'dumb']),
              help='Stop condition for training. Set to `early` for early stopping or `dumb` for fixed number of epochs')
@click.option('-N',
'--epochs',
show_default=True,
default=RECOGNITION_HYPER_PARAMS['epochs'],
help='Number of epochs to train for')
@click.option('--min-epochs',
show_default=True,
default=RECOGNITION_HYPER_PARAMS['min_epochs'],
help='Minimal number of epochs to train for when using early stopping.')
@click.option('--lag',
show_default=True,
default=RECOGNITION_HYPER_PARAMS['lag'],
              help='Number of evaluations (--report frequency) to wait before stopping training without improvement')
@click.option('--min-delta',
show_default=True,
default=RECOGNITION_HYPER_PARAMS['min_delta'],
type=click.FLOAT,
              help='Minimum improvement between epochs to reset early stopping. By default it scales the delta by the best loss')
@click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('--optimizer',
show_default=True,
default=RECOGNITION_HYPER_PARAMS['optimizer'],
type=click.Choice(['Adam',
'SGD',
'RMSprop',
'Lamb']),
help='Select optimizer')
@click.option('-r', '--lrate', show_default=True, default=RECOGNITION_HYPER_PARAMS['lrate'], help='Learning rate')
@click.option('-m', '--momentum', show_default=True, default=RECOGNITION_HYPER_PARAMS['momentum'], help='Momentum')
@click.option('-w', '--weight-decay', show_default=True, type=float,
default=RECOGNITION_HYPER_PARAMS['weight_decay'], help='Weight decay')
@click.option('--schedule',
show_default=True,
type=click.Choice(['constant',
'1cycle',
'exponential',
'cosine',
'step',
'reduceonplateau']),
default=RECOGNITION_HYPER_PARAMS['schedule'],
help='Set learning rate scheduler. For 1cycle, cycle length is determined by the `--epoch` option.')
@click.option('-g',
'--gamma',
show_default=True,
default=RECOGNITION_HYPER_PARAMS['gamma'],
help='Decay factor for exponential, step, and reduceonplateau learning rate schedules')
@click.option('-ss',
'--step-size',
show_default=True,
default=RECOGNITION_HYPER_PARAMS['step_size'],
help='Number of validation runs between learning rate decay for exponential and step LR schedules')
@click.option('--sched-patience',
show_default=True,
default=RECOGNITION_HYPER_PARAMS['rop_patience'],
help='Minimal number of validation runs between LR reduction for reduceonplateau LR schedule.')
@click.option('--cos-max',
show_default=True,
default=RECOGNITION_HYPER_PARAMS['cos_t_max'],
help='Epoch of minimal learning rate for cosine LR scheduler.')
@click.option('-p', '--partition', show_default=True, default=0.9,
help='Ground truth data partition ratio between train/validation set')
@click.option('--fixed-splits/--ignore-fixed-split', show_default=True, default=False,
help='Whether to honor fixed splits in binary datasets.')
@click.option('-u', '--normalization', show_default=True, type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']),
default=RECOGNITION_HYPER_PARAMS['normalization'], help='Ground truth normalization')
@click.option('-n', '--normalize-whitespace/--no-normalize-whitespace', show_default=True,
default=RECOGNITION_HYPER_PARAMS['normalize_whitespace'], help='Normalizes unicode whitespace')
@click.option('-c', '--codec', show_default=True, default=None, type=click.File(mode='r', lazy=True),
help='Load a codec JSON definition (invalid if loading existing model)')
@click.option('--resize', show_default=True, default='fail', type=click.Choice(['add', 'both', 'fail']),
help='Codec/output layer resizing option. If set to `add` code '
'points will be added, `both` will set the layer to match exactly '
'the training data, `fail` will abort if training data and model '
'codec do not match.')
@click.option('--reorder/--no-reorder', show_default=True, default=True, help='Reordering of code points to display order')
@click.option('--base-dir', show_default=True, default='auto',
type=click.Choice(['L', 'R', 'auto']), help='Set base text '
'direction. This should be set to the direction used during the '
'creation of the training data. If set to `auto` it will be '
'overridden by any explicit value given in the input files.')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with additional paths to training data')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with paths to evaluation data. Overrides the `-p` parameter')
@click.option('--workers', show_default=True, default=1, help='Number of OpenMP threads and workers when running on CPU.')
@click.option('--load-hyper-parameters/--no-load-hyper-parameters', show_default=True, default=False,
help='When loading an existing model, retrieve hyperparameters from the model')
@click.option('--repolygonize/--no-repolygonize', show_default=True,
default=False, help='Repolygonizes line data in ALTO/PageXML '
'files. This ensures that the trained model is compatible with the '
'segmenter in kraken even if the original image files either do '
'not contain anything but transcriptions and baseline information '
'or the polygon data was created using a different method. Will '
'be ignored in `path` mode. Note that this option will be slow '
'and will not scale input images to the same size as the segmenter '
'does.')
@click.option('--force-binarization/--no-binarization', show_default=True,
default=False, help='Forces input images to be binary, otherwise '
'the appropriate color format will be auto-determined through the '
'network specification. Will be ignored in `path` mode.')
@click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page', 'binary']), default='path',
help='Sets the training data format. In ALTO and PageXML mode all '
'data is extracted from xml files containing both line definitions and a '
'link to source images. In `path` mode arguments are image files '
'sharing a prefix up to the last extension with `.gt.txt` text files '
                   'containing the transcription. In binary mode files are dataset '
'files containing pre-extracted text lines.')
@click.option('--augment/--no-augment',
show_default=True,
default=RECOGNITION_HYPER_PARAMS['augment'],
help='Enable image augmentation')
@click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def train(ctx, batch_size, pad, output, spec, append, load, freq, quit, epochs,
min_epochs, lag, min_delta, device, optimizer, lrate, momentum,
weight_decay, schedule, gamma, step_size, sched_patience, cos_max,
partition, fixed_splits, normalization, normalize_whitespace, codec,
resize, reorder, base_dir, training_files, evaluation_files, workers,
load_hyper_parameters, repolygonize, force_binarization, format_type,
augment, ground_truth):
"""
Trains a model from image-text pairs.
"""
if not load and append:
raise click.BadOptionUsage('append', 'append option requires loading an existing model')
if resize != 'fail' and not load:
raise click.BadOptionUsage('resize', 'resize option requires loading an existing model')
if not (0 <= freq <= 1) and freq % 1.0 != 0:
raise click.BadOptionUsage('freq', 'freq needs to be either in the interval [0,1.0] or a positive integer.')
import json
import shutil
from kraken.lib.train import RecognitionModel, KrakenTrainer
hyper_params = RECOGNITION_HYPER_PARAMS.copy()
hyper_params.update({'freq': freq,
'pad': pad,
'batch_size': batch_size,
'quit': quit,
'epochs': epochs,
'min_epochs': min_epochs,
'lag': lag,
'min_delta': min_delta,
'optimizer': optimizer,
'lrate': lrate,
'momentum': momentum,
'weight_decay': weight_decay,
'schedule': schedule,
'gamma': gamma,
'step_size': step_size,
'rop_patience': sched_patience,
'cos_t_max': cos_max,
'normalization': normalization,
'normalize_whitespace': normalize_whitespace,
'augment': augment})
# disable automatic partition when given evaluation set explicitly
if evaluation_files:
partition = 1
ground_truth = list(ground_truth)
# merge training_files into ground_truth list
if training_files:
ground_truth.extend(training_files)
if len(ground_truth) == 0:
raise click.UsageError('No training data was provided to the train command. Use `-t` or the `ground_truth` argument.')
if reorder and base_dir != 'auto':
reorder = base_dir
if codec:
logger.debug(f'Loading codec file from {codec}')
codec = json.load(codec)
if device == 'cpu':
device = None
elif device.startswith('cuda'):
device = [int(device.split(':')[-1])]
if hyper_params['freq'] > 1:
val_check_interval = {'check_val_every_n_epoch': int(hyper_params['freq'])}
else:
val_check_interval = {'val_check_interval': hyper_params['freq']}
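    # Illustrative values: --freq 5 runs validation every 5th epoch
    # (check_val_every_n_epoch), while --freq 0.25 validates four times per
    # training epoch (val_check_interval as a fraction of an epoch).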
model = RecognitionModel(hyper_params=hyper_params,
output=output,
spec=spec,
append=append,
model=load,
reorder=reorder,
training_data=ground_truth,
evaluation_data=evaluation_files,
partition=partition,
binary_dataset_split=fixed_splits,
num_workers=workers,
load_hyper_parameters=load_hyper_parameters,
repolygonize=repolygonize,
force_binarization=force_binarization,
format_type=format_type,
codec=codec,
resize=resize)
trainer = KrakenTrainer(gpus=device,
max_epochs=hyper_params['epochs'] if hyper_params['quit'] == 'dumb' else -1,
min_epochs=hyper_params['min_epochs'],
enable_progress_bar=True if not ctx.meta['verbose'] else False,
**val_check_interval)
try:
trainer.fit(model)
except KrakenInputException as e:
if e.args[0].startswith('Training data and model codec alphabets mismatch') and resize == 'fail':
raise click.BadOptionUsage('resize', 'Mismatched training data for loaded model. Set option `--resize` to `add` or `both`')
else:
raise e
if quit == 'early':
message('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(
output, model.best_epoch, model.best_metric))
logger.info('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(
output, model.best_epoch, model.best_metric))
shutil.copy(f'{output}_{model.best_epoch}.mlmodel', f'{output}_best.mlmodel')
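# Illustrative invocation of the `train` command above (hypothetical paths;
# assumes the console script is installed as `ketos`):
#   ketos train -f xml -t train.manifest -e val.manifest -o mymodel
# which trains on the XML files listed in train.manifest and writes checkpoints
# to mymodel_<epoch>.mlmodel.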
@cli.command('test')
@click.pass_context
@click.option('-B', '--batch-size', show_default=True, type=click.INT,
default=RECOGNITION_HYPER_PARAMS['batch_size'], help='Batch sample size')
@click.option('-m', '--model', show_default=True, type=click.Path(exists=True, readable=True),
multiple=True, help='Model(s) to evaluate')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with paths to evaluation data.')
@click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('--pad', show_default=True, type=click.INT, default=16, help='Left and right '
'padding around lines')
@click.option('--workers', show_default=True, default=1, help='Number of OpenMP threads when running on CPU.')
@click.option('--reorder/--no-reorder', show_default=True, default=True, help='Reordering of code points to display order')
@click.option('--base-dir', show_default=True, default='auto',
type=click.Choice(['L', 'R', 'auto']), help='Set base text '
'direction. This should be set to the direction used during the '
'creation of the training data. If set to `auto` it will be '
'overridden by any explicit value given in the input files.')
@click.option('-u', '--normalization', show_default=True, type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']),
default=None, help='Ground truth normalization')
@click.option('-n', '--normalize-whitespace/--no-normalize-whitespace',
show_default=True, default=True, help='Normalizes unicode whitespace')
@click.option('--repolygonize/--no-repolygonize', show_default=True,
default=False, help='Repolygonizes line data in ALTO/PageXML '
'files. This ensures that the trained model is compatible with the '
'segmenter in kraken even if the original image files either do '
'not contain anything but transcriptions and baseline information '
'or the polygon data was created using a different method. Will '
                                  'be ignored in `path` mode. Note that this option will be slow '
'and will not scale input images to the same size as the segmenter '
'does.')
@click.option('--force-binarization/--no-binarization', show_default=True,
default=False, help='Forces input images to be binary, otherwise '
'the appropriate color format will be auto-determined through the '
'network specification. Will be ignored in `path` mode.')
@click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page', 'binary']), default='path',
help='Sets the training data format. In ALTO and PageXML mode all '
'data is extracted from xml files containing both baselines and a '
'link to source images. In `path` mode arguments are image files '
                   'sharing a prefix up to the last extension with `.gt.txt` text files '
                   'containing the transcription. In `binary` mode files are '
'collections of pre-extracted text line images.')
@click.argument('test_set', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def test(ctx, batch_size, model, evaluation_files, device, pad, workers,
reorder, base_dir, normalization, normalize_whitespace, repolygonize,
force_binarization, format_type, test_set):
"""
Evaluate on a test set.
"""
if not model:
raise click.UsageError('No model to evaluate given.')
import numpy as np
from torch.utils.data import DataLoader
from kraken.serialization import render_report
from kraken.lib import models
from kraken.lib.xml import preparse_xml_data
from kraken.lib.dataset import (global_align, compute_confusions,
PolygonGTDataset, GroundTruthDataset,
ImageInputTransforms,
ArrowIPCRecognitionDataset,
collate_sequences)
logger.info('Building test set from {} line images'.format(len(test_set) + len(evaluation_files)))
nn = {}
for p in model:
message('Loading model {}\t'.format(p), nl=False)
nn[p] = models.load_any(p)
message('\u2713', fg='green')
test_set = list(test_set)
# set number of OpenMP threads
next(iter(nn.values())).nn.set_num_threads(1)
if evaluation_files:
test_set.extend(evaluation_files)
if len(test_set) == 0:
raise click.UsageError('No evaluation data was provided to the test command. Use `-e` or the `test_set` argument.')
if format_type in ['xml', 'page', 'alto']:
if repolygonize:
message('Repolygonizing data')
test_set = preparse_xml_data(test_set, format_type, repolygonize)
valid_norm = False
DatasetClass = PolygonGTDataset
elif format_type == 'binary':
DatasetClass = ArrowIPCRecognitionDataset
if repolygonize:
logger.warning('Repolygonization enabled in `binary` mode. Will be ignored.')
test_set = [{'file': file} for file in test_set]
valid_norm = False
else:
DatasetClass = GroundTruthDataset
if force_binarization:
logger.warning('Forced binarization enabled in `path` mode. Will be ignored.')
force_binarization = False
if repolygonize:
logger.warning('Repolygonization enabled in `path` mode. Will be ignored.')
test_set = [{'image': img} for img in test_set]
valid_norm = True
if len(test_set) == 0:
raise click.UsageError('No evaluation data was provided to the test command. Use `-e` or the `test_set` argument.')
if reorder and base_dir != 'auto':
reorder = base_dir
acc_list = []
for p, net in nn.items():
algn_gt: List[str] = []
algn_pred: List[str] = []
chars = 0
error = 0
message('Evaluating {}'.format(p))
logger.info('Evaluating {}'.format(p))
batch, channels, height, width = net.nn.input
ts = ImageInputTransforms(batch, height, width, channels, pad, valid_norm, force_binarization)
ds = DatasetClass(normalization=normalization,
whitespace_normalization=normalize_whitespace,
reorder=reorder,
im_transforms=ts)
for line in test_set:
try:
ds.add(**line)
except KrakenInputException as e:
logger.info(e)
# don't encode validation set as the alphabets may not match causing encoding failures
ds.no_encode()
ds_loader = DataLoader(ds,
batch_size=batch_size,
num_workers=workers,
pin_memory=True,
collate_fn=collate_sequences)
with KrakenProgressBar() as progress:
batches = len(ds_loader)
pred_task = progress.add_task('Evaluating', total=batches, visible=True if not ctx.meta['verbose'] else False)
for batch in ds_loader:
im = batch['image']
text = batch['target']
lens = batch['seq_lens']
try:
pred = net.predict_string(im, lens)
for x, y in zip(pred, text):
chars += len(y)
c, algn1, algn2 = global_align(y, x)
algn_gt.extend(algn1)
algn_pred.extend(algn2)
error += c
except FileNotFoundError as e:
batches -= 1
progress.update(pred_task, total=batches)
logger.warning('{} {}. Skipping.'.format(e.strerror, e.filename))
except KrakenInputException as e:
batches -= 1
progress.update(pred_task, total=batches)
logger.warning(str(e))
progress.update(pred_task, advance=1)
acc_list.append((chars - error) / chars)
confusions, scripts, ins, dels, subs = compute_confusions(algn_gt, algn_pred)
rep = render_report(p, chars, error, confusions, scripts, ins, dels, subs)
logger.info(rep)
message(rep)
logger.info('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100))
message('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100))
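# Illustrative invocation of the `test` command above (hypothetical paths;
# assumes the console script is installed as `ketos`):
#   ketos test -m mymodel_best.mlmodel -f binary eval.arrow
# which prints a per-model character accuracy report for the lines in eval.arrow.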
@cli.command('extract')
@click.pass_context
@click.option('-b', '--binarize/--no-binarize', show_default=True, default=True,
help='Binarize color/grayscale images')
@click.option('-u', '--normalization', show_default=True,
type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None,
help='Normalize ground truth')
@click.option('-s', '--normalize-whitespace/--no-normalize-whitespace',
show_default=True, default=True, help='Normalizes unicode whitespace')
@click.option('-n', '--reorder/--no-reorder', default=False, show_default=True,
help='Reorder transcribed lines to display order')
@click.option('-r', '--rotate/--no-rotate', default=True, show_default=True,
help='Skip rotation of vertical lines')
@click.option('-o', '--output', type=click.Path(), default='training', show_default=True,
help='Output directory')
@click.option('--format',
default='{idx:06d}',
show_default=True,
              help='Format for extractor output. Valid fields are `src` (source file), `idx` (line number), and `uuid` (v4 uuid)')
@click.argument('transcriptions', nargs=-1, type=click.File(lazy=True))
def extract(ctx, binarize, normalization, normalize_whitespace, reorder,
rotate, output, format, transcriptions):
"""
Extracts image-text pairs from a transcription environment created using
``ketos transcribe``.
"""
import regex
import base64
from io import BytesIO
from PIL import Image
from lxml import html, etree
from kraken import binarization
try:
os.mkdir(output)
except Exception:
pass
text_transforms = []
if normalization:
text_transforms.append(lambda x: unicodedata.normalize(normalization, x))
if normalize_whitespace:
text_transforms.append(lambda x: regex.sub(r'\s', ' ', x))
if reorder:
text_transforms.append(get_display)
idx = 0
manifest = []
with KrakenProgressBar() as progress:
read_task = progress.add_task('Reading transcriptions', total=len(transcriptions), visible=True if not ctx.meta['verbose'] else False)
for fp in transcriptions:
logger.info('Reading {}'.format(fp.name))
doc = html.parse(fp)
etree.strip_tags(doc, etree.Comment)
td = doc.find(".//meta[@itemprop='text_direction']")
if td is None:
td = 'horizontal-lr'
else:
td = td.attrib['content']
im = None
dest_dict = {'output': output, 'idx': 0, 'src': fp.name, 'uuid': str(uuid.uuid4())}
for section in doc.xpath('//section'):
img = section.xpath('.//img')[0].get('src')
fd = BytesIO(base64.b64decode(img.split(',')[1]))
im = Image.open(fd)
if not im:
logger.info('Skipping {} because image not found'.format(fp.name))
break
if binarize:
im = binarization.nlbin(im)
for line in section.iter('li'):
if line.get('contenteditable') and (not u''.join(line.itertext()).isspace() and u''.join(line.itertext())):
dest_dict['idx'] = idx
dest_dict['uuid'] = str(uuid.uuid4())
logger.debug('Writing line {:06d}'.format(idx))
l_img = im.crop([int(x) for x in line.get('data-bbox').split(',')])
if rotate and td.startswith('vertical'):
                            l_img = l_img.rotate(90, expand=True)
l_img.save(('{output}/' + format + '.png').format(**dest_dict))
manifest.append((format + '.png').format(**dest_dict))
text = u''.join(line.itertext()).strip()
for func in text_transforms:
text = func(text)
with open(('{output}/' + format + '.gt.txt').format(**dest_dict), 'wb') as t:
t.write(text.encode('utf-8'))
idx += 1
progress.update(read_task, advance=1)
logger.info('Extracted {} lines'.format(idx))
with open('{}/manifest.txt'.format(output), 'w') as fp:
fp.write('\n'.join(manifest))
@cli.command('transcribe')
@click.pass_context
@click.option('-d', '--text-direction', default='horizontal-lr',
type=click.Choice(['horizontal-lr', 'horizontal-rl', 'vertical-lr', 'vertical-rl']),
help='Sets principal text direction', show_default=True)
@click.option('--scale', default=None, type=click.FLOAT)
@click.option('--bw/--orig', default=True, show_default=True,
help="Put nonbinarized images in output")
@click.option('-m', '--maxcolseps', default=2, type=click.INT, show_default=True)
@click.option('-b/-w', '--black_colseps/--white_colseps', default=False, show_default=True)
@click.option('-f', '--font', default='',
help='Font family to use')
@click.option('-fs', '--font-style', default=None,
help='Font style to use')
@click.option('-p', '--prefill', default=None,
help='Use given model for prefill mode.')
@click.option('--pad', show_default=True, type=(int, int), default=(0, 0),
help='Left and right padding around lines')
@click.option('-l', '--lines', type=click.Path(exists=True), show_default=True,
help='JSON file containing line coordinates')
@click.option('-o', '--output', type=click.File(mode='wb'), default='transcription.html',
help='Output file', show_default=True)
@click.argument('images', nargs=-1, type=click.File(mode='rb', lazy=True))
def transcription(ctx, text_direction, scale, bw, maxcolseps,
black_colseps, font, font_style, prefill, pad, lines, output,
images):
"""
Creates transcription environments for ground truth generation.
"""
import json
from PIL import Image
from kraken import rpred
from kraken import pageseg
from kraken import transcribe
from kraken import binarization
from kraken.lib import models
ti = transcribe.TranscriptionInterface(font, font_style)
if len(images) > 1 and lines:
raise click.UsageError('--lines option is incompatible with multiple image files')
if prefill:
logger.info('Loading model {}'.format(prefill))
message('Loading ANN', nl=False)
prefill = models.load_any(prefill)
message('\u2713', fg='green')
with KrakenProgressBar() as progress:
read_task = progress.add_task('Reading images', total=len(images), visible=True if not ctx.meta['verbose'] else False)
for fp in images:
logger.info('Reading {}'.format(fp.name))
im = Image.open(fp)
if im.mode not in ['1', 'L', 'P', 'RGB']:
logger.warning('Input {} is in {} color mode. Converting to RGB'.format(fp.name, im.mode))
im = im.convert('RGB')
logger.info('Binarizing page')
im_bin = binarization.nlbin(im)
im_bin = im_bin.convert('1')
logger.info('Segmenting page')
if not lines:
res = pageseg.segment(im_bin, text_direction, scale, maxcolseps, black_colseps, pad=pad)
else:
with click.open_file(lines, 'r') as fp:
try:
fp = cast(IO[Any], fp)
res = json.load(fp)
except ValueError as e:
raise click.UsageError('{} invalid segmentation: {}'.format(lines, str(e)))
if prefill:
it = rpred.rpred(prefill, im_bin, res.copy())
preds = []
logger.info('Recognizing')
for pred in it:
logger.debug('{}'.format(pred.prediction))
preds.append(pred)
ti.add_page(im, res, records=preds)
else:
ti.add_page(im, res)
fp.close()
progress.update(read_task, advance=1)
logger.info('Writing transcription to {}'.format(output.name))
message('Writing output ', nl=False)
ti.write(output)
message('\u2713', fg='green')
@cli.command('linegen')
@click.pass_context
@click.option('-f', '--font', default='sans',
help='Font family to render texts in.')
@click.option('-n', '--maxlines', type=click.INT, default=0,
help='Maximum number of lines to generate')
@click.option('-e', '--encoding', default='utf-8',
help='Decode text files with given codec.')
@click.option('-u', '--normalization',
type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None,
help='Normalize ground truth')
@click.option('-ur', '--renormalize',
type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None,
help='Renormalize text for rendering purposes.')
@click.option('--reorder/--no-reorder', default=False, help='Reorder code points to display order')
@click.option('-fs', '--font-size', type=click.INT, default=32,
help='Font size to render texts in.')
@click.option('-fw', '--font-weight', type=click.INT, default=400,
help='Font weight to render texts in.')
@click.option('-l', '--language',
help='RFC-3066 language tag for language-dependent font shaping')
@click.option('-ll', '--max-length', type=click.INT, default=None,
help="Discard lines above length (in Unicode codepoints).")
@click.option('--strip/--no-strip', help="Remove whitespace from start and end "
"of lines.")
@click.option('-D', '--disable-degradation', is_flag=True, help='Do not degrade '
'output lines.')
@click.option('-a', '--alpha', type=click.FLOAT, default=1.5,
help="Mean of folded normal distribution for sampling foreground pixel flip probability")
@click.option('-b', '--beta', type=click.FLOAT, default=1.5,
help="Mean of folded normal distribution for sampling background pixel flip probability")
@click.option('-d', '--distort', type=click.FLOAT, default=1.0,
help='Mean of folded normal distribution to take distortion values from')
@click.option('-ds', '--distortion-sigma', type=click.FLOAT, default=20.0,
help='Mean of folded normal distribution to take standard deviations for the '
'Gaussian kernel from')
@click.option('--legacy/--no-legacy', default=False,
help='Use ocropy-style degradations')
@click.option('-o', '--output', type=click.Path(), default='training_data',
help='Output directory')
@click.argument('text', nargs=-1, type=click.Path(exists=True))
def line_generator(ctx, font, maxlines, encoding, normalization, renormalize,
reorder, font_size, font_weight, language, max_length, strip,
disable_degradation, alpha, beta, distort, distortion_sigma,
legacy, output, text):
"""
Generates artificial text line training data.
"""
import errno
import numpy as np
from kraken import linegen
from kraken.lib.util import make_printable
lines: Set[str] = set()
if not text:
return
with KrakenProgressBar() as progress:
read_task = progress.add_task('Reading texts', total=len(text), visible=True if not ctx.meta['verbose'] else False)
for t in text:
with click.open_file(t, encoding=encoding) as fp:
logger.info('Reading {}'.format(t))
for line in fp:
lines.add(line.rstrip('\r\n'))
progress.update(read_task, advance=1)
if normalization:
lines = set([unicodedata.normalize(normalization, line) for line in lines])
if strip:
lines = set([line.strip() for line in lines])
if max_length:
lines = set([line for line in lines if len(line) < max_length])
logger.info('Read {} lines'.format(len(lines)))
message('Read {} unique lines'.format(len(lines)))
if maxlines and maxlines < len(lines):
message('Sampling {} lines\t'.format(maxlines), nl=False)
llist = list(lines)
lines = set(llist[idx] for idx in np.random.randint(0, len(llist), maxlines))
message('\u2713', fg='green')
try:
os.makedirs(output)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# calculate the alphabet and print it for verification purposes
alphabet: Set[str] = set()
for line in lines:
alphabet.update(line)
chars = []
combining = []
for char in sorted(alphabet):
k = make_printable(char)
if k != char:
combining.append(k)
else:
chars.append(k)
message('Σ (len: {})'.format(len(alphabet)))
message('Symbols: {}'.format(''.join(chars)))
if combining:
message('Combining Characters: {}'.format(', '.join(combining)))
lg = linegen.LineGenerator(font, font_size, font_weight, language)
with KrakenProgressBar() as progress:
gen_task = progress.add_task('Writing images', total=len(lines), visible=True if not ctx.meta['verbose'] else False)
for idx, line in enumerate(lines):
logger.info(line)
try:
if renormalize:
im = lg.render_line(unicodedata.normalize(renormalize, line))
else:
im = lg.render_line(line)
except KrakenCairoSurfaceException as e:
logger.info('{}: {} {}'.format(e.message, e.width, e.height))
continue
if not disable_degradation and not legacy:
im = linegen.degrade_line(im, alpha=alpha, beta=beta)
im = linegen.distort_line(im, abs(np.random.normal(distort)), abs(np.random.normal(distortion_sigma)))
elif legacy:
im = linegen.ocropy_degrade(im)
im.save('{}/{:06d}.png'.format(output, idx))
with open('{}/{:06d}.gt.txt'.format(output, idx), 'wb') as fp:
if reorder:
fp.write(get_display(line).encode('utf-8'))
else:
fp.write(line.encode('utf-8'))
progress.update(gen_task, advance=1)
@cli.command('publish')
@click.pass_context
@click.option('-i', '--metadata', show_default=True,
type=click.File(mode='r', lazy=True), help='Metadata for the '
'model. Will be prompted from the user if not given')
@click.option('-a', '--access-token', prompt=True, help='Zenodo access token')
@click.argument('model', nargs=1, type=click.Path(exists=False, readable=True, dir_okay=False))
def publish(ctx, metadata, access_token, model):
"""
Publishes a model on the zenodo model repository.
"""
import json
import pkg_resources
from functools import partial
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from kraken import repo
from kraken.lib import models
with pkg_resources.resource_stream(__name__, 'metadata.schema.json') as fp:
schema = json.load(fp)
nn = models.load_any(model)
if not metadata:
author = click.prompt('author')
affiliation = click.prompt('affiliation')
summary = click.prompt('summary')
description = click.edit('Write long form description (training data, transcription standards) of the model here')
accuracy_default = None
# take last accuracy measurement in model metadata
if 'accuracy' in nn.nn.user_metadata and nn.nn.user_metadata['accuracy']:
accuracy_default = nn.nn.user_metadata['accuracy'][-1][1] * 100
accuracy = click.prompt('accuracy on test set', type=float, default=accuracy_default)
script = [
click.prompt(
'script',
type=click.Choice(
sorted(
schema['properties']['script']['items']['enum'])),
show_choices=True)]
license = click.prompt(
'license',
type=click.Choice(
sorted(
schema['properties']['license']['enum'])),
show_choices=True)
metadata = {
'authors': [{'name': author, 'affiliation': affiliation}],
'summary': summary,
'description': description,
'accuracy': accuracy,
'license': license,
'script': script,
'name': os.path.basename(model),
'graphemes': ['a']
}
while True:
try:
validate(metadata, schema)
except ValidationError as e:
message(e.message)
metadata[e.path[-1]] = click.prompt(e.path[-1], type=float if e.schema['type'] == 'number' else str)
continue
break
else:
metadata = json.load(metadata)
validate(metadata, schema)
metadata['graphemes'] = [char for char in ''.join(nn.codec.c2l.keys())]
oid = repo.publish_model(model, metadata, access_token, partial(message, '.', nl=False))
message('\nmodel PID: {}'.format(oid))
@cli.command('compile')
@click.pass_context
@click.option('-o', '--output', show_default=True, type=click.Path(), default='model', help='Output model file')
@click.option('--workers', show_default=True, default=1, help='Number of OpenMP threads and workers when running on CPU.')
@click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page']), default='xml', show_default=True,
help='Sets the training data format. In ALTO and PageXML mode all '
'data is extracted from xml files containing both baselines and a '
'link to source images. In `path` mode arguments are image files '
'sharing a prefix up to the last extension with JSON `.path` files '
'containing the baseline information.')
@click.option('--random-split', type=float, nargs=3, default=None, show_default=True,
help='Creates a fixed random split of the input data with the '
'proportions (train, validation, test). Overrides the save split option.')
@click.option('--force-type', type=click.Choice(['bbox', 'baseline']), default=None, show_default=True,
help='Forces the dataset type to a specific value. Can be used to '
'"convert" a line strip-type collection to a baseline-style '
'dataset, e.g. to disable centerline normalization.')
@click.option('--save-splits/--ignore-splits', show_default=True, default=True,
help='Whether to serialize explicit splits contained in XML '
'files. Is ignored in `path` mode.')
@click.option('--recordbatch-size', show_default=True, default=100,
help='Minimum number of records per RecordBatch written to the '
'output file. Larger batches require more transient memory '
'but slightly improve reading performance.')
@click.argument('ground_truth', nargs=-1, type=click.Path(exists=True, dir_okay=False))
def compile(ctx, output, workers, format_type, random_split, force_type, save_splits, recordbatch_size, ground_truth):
"""
Precompiles a binary dataset from a collection of XML files.
"""
if not ground_truth:
raise click.UsageError('No training data was provided to the compile command. Use the `ground_truth` argument.')
from kraken.lib import arrow_dataset
force_type = {'bbox': 'kraken_recognition_bbox',
'baseline': 'kraken_recognition_baseline',
None: None}[force_type]
with KrakenProgressBar() as progress:
extract_task = progress.add_task('Extracting lines', total=0, start=False, visible=True if not ctx.meta['verbose'] else False)
arrow_dataset.build_binary_dataset(ground_truth,
output,
format_type,
workers,
save_splits,
random_split,
force_type,
recordbatch_size,
lambda advance, total: progress.update(extract_task, total=total, advance=advance))
message(f'Output file written to {output}')
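# Illustrative invocation of the `compile` command above (hypothetical paths;
# assumes the console script is installed as `ketos`):
#   ketos compile -f xml -o dataset.arrow --random-split 0.8 0.1 0.1 pages/*.xml
# which extracts all lines from the PageXML/ALTO files and writes a single binary
# dataset with a fixed 80/10/10 train/validation/test split.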
if __name__ == '__main__':
cli()
|
mittagessen/kraken
|
kraken/ketos.py
|
Python
|
apache-2.0
| 61,641
|
import json
import os
import re
from configparser import ConfigParser
from pprint import pprint
class Config:
raw_config_object = None
EMAILS_SECTION = ''
MICROPHONE_SECTION = ''
CAMERA_SECTION = ''
FILES_SECTION = ''
    def __init__(self, filename: str) -> None:
self.raw_config_object = self.get_config_object(filename)
self.EMAILS_SECTION = 'EMAILS'
self.MICROPHONE_SECTION = 'MICROPHONE'
self.CAMERA_SECTION = 'CAMERA'
self.FILES_SECTION = 'FILES'
self.validate()
    '''
    Needs to raise different exceptions:
    warning: if no email is specified
    error: if the picture directory is not writable
           or the microphone details are not ints
    '''
def validate(self):
# TODO add all kind of crappy rules
        if re.match(r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$',
                    self.raw_config_object[self.EMAILS_SECTION]['email']) is None:
raise ValueError('Email is not valid')
if not os.access(self.raw_config_object[self.FILES_SECTION]['picture_directory'], os.W_OK):
raise ValueError('Picture directory is not writable')
@staticmethod
def get_config_object(filename) -> dict:
config_parser = ConfigParser()
config_parser.read(filename)
return config_parser
    def serialize(self):
        # ConfigParser objects are not JSON serializable as-is; emit the sections as plain dicts.
        return json.dumps({s: dict(self.raw_config_object[s])
                           for s in self.raw_config_object.sections()})
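# Minimal usage sketch (not part of the original module): 'py_guard.ini' is a
# hypothetical config file that must provide EMAILS and FILES sections for
# validate() to pass.
if __name__ == '__main__':
    config = Config('py_guard.ini')
    pprint(config.raw_config_object.sections())
    print(config.serialize())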
|
MShel/py_guard
|
config/config.py
|
Python
|
apache-2.0
| 1,472
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supporting utilities for computing associations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from absl import flags
flags.DEFINE_string(
"association_dir", "data/train",
"Directory where the association files, such as raw proportions and "
"implicationals reside.")
flags.DEFINE_string(
"genus_filename", "raw_proportions_by_genus.tsv",
"Output for genus-feature clade affiliations.")
flags.DEFINE_string(
"family_filename", "raw_proportions_by_family.tsv",
"Output for family-feature clade affiliations.")
flags.DEFINE_string(
"neighborhood_filename", "raw_proportions_by_neighborhood.tsv",
"Output for neighborhood-feature affiliations.")
flags.DEFINE_string(
"implicational_filename", "implicational_universals.tsv",
"Output for putative implicational universals.")
FLAGS = flags.FLAGS
|
google-research/google-research
|
constrained_language_typology/compute_associations.py
|
Python
|
apache-2.0
| 1,560
|
from setuptools import setup
setup(name='uptodator',
version='1.0',
description='uptodator image service',
author='Lukasz Szczesny',
author_email='luk@wybcz.pl',
url='http://www.python.org/sigs/distutils-sig/',
)
|
uptodator/badge-service
|
setup.py
|
Python
|
apache-2.0
| 249
|
__author__ = 'prossi'
import skyscape
class TASK:
def __init__(self, obj, connection):
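        # Copy the XML element's attributes (the vCloud task's href, status, etc.)
        # directly onto the instance so they are reachable as plain attributes.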
self.__dict__ = dict(obj.attrib)
self.connection = connection
def refresh_status(self):
self.status = (self.connection.get_request(self.href)).status
return self.status
|
skyscape-cloud-services/skyscape_python
|
skyscape/skyscape_task.py
|
Python
|
apache-2.0
| 298
|
# Copyright 2012 OpenStack LLC.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import sys
from mox import ContainsKeyValue
from quantumclient import shell
from quantumclient.quantum.v2_0.port import CreatePort
from quantumclient.quantum.v2_0.port import DeletePort
from quantumclient.quantum.v2_0.port import ListPort
from quantumclient.quantum.v2_0.port import ListRouterPort
from quantumclient.quantum.v2_0.port import ShowPort
from quantumclient.quantum.v2_0.port import UpdatePort
from quantumclient.tests.unit import test_cli20
from quantumclient.tests.unit.test_cli20 import CLITestV20Base
from quantumclient.tests.unit.test_cli20 import MyApp
class CLITestV20Port(CLITestV20Base):
def test_create_port(self):
"""Create port: netid."""
resource = 'port'
cmd = CreatePort(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid]
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_full(self):
"""Create port: --mac_address mac --device_id deviceid netid."""
resource = 'port'
cmd = CreatePort(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--mac_address', 'mac', '--device_id', 'deviceid', netid]
position_names = ['network_id', 'mac_address', 'device_id']
position_values = [netid, 'mac', 'deviceid']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--mac-address', 'mac', '--device-id', 'deviceid', netid]
position_names = ['network_id', 'mac_address', 'device_id']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_tenant(self):
"""Create port: --tenant_id tenantid netid."""
resource = 'port'
cmd = CreatePort(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--tenant_id', 'tenantid', netid, ]
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
# Test dashed options
args = ['--tenant-id', 'tenantid', netid, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_port_tags(self):
"""Create port: netid mac_address device_id --tags a b."""
resource = 'port'
cmd = CreatePort(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid, '--tags', 'a', 'b']
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tags=['a', 'b'])
def test_create_port_secgroup(self):
"""Create port: --security-group sg1_id netid"""
resource = 'port'
cmd = CreatePort(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--security-group', 'sg1_id', netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg1_id']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroups(self):
"""Create port: <security_groups> netid
The <security_groups> are
--security-group sg1_id --security-group sg2_id
"""
resource = 'port'
cmd = CreatePort(MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--security-group', 'sg1_id',
'--security-group', 'sg2_id',
netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg1_id', 'sg2_id']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_list_ports(self):
"""List ports: -D."""
resources = "ports"
cmd = ListPort(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_ports_pagination(self):
resources = "ports"
cmd = ListPort(MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_ports_sort(self):
"""list ports: --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "ports"
cmd = ListPort(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_ports_limit(self):
"""list ports: -P"""
resources = "ports"
cmd = ListPort(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_list_ports_tags(self):
"""List ports: -- --tags a b."""
resources = "ports"
cmd = ListPort(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, tags=['a', 'b'])
def test_list_ports_detail_tags(self):
"""List ports: -D -- --tags a b."""
resources = "ports"
cmd = ListPort(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, detail=True, tags=['a', 'b'])
def test_list_ports_fields(self):
"""List ports: --fields a --fields b -- --fields c d."""
resources = "ports"
cmd = ListPort(MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
fields_1=['a', 'b'], fields_2=['c', 'd'])
def _test_list_router_port(self, resources, cmd,
myid, detail=False, tags=[],
fields_1=[], fields_2=[]):
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: [{'id': 'myid1', },
{'id': 'myid2', }, ], }
resstr = self.client.serialize(reses)
# url method body
query = ""
args = detail and ['-D', ] or []
if fields_1:
for field in fields_1:
args.append('--fields')
args.append(field)
args.append(myid)
if tags:
args.append('--')
args.append("--tag")
for tag in tags:
args.append(tag)
if (not tags) and fields_2:
args.append('--')
if fields_2:
args.append("--fields")
for field in fields_2:
args.append(field)
fields_1.extend(fields_2)
for field in fields_1:
if query:
query += "&fields=" + field
else:
query = "fields=" + field
for tag in tags:
if query:
query += "&tag=" + tag
else:
query = "tag=" + tag
if detail:
query = query and query + '&verbose=True' or 'verbose=True'
query = query and query + '&device_id=%s' or 'device_id=%s'
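        # Illustrative example: with detail=True, fields_1=['a'], fields_2=['c'] and
        # no tags, the assembled query is "fields=a&fields=c&verbose=True&device_id=%s"
        # before the router/device id is substituted below.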
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.end_url(path, query % myid), 'GET',
body=None,
headers=ContainsKeyValue('X-Auth-Token',
test_cli20.TOKEN)).AndReturn(
(test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertTrue('myid1' in _str)
def test_list_router_ports(self):
"""List router ports: -D."""
resources = "ports"
cmd = ListRouterPort(MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd,
self.test_id, True)
def test_list_router_ports_tags(self):
"""List router ports: -- --tags a b."""
resources = "ports"
cmd = ListRouterPort(MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd,
self.test_id, tags=['a', 'b'])
def test_list_router_ports_detail_tags(self):
"""List router ports: -D -- --tags a b."""
resources = "ports"
cmd = ListRouterPort(MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd, self.test_id,
detail=True, tags=['a', 'b'])
def test_list_router_ports_fields(self):
"""List ports: --fields a --fields b -- --fields c d."""
resources = "ports"
cmd = ListRouterPort(MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd, self.test_id,
fields_1=['a', 'b'],
fields_2=['c', 'd'])
def test_update_port(self):
"""Update port: myid --name myname --tags a b."""
resource = 'port'
cmd = UpdatePort(MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b'],
{'name': 'myname', 'tags': ['a', 'b'], }
)
def test_update_port_security_group_off(self):
"""Update port: --no-security-groups myid."""
resource = 'port'
cmd = UpdatePort(MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['--no-security-groups', 'myid'],
{'security_groups': None})
def test_show_port(self):
"""Show port: --fields id --fields name myid."""
resource = 'port'
cmd = ShowPort(MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_delete_port(self):
"""Delete port: myid."""
resource = 'port'
cmd = DeletePort(MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
|
wallnerryan/quantum_migrate
|
quantumclient/tests/unit/test_cli20_port.py
|
Python
|
apache-2.0
| 12,010
|
# Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import yaml
from vitrage.common.constants import TemplateStatus
from vitrage.common.constants import TemplateTypes as TType
from vitrage.evaluator.template_db.template_repository import \
add_templates_to_db
from vitrage import storage
from vitrage.storage.sqlalchemy import models
class TestConfiguration(object):
def add_db(self):
db_name = "sqlite:///test-%s-%s.db" % (type(self).__name__,
sys.version_info[0])
self.config(group='database', connection=db_name)
self._db = storage.get_connection_from_config()
engine = self._db._engine_facade.get_engine()
models.Base.metadata.drop_all(engine)
models.Base.metadata.create_all(engine)
return self._db
def add_templates(self, templates_dir, templates_type=TType.STANDARD):
yamls = [t for t in TestConfiguration.load_yaml_files(templates_dir)]
templates = add_templates_to_db(self._db, yamls, templates_type)
for t in templates:
if t.status == TemplateStatus.LOADING:
self._db.templates.update(t.uuid, 'status',
TemplateStatus.ACTIVE)
if t.status == TemplateStatus.DELETING:
self._db.templates.update(t.uuid, 'status',
TemplateStatus.DELETED)
return templates
@staticmethod
def load_yaml_files(path):
if os.path.isdir(path):
file_paths = [path + "/" + fn for fn in os.listdir(path)
if os.path.isfile(path + "/" + fn)]
else:
file_paths = [path]
yamls = []
for file_path in file_paths:
try:
yamls.append(TestConfiguration._load_yaml_file(file_path))
except Exception:
continue
return yamls
@staticmethod
def _load_yaml_file(path):
with open(path, 'r') as stream:
return yaml.load(stream, Loader=yaml.BaseLoader)
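# Illustrative use in a test case (hypothetical path): calling add_db() and then
# add_templates('/path/to/templates') loads every parsable YAML file in the
# directory into the DB and flips LOADING/DELETING templates to ACTIVE/DELETED.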
|
openstack/vitrage
|
vitrage/tests/functional/test_configuration.py
|
Python
|
apache-2.0
| 2,629
|
import logging
logger = logging.getLogger(__name__)
def sizeof_fmt(num):
if num is None:
        logger.error('Error: number for human readable filesize is "None"')
return None
for x in ['bytes','KB','MB','GB','TB']:
try:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
        except (TypeError, ValueError):
logger.error('Error: could not convert %s to human readable filesize' % num)
return None
return None
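# Quick self-check of the conversion above (not part of the original module);
# the expected outputs follow from successive divisions by 1024.
if __name__ == '__main__':
    print(sizeof_fmt(1536))     # 1.5 KB
    print(sizeof_fmt(1048576))  # 1.0 MB
    print(sizeof_fmt(None))     # None (an error is logged)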
|
IQSS/geoconnect
|
gc_apps/geo_utils/fsize_human_readable.py
|
Python
|
apache-2.0
| 528
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import csv
import logging
from collections import defaultdict
from textwrap import fill, indent
from pants.backend.project_info.dependees import Dependees, DependeesRequest
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import InterpreterConstraintsField
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.engine.addresses import Addresses
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem, Outputting
from pants.engine.rules import Get, MultiGet, collect_rules, goal_rule
from pants.engine.target import (
AllTargetsRequest,
AllUnexpandedTargets,
RegisteredTargetTypes,
TransitiveTargets,
TransitiveTargetsRequest,
)
from pants.engine.unions import UnionMembership
from pants.option.option_types import BoolOption
from pants.util.docutil import bin_name
logger = logging.getLogger(__name__)
class PyConstraintsSubsystem(Outputting, GoalSubsystem):
name = "py-constraints"
help = "Determine what Python interpreter constraints are used by files/targets."
summary = BoolOption(
"--summary",
default=False,
help=(
"Output a CSV summary of interpreter constraints for your whole repository. The "
"headers are `Target`, `Constraints`, `Transitive Constraints`, `# Dependencies`, "
"and `# Dependees`.\n\nThis information can be useful when prioritizing a "
"migration from one Python version to another (e.g. to Python 3). Use "
"`# Dependencies` and `# Dependees` to help prioritize which targets are easiest "
"to port (low # dependencies) and highest impact to port (high # dependees).\n\n"
"Use a tool like Pandas or Excel to process the CSV. Use the option "
"`--py-constraints-output-file=summary.csv` to write directly to a file."
),
)
class PyConstraintsGoal(Goal):
subsystem_cls = PyConstraintsSubsystem
@goal_rule
async def py_constraints(
addresses: Addresses,
console: Console,
py_constraints_subsystem: PyConstraintsSubsystem,
python_setup: PythonSetup,
registered_target_types: RegisteredTargetTypes,
union_membership: UnionMembership,
) -> PyConstraintsGoal:
if py_constraints_subsystem.summary:
if addresses:
console.print_stderr(
"The `py-constraints --summary` goal does not take file/target arguments. Run "
"`help py-constraints` for more details."
)
return PyConstraintsGoal(exit_code=1)
# TODO: Stop including the target generator? I don't think it's relevant for this goal.
all_targets = await Get(AllUnexpandedTargets, AllTargetsRequest())
all_python_targets = tuple(
t for t in all_targets if t.has_field(InterpreterConstraintsField)
)
constraints_per_tgt = [
InterpreterConstraints.create_from_targets([tgt], python_setup)
for tgt in all_python_targets
]
transitive_targets_per_tgt = await MultiGet(
Get(TransitiveTargets, TransitiveTargetsRequest([tgt.address]))
for tgt in all_python_targets
)
transitive_constraints_per_tgt = [
InterpreterConstraints.create_from_targets(transitive_targets.closure, python_setup)
for transitive_targets in transitive_targets_per_tgt
]
dependees_per_root = await MultiGet(
Get(Dependees, DependeesRequest([tgt.address], transitive=True, include_roots=False))
for tgt in all_python_targets
)
data = [
{
"Target": tgt.address.spec,
"Constraints": str(constraints),
"Transitive Constraints": str(transitive_constraints),
"# Dependencies": len(transitive_targets.dependencies),
"# Dependees": len(dependees),
}
for tgt, constraints, transitive_constraints, transitive_targets, dependees in zip(
all_python_targets,
constraints_per_tgt,
transitive_constraints_per_tgt,
transitive_targets_per_tgt,
dependees_per_root,
)
]
with py_constraints_subsystem.output_sink(console) as stdout:
writer = csv.DictWriter(
stdout,
fieldnames=[
"Target",
"Constraints",
"Transitive Constraints",
"# Dependencies",
"# Dependees",
],
)
writer.writeheader()
for entry in data:
writer.writerow(entry)
return PyConstraintsGoal(exit_code=0)
transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(addresses))
final_constraints = InterpreterConstraints.create_from_targets(
transitive_targets.closure, python_setup
)
if not final_constraints:
target_types_with_constraints = sorted(
tgt_type.alias
for tgt_type in registered_target_types.types
if tgt_type.class_has_field(InterpreterConstraintsField, union_membership)
)
logger.warning(
"No Python files/targets matched for the `py-constraints` goal. All target types with "
f"Python interpreter constraints: {', '.join(target_types_with_constraints)}"
)
return PyConstraintsGoal(exit_code=0)
constraints_to_addresses = defaultdict(set)
for tgt in transitive_targets.closure:
constraints = InterpreterConstraints.create_from_targets([tgt], python_setup)
if not constraints:
continue
constraints_to_addresses[constraints].add(tgt.address)
with py_constraints_subsystem.output(console) as output_stdout:
output_stdout(f"Final merged constraints: {final_constraints}\n")
if len(addresses) > 1:
merged_constraints_warning = (
"(These are the constraints used if you were to depend on all of the input "
"files/targets together, even though they may end up never being used together in "
"the real world. Consider using a more precise query or running "
f"`{bin_name()} py-constraints --summary`.)\n"
)
output_stdout(indent(fill(merged_constraints_warning, 80), " "))
for constraint, addrs in sorted(constraints_to_addresses.items()):
output_stdout(f"\n{constraint}\n")
for addr in sorted(addrs):
output_stdout(f" {addr}\n")
return PyConstraintsGoal(exit_code=0)
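# Illustrative (hypothetical addresses/constraints) output of the goal above when
# run without --summary:
#   Final merged constraints: CPython>=3.7,<3.10
#
#   CPython>=3.7,<3.10
#     src/python/example:lib
#     src/python/example/app.py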
def rules():
return collect_rules()
|
pantsbuild/pants
|
src/python/pants/backend/python/mixed_interpreter_constraints/py_constraints.py
|
Python
|
apache-2.0
| 6,969
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# recommended pylint: pylint ekerberos.py -d maybe-no-member,line-too-long --indent-string " "
# recommended formatting: autopep8 --indent-size 2 -i --ignore E501 ekerberos.py
DOCUMENTATION = '''This module will refresh the kerberos principal for the specified user on his own account if it will expire in the next 15 min.'''
EXAMPLES = '''
You have to specify user and password for kerberos
- name: Refresh
ekerberos: usr='root' pass='kerberos_password'
'''
from ansible.module_utils.basic import *
from subprocess import PIPE, Popen
from datetime import datetime, timedelta
from re import match
# arguments that the module gets in various actions
MODULE_ARGUMENTS = {
'usr': {'type': 'str', 'required': True},
'pass': {'type': 'str', 'required': True}
}
def execute(cmd):
proc = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
proc.wait()
return out, err
def main():
module = AnsibleModule(argument_spec=MODULE_ARGUMENTS)
    # script will only set the password at start, at creation time. If you want to change it you have to delete the user at start
usr_a = module.params.get('usr', None)
pass_a = module.params.get('pass', None)
std_o, err_o = execute('sudo -u "{0}" klist'.format(usr_a))
kerberos_renew = False
if std_o == '' or (err_o != None and err_o != ''):
kerberos_renew = True
else:
std_lines = [a.replace('\n', '').replace('\t', '') for a in std_o.split('\n')]
if match('^Default principal: {0}@.*$'.format(usr_a), std_lines[1]) == None:
kerberos_renew = True
else:
# Extracting principal expire date
expire_date = 'T'.join([a.replace(' ', '').replace('/', ':') for a in std_lines[-3].split(' ')][3:5])
# Checking if principal will expire in next 15 minutes or is already expired
if datetime.now() > datetime.strptime(expire_date, '%m:%d:%yT%H:%M:%S') - timedelta(minutes=15):
kerberos_renew = True
if kerberos_renew:
std_o, err_o = execute('echo -e "{1}\n" | sudo -u "{0}" kinit "{0}"'.format(usr_a, pass_a))
    if err_o is not None and err_o != '':
      module.fail_json(msg='Something is wrong with kerberos')
  module.exit_json(changed=kerberos_renew, msg='Everything is done')
if __name__ == '__main__':
  main()
|
trustedanalytics/one-click-deployment
|
ansible/roles/cloudera_cdh/library/ekerberos.py
|
Python
|
apache-2.0
| 2,865
|
# Copyright (c) 2016 SAP SE
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from tempest import clients
from tempest import config
from tempest.lib.services import clients as cli
CONF = config.CONF
class Manager(clients.Manager):
def __init__(self, credentials=None):
super(Manager, self).__init__(credentials)
class Clients(cli.ServiceClients):
"""Tempest stable service clients and loaded plugins service clients"""
def __init__(self, credentials, service=None):
"""Emulate the interface of Tempest's clients.Manager"""
# Identity settings
if CONF.identity.auth_version == 'v2':
identity_uri = CONF.identity.uri
else:
identity_uri = CONF.identity.uri_v3
super(Clients, self).__init__(credentials, identity_uri)
|
vakwetu/novajoin_tempest_plugin
|
novajoin_tempest_plugin/clients.py
|
Python
|
apache-2.0
| 1,301
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
# from numpy import round
from numpy import zeros
from . import LoadReductions
from .Input.Animals.TotAEU import TotAEU_f
from .Input.Animals.TotLAEU import TotLAEU
from .Input.Animals.TotPAEU import TotPAEU_f
from .Input.LandUse.Ag.AvTileDrain import AvTileDrain_f
from .Input.LandUse.AreaTotal import AreaTotal_f
from .Input.WaterBudget.AvEvapoTrans import AvEvapoTrans_f
from .Input.WaterBudget.AvGroundWater import AvGroundWater_f
from .Input.WaterBudget.AvWithdrawal import AvWithdrawal_f
from .MultiUse_Fxns.AttenN import AttenN
from .MultiUse_Fxns.Constants import NPConvert
from .MultiUse_Fxns.Erosion.AvErosion import AvErosion_f
from .MultiUse_Fxns.Erosion.AvSedYield import AvSedYield
from .MultiUse_Fxns.Erosion.AvSedYield import AvSedYield_f
from .MultiUse_Fxns.Erosion.AvStreamBankNSum import AvStreamBankNSum_f
from .MultiUse_Fxns.Erosion.SedDelivRatio import SedDelivRatio
from .MultiUse_Fxns.Runoff.AvRunoff import AvRunoff_f
from .MultiUse_Fxns.Runoff.RetentFactorN import RetentFactorN
from .Output.AvAnimalNSum.AvAnimalNSum_1 import AvAnimalNSum_1_f
from .Output.AvAnimalNSum.N7b_1 import N7b_1_f
from .Output.Loading.LuTotNitr_1 import LuTotNitr_1_f
from .Output.Loading.LuTotPhos import LuTotPhos_f
from .Output.Loading.StreamBankNSum import StreamBankNSum_f
from .enums import YesOrNo, LandUse
log = logging.getLogger(__name__)
CM_TO_M = 1 / 100
HA_TO_M2 = 10000
KG_TO_MG = 1000000
M3_TO_L = 1000
TONNE_TO_KG = 1000
def WriteOutput(z):
# DIMENSION VARIABLES FOR PREDICT CALCULATION AND SCENARIO FILE
AvOtherLuSed = 0
AvOtherLuNitr = 0
AvOtherLuPhos = 0
TotSewerSys = 0
TotNormSys = 0
TotShortSys = 0
TotSeptSys = 0
TotAvLuErosion = 0
AvTotalSed = 0
AvDisN = 0
AvTotalN = 0
AvDisP = 0
AvTotalP = 0
n2t = 0
n6t = 0
n13t = 0
n24t = 0
AreaSum = zeros(12)
# INSERT VALUES FOR BMP SCENARIO FILE FOR PREDICT APPLICATION
for l in range(z.NLU):
z.AvLuSedYield[l] = (z.AvLuSedYield[l] * z.RetentFactorSed) * (1 - z.AttenTSS)
z.AvLuDisNitr[l] = (z.AvLuDisNitr[l] * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake)) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN))
z.AvLuTotNitr[l] = (z.AvLuTotNitr[l] * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake)) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN))
z.AvLuDisPhos[l] = (z.AvLuDisPhos[l] * z.RetentFactorP) * (1 - z.AttenP)
z.AvLuTotPhos[l] = (z.AvLuTotPhos[l] * z.RetentFactorP) * (1 - z.AttenP)
# SET THE SCENARIO VALUES TO LANDUSE LOADS
for l in range(z.NRur):
if z.Landuse[l] is LandUse.HAY_PAST:
z.n2 = z.AvLuSedYield[l]
z.n6 = z.AvLuTotNitr[l]
z.n13 = z.AvLuTotPhos[l]
z.n6dn = z.AvLuDisNitr[l]
z.n13dp = z.AvLuDisPhos[l]
z.n24 = round(z.Area[l])
elif z.Landuse[l] is LandUse.CROPLAND:
z.n1 = z.AvLuSedYield[l]
z.n5 = z.AvLuTotNitr[l]
z.n12 = z.AvLuTotPhos[l]
z.n5dn = z.AvLuDisNitr[l]
z.n12dp = z.AvLuDisPhos[l]
z.n23 = round(z.Area[l])
elif z.Landuse[l] is LandUse.TURFGRASS:
z.n2t = z.AvLuSedYield[l]
z.n6t = z.AvLuTotNitr[l]
z.n13t = z.AvLuTotPhos[l]
z.n24t = round(z.Area[l])
elif z.Landuse[l] is LandUse.UNPAVED_ROAD:
z.n2d = z.AvLuSedYield[l]
z.n6d = z.AvLuTotNitr[l]
z.n13d = z.AvLuTotPhos[l]
z.n6ddn = z.AvLuDisNitr[l]
z.n13ddp = z.AvLuDisPhos[l]
else:
AvOtherLuSed = AvOtherLuSed + z.AvLuSedYield[l]
AvOtherLuNitr = AvOtherLuNitr + z.AvLuTotNitr[l]
AvOtherLuPhos = AvOtherLuPhos + z.AvLuTotPhos[l]
z.n2c = 0
z.n6c = 0
z.n13c = 0
z.n24b = 0
z.n2b = 0
z.n6b = 0
z.n13b = 0
z.n23b = 0
z.n6cdn = 0
z.n13cdp = 0
z.n6bdn = 0
z.n13bdp = 0
for l in range(z.NRur, z.NLU):
if z.Landuse[l] in [LandUse.LD_MIXED, LandUse.LD_RESIDENTIAL]:
z.n2c = z.n2c + z.AvLuSedYield[l]
z.n6c = z.n6c + z.AvLuTotNitr[l]
z.n13c = z.n13c + z.AvLuTotPhos[l]
z.n6cdn = z.n6cdn + z.AvLuDisNitr[l]
z.n13cdp = z.n13cdp + z.AvLuDisPhos[l]
z.n24b = z.n24b + round(z.Area[l])
elif z.Landuse[l] in [LandUse.MD_MIXED, LandUse.HD_MIXED,
LandUse.MD_RESIDENTIAL, LandUse.HD_RESIDENTIAL]:
z.n2b = z.n2b + z.AvLuSedYield[l]
z.n6b = z.n6b + z.AvLuTotNitr[l]
z.n13b = z.n13b + z.AvLuTotPhos[l]
z.n6bdn = z.n6bdn + z.AvLuDisNitr[l]
z.n13bdp = z.n13bdp + z.AvLuDisPhos[l]
z.n23b = z.n23b + round(z.Area[l])
# FOR POINT SOURCE
YrPointNitr = 0
YrPointPhos = 0
for i in range(0, 12):
YrPointNitr = YrPointNitr + z.PointNitr[i]
YrPointPhos = YrPointPhos + z.PointPhos[i]
# GET THE AVERAGE SEPTIC SYSTEM INFORMATION
if z.SepticFlag is YesOrNo.YES:
for i in range(12):
TotSewerSys = TotSewerSys + z.NumSewerSys[i]
TotNormSys = TotNormSys + z.NumNormalSys[i]
TotShortSys = TotShortSys + z.NumShortSys[i]
TotSeptSys = (TotSeptSys + z.NumNormalSys[i] + z.NumShortSys[i] +
z.NumPondSys[i] + z.NumDischargeSys[i])
# Set the conversion factors from metric to english
SedConvert = 1000
SedConvert = 1
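    # NOTE: SedConvert is set to 1000 and then immediately overridden to 1, so no
    # metric-to-English sediment conversion is actually applied below; this looks
    # like a carry-over from the original code and is left unchanged here.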
    # Get the animal nutrient loads
z.GRLBP = z.AvGRLostBarnPSum
z.NGLBP = z.AvNGLostBarnPSum
z.NGLManP = z.AvNGLostManPSum
# Get the fecal coliform values
z.NGLBFC = z.AvNGLostBarnFCSum
z.GRLBFC = z.AvGRLostBarnFCSum
z.GRSFC = z.AvGRStreamFC
z.GRSP = z.AvGRStreamP
# Get the initial pathogen loads
z.n139 = z.AvAnimalFCSum
z.n140 = z.AvWWOrgsSum
z.n146 = z.AvWWOrgsSum
z.n141 = z.AvSSOrgsSum
z.n147 = z.AvSSOrgsSum
z.n142 = z.AvUrbOrgsSum
z.n143 = z.AvWildOrgsSum
z.n149 = z.AvWildOrgsSum
# FARM ANIMAL LOADS
z.n14b = z.AvAnimalPSum
# Get the AEUs
z.n41j = round(TotLAEU(z.NumAnimals, z.AvgAnimalWt))
z.n41k = round(TotPAEU_f(z.NumAnimals, z.AvgAnimalWt))
z.n41l = round(TotAEU_f(z.NumAnimals, z.AvgAnimalWt))
    # CONVERT AVERAGE STREAM BANK EROSION, N AND P TO ENGLISH UNITS
z.n4 = round(z.AvStreamBankErosSum * z.RetentFactorSed * (1 - z.AttenTSS) * SedConvert)
z.n8 = round(AvStreamBankNSum_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper,
z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap,
z.SatStor_0, z.RecessionCoef, z.SeepCoef,
z.Qretention, z.PctAreaInfil, z.n25b, z.Landuse, z.TileDrainDensity, z.PointFlow,
z.StreamWithdrawal,
z.GroundWithdrawal, z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj, z.SedAFactor_0,
z.AvKF, z.AvSlope,
z.SedAAdjust, z.StreamLength, z.n42b, z.n46c, z.n85d, z.AgLength, z.n42, z.n54,
z.n85,
z.UrbBankStab, z.SedNitr,
z.BankNFrac, z.n69c, z.n45, z.n69) * NPConvert * RetentFactorN(z.ShedAreaDrainLake,
z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN)))
z.n15 = round(z.AvStreamBankPSum * NPConvert * z.RetentFactorP * (1 - z.AttenP))
# PERFORM LOAD REDUCTIONS BASED ON BMPS IN SCENARIO FILE
LoadReductions.AdjustScnLoads(z)
    # CONVERT AVERAGE STREAM BANK EROSION, N AND P TO ENGLISH UNITS
z.AvStreamBankErosSum = z.n4
z.AvStreamBankNSum = z.n8
z.AvStreamBankPSum = z.n15
z.AvAnimalFCSum = z.n145
z.AvUrbOrgsSum = z.n148
# Get the FC reduction for monthly loads
UrbanFCFrac = 0
FarmFCFrac = 0
if z.n139 > 0:
FarmFCFrac = z.n145 / z.n139
if z.n142 > 0:
UrbanFCFrac = z.n148 / z.n142
for i in range(12):
z.AvAnimalFC[i] = z.AvAnimalFC[i] * FarmFCFrac
z.AvUrbOrgs[i] = z.AvUrbOrgs[i] * UrbanFCFrac
# Reset the existing urban and animal FC loads to the reduced future loads, n145 and n148
z.n139 = z.n145
z.n142 = z.n148
# Initial pathogen total load
z.n144 = z.n139 + z.n140 + z.n141 + z.n142 + z.n143
# Reduced total pathogen loads
z.n150 = z.n145 + z.n146 + z.n147 + z.n148 + z.n149
z.AvTotalOrgsSum = z.n150
# FARM ANIMAL LOAD REDUCTION FOR N AND P
z.AvAnimalPSum = z.n14b
z.n14b = z.n14b * NPConvert
z.GRLBP = z.GRLBP * NPConvert
z.NGLBP = z.NGLBP * NPConvert
z.NGLManP = z.NGLManP * NPConvert
z.GRSP = z.AvGRStreamP * NPConvert
# RESET GWLF OUTPUT VALUES FOR RURAL LANDUSE TO REDUCED LOADS AND CONVERT SCENARIO VALUES
for l in range(z.NLU):
if z.Landuse[l] is LandUse.HAY_PAST:
z.AvLuSedYield[l] = z.n2
z.AvLuTotNitr[l] = z.n6
z.AvLuTotPhos[l] = z.n13
z.AvLuDisNitr[l] = z.n6dn
z.AvLuDisPhos[l] = z.n13dp
if z.AvLuDisNitr[l] > z.AvLuTotNitr[l]:
z.AvLuDisNitr[l] = z.AvLuTotNitr[l]
if z.AvLuDisPhos[l] > z.AvLuTotPhos[l]:
z.AvLuDisPhos[l] = z.AvLuTotPhos[l]
z.n2 = round(z.AvLuSedYield[l] * SedConvert)
z.n6 = round(z.AvLuTotNitr[l] * NPConvert)
z.n13 = round(z.AvLuTotPhos[l] * NPConvert)
if z.Area[l] > 0:
AreaSum[2] = AreaSum[2] + z.Area[l]
elif z.Landuse[l] is LandUse.CROPLAND:
z.AvLuSedYield[l] = z.n1
z.AvLuTotNitr[l] = z.n5
z.AvLuTotPhos[l] = z.n12
z.AvLuDisNitr[l] = z.n5dn
z.AvLuDisPhos[l] = z.n12dp
if z.AvLuDisNitr[l] > z.AvLuTotNitr[l]:
z.AvLuDisNitr[l] = z.AvLuTotNitr[l]
if z.AvLuDisPhos[l] > z.AvLuTotPhos[l]:
z.AvLuDisPhos[l] = z.AvLuTotPhos[l]
z.n1 = round(z.AvLuSedYield[l] * SedConvert)
z.n5 = round(z.AvLuTotNitr[l] * NPConvert)
z.n12 = round(z.AvLuTotPhos[l] * NPConvert)
if z.Area[l] > 0:
AreaSum[3] = AreaSum[3] + z.Area[l]
elif z.Landuse[l] is LandUse.UNPAVED_ROAD:
z.AvLuSedYield[l] = z.n2d
z.AvLuTotNitr[l] = z.n6d
z.AvLuTotPhos[l] = z.n13d
z.AvLuDisNitr[l] = z.n6ddn
z.AvLuDisPhos[l] = z.n13ddp
if z.AvLuDisNitr[l] > z.AvLuTotNitr[l]:
z.AvLuDisNitr[l] = z.AvLuTotNitr[l]
if z.AvLuDisPhos[l] > z.AvLuTotPhos[l]:
z.AvLuDisPhos[l] = z.AvLuTotPhos[l]
z.n2d = round(z.AvLuSedYield[l] * SedConvert)
z.n6d = round(z.AvLuTotNitr[l] * NPConvert)
z.n13d = round(z.AvLuTotPhos[l] * NPConvert)
if z.Area[l] > 0:
AreaSum[6] = AreaSum[6] + z.Area[l]
if z.AvLuDisNitr[l] > z.AvLuTotNitr[l]:
z.AvLuDisNitr[l] = z.AvLuTotNitr[l]
if z.AvLuDisPhos[l] > z.AvLuTotPhos[l]:
z.AvLuDisPhos[l] = z.AvLuTotPhos[l]
# GET THE AVERAGE TOTAL LOADS BY SOURCE
TotAvLuErosion = TotAvLuErosion + z.AvLuErosion[l]
AvTotalSed = AvTotalSed + z.AvLuSedYield[l]
AvDisN = AvDisN + z.AvLuDisNitr[l]
AvTotalN = AvTotalN + z.AvLuTotNitr[l]
AvDisP = AvDisP + z.AvLuDisPhos[l]
AvTotalP = AvTotalP + z.AvLuTotPhos[l]
# Reset the urban landuse values
for l in range(z.NRur, z.NLU):
if z.n24b > 0 and z.Landuse[l] in [LandUse.LD_MIXED, LandUse.LD_RESIDENTIAL]:
z.AvLuSedYield[l] = z.n2c * z.Area[l] / z.n24b
z.AvLuTotNitr[l] = z.n6c * z.Area[l] / z.n24b
z.AvLuTotPhos[l] = z.n13c * z.Area[l] / z.n24b
z.AvLuDisNitr[l] = z.n6cdn * z.Area[l] / z.n24b
z.AvLuDisPhos[l] = z.n13cdp * z.Area[l] / z.n24b
if z.AvLuDisNitr[l] > z.AvLuTotNitr[l]:
z.AvLuDisNitr[l] = z.AvLuTotNitr[l]
if z.AvLuDisPhos[l] > z.AvLuTotPhos[l]:
z.AvLuDisPhos[l] = z.AvLuTotPhos[l]
if z.Area[l] > 0:
AreaSum[0] = AreaSum[0] + z.Area[l]
elif z.n23b > 0 and z.Landuse[l] in [LandUse.MD_MIXED, LandUse.HD_MIXED,
LandUse.MD_RESIDENTIAL, LandUse.HD_RESIDENTIAL]:
z.AvLuSedYield[l] = z.n2b * z.Area[l] / z.n23b
z.AvLuTotNitr[l] = z.n6b * z.Area[l] / z.n23b
z.AvLuTotPhos[l] = z.n13b * z.Area[l] / z.n23b
z.AvLuDisNitr[l] = z.n6bdn * z.Area[l] / z.n23b
z.AvLuDisPhos[l] = z.n13bdp * z.Area[l] / z.n23b
if z.AvLuDisNitr[l] > z.AvLuTotNitr[l]:
z.AvLuDisNitr[l] = z.AvLuTotNitr[l]
if z.AvLuDisPhos[l] > z.AvLuTotPhos[l]:
z.AvLuDisPhos[l] = z.AvLuTotPhos[l]
if z.Area[l] > 0:
AreaSum[1] = AreaSum[1] + z.Area[l]
z.n2c = round(z.n2c * SedConvert)
z.n6c = round(z.n6c * NPConvert)
z.n13c = round(z.n13c * NPConvert)
z.n2b = round(z.n2b * SedConvert)
z.n6b = round(z.n6b * NPConvert)
z.n13b = round(z.n13b * NPConvert)
# FORMAT VALUES FOR PREDICT SCENARIO FILE
z.n22 = round(AreaTotal_f(z.Area), 0)
# OBTAIN THE AVERAGE TOTAL MONTHLY LOADS
AvMonDisN = 0
AvMonTotN = 0
AvMonDisP = 0
AvMonTotP = 0
AvMonSed = 0
AvMonEros = 0
for i in range(12):
AvMonEros = AvMonEros + \
AvErosion_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
z.AntMoist_0,
z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET,
z.DayHrs, z.MaxWaterCap,
z.SatStor_0, z.RecessionCoef, z.SeepCoef, z.Qretention, z.PctAreaInfil, z.n25b,
z.Landuse, z.TileDrainDensity, z.PointFlow,
z.StreamWithdrawal, z.GroundWithdrawal, z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj,
z.SedAFactor_0,
z.AvKF, z.AvSlope, z.SedAAdjust, z.StreamLength, z.n42b, z.n46c, z.n85d, z.AgLength,
z.n42, z.n45, z.n85, z.UrbBankStab,
z.SedDelivRatio_0, z.Acoef, z.KF, z.LS, z.C, z.P)[i]
AvMonSed = AvMonSed + (
AvSedYield_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV,
z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0, z.RecessionCoef, z.SeepCoef,
z.Qretention, z.PctAreaInfil, z.n25b, z.Landuse, z.TileDrainDensity, z.PointFlow,
z.StreamWithdrawal, z.GroundWithdrawal, z.NumAnimals, z.AvgAnimalWt,
z.StreamFlowVolAdj, z.SedAFactor_0, z.AvKF, z.AvSlope, z.SedAAdjust, z.StreamLength,
z.n42b, z.n46c, z.n85d, z.AgLength, z.n42, z.n45, z.n85, z.UrbBankStab, z.Acoef, z.KF,
z.LS, z.C, z.P, z.SedDelivRatio_0) * z.RetentFactorSed * (1 - z.AttenTSS))
AvMonDisN = AvMonDisN + (z.AvDisNitr[i] * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN)))
AvMonTotN = AvMonTotN + (z.AvTotNitr[i] * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN)))
AvMonDisP = AvMonDisP + (z.AvDisPhos[i] * z.RetentFactorP * (1 - z.AttenP))
AvMonTotP = AvMonTotP + (z.AvTotPhos[i] * z.RetentFactorP * (1 - z.AttenP))
# OBTAIN THE MONTHLY SEPTIC SYSTEM AND SEWER POPULATION VALUES
z.n47 = round(TotSeptSys / 12)
z.n49 = round(TotSeptSys / 12)
z.n53 = round(TotSewerSys / 12)
    # CONVERT GROUNDWATER N AND P REDUCED LOADS INTO ENGLISH UNITS FOR THE PREDICT SCENARIO FILE
z.n9 = round(((z.AvGroundNitrSum + z.AvTileDrainNSum) * NPConvert * RetentFactorN(z.ShedAreaDrainLake,
z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN))))
z.n16 = round(((z.AvGroundPhosSum + z.AvTileDrainPSum) * NPConvert * z.RetentFactorP * (1 - z.AttenP)))
# CONVERT ANNUAL POINT N AND P TO ENGLISH UNITS
z.n10 = round((YrPointNitr * NPConvert * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN))))
z.n17 = round((YrPointPhos * NPConvert * z.RetentFactorP * (1 - z.AttenP)))
# CONVERT AVERAGE SEPTIC N AND P TO ENGLISH UNITS
z.n11 = round((z.AvSeptNitr * NPConvert * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN))))
z.n18 = round((z.AvSeptPhos * NPConvert * z.RetentFactorP * (1 - z.AttenP)))
# ENTER THE OTHER SEDIMENT, N AND P INTO FIELDS
z.n3 = round(((AvOtherLuSed + ((z.AvTileDrainSedSum * z.RetentFactorSed * (1 - z.AttenTSS)) / 1000)) * SedConvert))
z.n7 = round((AvOtherLuNitr * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN)) * NPConvert))
z.n14 = round((AvOtherLuPhos * z.RetentFactorP * (1 - z.AttenP) * NPConvert))
# ADD TURF TO HAY/PASTURE
z.n2 = z.n2 + (n2t * SedConvert)
z.n6 = z.n6 + (n6t * NPConvert)
z.n13 = z.n13 + (n13t * NPConvert)
z.n24 = z.n24 + n24t
    # Multiply sediment loads by 1000 to get them into kg before writing to the PRedICT section of the file
z.n1 = z.n1 * 1000
z.n2 = z.n2 * 1000
z.n2b = z.n2b * 1000
z.n2c = z.n2c * 1000
z.n2d = z.n2d * 1000
z.n3 = z.n3 * 1000
    # Obtain the totals for sed, N and P
z.n19 = z.n1 + z.n2 + z.n2b + z.n2c + z.n2d + z.n3 + z.n4
z.n20 = z.n5 + z.n6 + z.n6b + z.n6c + z.n6d + z.n7 + N7b_1_f(z.NYrs, z.GrazingAnimal_0, z.NumAnimals, z.AvgAnimalWt,
z.AnimalDailyN, z.NGAppNRate, z.NGPctSoilIncRate,
z.GRAppNRate,
z.GRPctSoilIncRate, z.GrazingNRate, z.GRPctManApp,
z.PctGrazing, z.GRBarnNRate, z.Prec, z.DaysMonth,
z.AWMSGrPct,
z.GrAWMSCoeffN, z.RunContPct, z.RunConCoeffN, z.n41b,
z.n85h, z.NGPctManApp, z.AWMSNgPct, z.NGBarnNRate,
z.NgAWMSCoeffN, z.n41d,
z.n85j, z.n41f, z.n85l, z.PctStreams, z.n42, z.n45,
z.n69, z.n43, z.n64) + z.n8 + z.n9 + z.n10 + z.n11
z.n21 = z.n12 + z.n13 + z.n13b + z.n13c + z.n13d + z.n14 + z.n14b + z.n15 + z.n16 + z.n17 + z.n18
# TODO: Port WriteDailyFlowFile if needed
    # WRITE OUTPUT TO THE FILE FOR DAILY FLOW
# WriteDailyFlowFile
    # SET THE SCENARIO VALUES TO LANDUSE LOADS
AvOtherLuSed = 0
AvOtherLuPhos = 0
for y in range(z.NYrs):
z.n2c = 0
z.n6c = 0
z.n13c = 0
z.n2b = 0
z.n6b = 0
z.n13b = 0
z.n6cdn = 0
z.n13cdp = 0
z.n6bdn = 0
z.n13bdp = 0
for l in range(z.NLU):
z.LuSedYield[y][l] = round((z.LuSedYield[y][l] * z.RetentFactorSed * (1 - z.AttenTSS)))
z.LuDisNitr[y][l] = round((z.LuDisNitr[y][l] * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN))))
z.LuDisPhos[y][l] = round((z.LuDisPhos[y][l] * z.RetentFactorP * (1 - z.AttenP)))
z.LuTotPhos_1[y][l] = round(
(LuTotPhos_f(z.NYrs, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.AntMoist_0, z.NRur, z.NUrb, z.CN,
z.Grow_0, z.Area, z.PhosConc, z.ManPhos, z.ManuredAreas, z.FirstManureMonth,
z.LastManureMonth, z.FirstManureMonth2, z.LastManureMonth2, z.SedDelivRatio_0, z.KF,
z.LS, z.C, z.P, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.Qretention, z.PctAreaInfil, z.Nqual,
z.LoadRateImp, z.LoadRatePerv, z.Storm, z.UrbBMPRed, z.FilterWidth, z.PctStrmBuf,
z.Acoef, z.SedPhos, z.CNI_0)[y][l] * z.RetentFactorP * (1 - z.AttenP)))
if z.Landuse[l] is LandUse.HAY_PAST:
z.n2 = z.LuSedYield[y][l]
z.n6 = \
LuTotNitr_1_f(z.NYrs, z.NRur, z.NUrb, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.AntMoist_0, z.CN,
z.Grow_0,
z.Area, z.NitrConc, z.ManNitr, z.ManuredAreas, z.FirstManureMonth, z.LastManureMonth,
z.FirstManureMonth2, z.LastManureMonth2, z.SedDelivRatio_0, z.KF, z.LS, z.C, z.P,
z.SedNitr, z.CNP_0, z.Imper, z.ISRR, z.ISRA,
z.Qretention, z.PctAreaInfil, z.LoadRateImp, z.LoadRatePerv, z.Storm, z.UrbBMPRed,
z.FilterWidth, z.PctStrmBuf, z.Acoef,
z.CNI_0, z.Nqual, z.ShedAreaDrainLake, z.RetentNLake, z.AttenFlowDist, z.AttenFlowVel,
z.AttenLossRateN)[y][l]
z.n13 = z.LuTotPhos_1[y][l]
z.n6dn = z.LuDisNitr[y][l]
z.n13dp = z.LuDisPhos[y][l]
elif z.Landuse[l] is LandUse.CROPLAND:
z.n1 = z.LuSedYield[y][l]
z.n5 = \
LuTotNitr_1_f(z.NYrs, z.NRur, z.NUrb, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.AntMoist_0, z.CN,
z.Grow_0,
z.Area, z.NitrConc, z.ManNitr, z.ManuredAreas, z.FirstManureMonth, z.LastManureMonth,
z.FirstManureMonth2, z.LastManureMonth2, z.SedDelivRatio_0, z.KF, z.LS, z.C, z.P,
z.SedNitr, z.CNP_0, z.Imper, z.ISRR, z.ISRA,
z.Qretention, z.PctAreaInfil, z.LoadRateImp, z.LoadRatePerv, z.Storm, z.UrbBMPRed,
z.FilterWidth, z.PctStrmBuf, z.Acoef,
z.CNI_0, z.Nqual, z.ShedAreaDrainLake, z.RetentNLake, z.AttenFlowDist, z.AttenFlowVel,
z.AttenLossRateN)[y][l]
z.n12 = z.LuTotPhos_1[y][l]
z.n5dn = z.LuDisNitr[y][l]
z.n12dp = z.LuDisPhos[y][l]
elif z.Landuse[l] is LandUse.UNPAVED_ROAD:
z.n2d = z.LuSedYield[y][l]
z.n6d = \
LuTotNitr_1_f(z.NYrs, z.NRur, z.NUrb, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.AntMoist_0, z.CN,
z.Grow_0,
z.Area, z.NitrConc, z.ManNitr, z.ManuredAreas, z.FirstManureMonth, z.LastManureMonth,
z.FirstManureMonth2, z.LastManureMonth2, z.SedDelivRatio_0, z.KF, z.LS, z.C, z.P,
z.SedNitr, z.CNP_0, z.Imper, z.ISRR, z.ISRA,
z.Qretention, z.PctAreaInfil, z.LoadRateImp, z.LoadRatePerv, z.Storm, z.UrbBMPRed,
z.FilterWidth, z.PctStrmBuf, z.Acoef,
z.CNI_0, z.Nqual, z.ShedAreaDrainLake, z.RetentNLake, z.AttenFlowDist, z.AttenFlowVel,
z.AttenLossRateN)[y][l]
z.n13d = z.LuTotPhos_1[y][l]
z.n6ddn = z.LuDisNitr[y][l]
z.n13ddp = z.LuDisPhos[y][l]
elif z.Landuse[l] is LandUse.TURFGRASS:
z.n2t = z.LuSedYield[y][l]
z.n6t = \
LuTotNitr_1_f(z.NYrs, z.NRur, z.NUrb, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.AntMoist_0, z.CN,
z.Grow_0,
z.Area, z.NitrConc, z.ManNitr, z.ManuredAreas, z.FirstManureMonth, z.LastManureMonth,
z.FirstManureMonth2, z.LastManureMonth2, z.SedDelivRatio_0, z.KF, z.LS, z.C, z.P,
z.SedNitr, z.CNP_0, z.Imper, z.ISRR, z.ISRA,
z.Qretention, z.PctAreaInfil, z.LoadRateImp, z.LoadRatePerv, z.Storm, z.UrbBMPRed,
z.FilterWidth, z.PctStrmBuf, z.Acoef,
z.CNI_0, z.Nqual, z.ShedAreaDrainLake, z.RetentNLake, z.AttenFlowDist, z.AttenFlowVel,
z.AttenLossRateN)[y][l]
z.n13t = z.LuTotPhos_1[y][l]
else:
AvOtherLuSed = AvOtherLuSed + z.LuSedYield[y][l]
AvOtherLuPhos = AvOtherLuPhos + z.LuTotPhos_1[y][l]
if z.Landuse[l] in [LandUse.LD_MIXED, LandUse.LD_RESIDENTIAL]:
z.n2c = z.n2c + z.LuSedYield[y][l]
z.n6c = z.n6c + \
LuTotNitr_1_f(z.NYrs, z.NRur, z.NUrb, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.AntMoist_0,
z.CN,
z.Grow_0,
z.Area, z.NitrConc, z.ManNitr, z.ManuredAreas, z.FirstManureMonth,
z.LastManureMonth,
z.FirstManureMonth2, z.LastManureMonth2, z.SedDelivRatio_0, z.KF, z.LS, z.C, z.P,
z.SedNitr, z.CNP_0, z.Imper, z.ISRR, z.ISRA,
z.Qretention, z.PctAreaInfil, z.LoadRateImp, z.LoadRatePerv, z.Storm, z.UrbBMPRed,
z.FilterWidth, z.PctStrmBuf, z.Acoef,
z.CNI_0, z.Nqual, z.ShedAreaDrainLake, z.RetentNLake, z.AttenFlowDist,
z.AttenFlowVel,
z.AttenLossRateN)[y][l]
z.n13c = z.n13c + z.LuTotPhos_1[y][l]
z.n6cdn = z.n6cdn + z.LuDisNitr[y][l]
z.n13cdp = z.n13cdp + z.LuDisPhos[y][l]
elif z.Landuse[l] in [LandUse.MD_MIXED, LandUse.HD_MIXED,
LandUse.MD_RESIDENTIAL, LandUse.HD_RESIDENTIAL]:
z.n2b = z.n2b + z.LuSedYield[y][l]
z.n6b = z.n6b + \
LuTotNitr_1_f(z.NYrs, z.NRur, z.NUrb, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.AntMoist_0,
z.CN,
z.Grow_0,
z.Area, z.NitrConc, z.ManNitr, z.ManuredAreas, z.FirstManureMonth,
z.LastManureMonth,
z.FirstManureMonth2, z.LastManureMonth2, z.SedDelivRatio_0, z.KF, z.LS, z.C, z.P,
z.SedNitr, z.CNP_0, z.Imper, z.ISRR, z.ISRA,
z.Qretention, z.PctAreaInfil, z.LoadRateImp, z.LoadRatePerv, z.Storm, z.UrbBMPRed,
z.FilterWidth, z.PctStrmBuf, z.Acoef,
z.CNI_0, z.Nqual, z.ShedAreaDrainLake, z.RetentNLake, z.AttenFlowDist,
z.AttenFlowVel,
z.AttenLossRateN)[y][l]
z.n13b = z.n13b + z.LuTotPhos_1[y][l]
z.n6bdn = z.n6bdn + z.LuDisNitr[y][l]
z.n13bdp = z.n13bdp + z.LuDisPhos[y][l]
# Convert animal loads into English units
z.GRLBP = z.GRLostBarnPSum[y]
z.NGLBP = z.NGLostBarnPSum[y]
z.NGLManP = z.NGLostManPSum[y]
# Get the fecal coliform values
z.NGLBFC = z.NGLostBarnFCSum[y]
z.GRLBFC = z.GRLostBarnFCSum[y]
z.GRSFC = z.AvGRStreamFC
z.GRSP = z.AvGRStreamP
# Get the initial pathogen loads
z.n139 = z.AnimalFCSum[y]
z.n140 = z.WWOrgsSum[y]
z.n146 = z.WWOrgsSum[y]
z.n141 = z.SSOrgsSum[y]
z.n147 = z.SSOrgsSum[y]
z.n142 = z.UrbOrgsSum[y]
z.n143 = z.WildOrgsSum[y]
z.n149 = z.WildOrgsSum[y]
# Initial pathogen total load
z.n144 = z.n139 + z.n140 + z.n141 + z.n142 + z.n143
# FARM ANIMAL LOADS
n7b = z.AnimalNSum[y]
# BUG: This is a bug in the original code.
# This should be AnimalPSum
n14b = z.AnimalNSum[y]
        # CONVERT AVERAGE STREAM BANK EROSION, N AND P TO ENGLISH UNITS
z.n4 = round((z.StreamBankErosSum[y] * z.RetentFactorSed * (1 - z.AttenTSS) * SedConvert))
z.n8 = round((StreamBankNSum_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN,
z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0,
z.RecessionCoef, z.SeepCoef, z.Qretention, z.PctAreaInfil, z.n25b, z.Landuse,
z.TileDrainDensity, z.PointFlow, z.StreamWithdrawal, z.GroundWithdrawal,
z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj, z.SedAFactor_0, z.AvKF,
z.AvSlope, z.SedAAdjust, z.StreamLength, z.n42b, z.AgLength,
z.UrbBankStab, z.SedNitr, z.BankNFrac, z.n69c, z.n45, z.n69, z.n46c, z.n42)[
y] * NPConvert * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN))))
z.n15 = round((z.StreamBankPSum[y] * NPConvert * z.RetentFactorP * (1 - z.AttenP)))
# PERFORM LOAD REDUCTIONS BASED ON BMPS IN SCENARIO FILE
LoadReductions.AdjustScnLoads(z)
        # CONVERT AVERAGE STREAM BANK EROSION, N AND P TO ENGLISH UNITS
z.StreamBankErosSum[y] = z.n4
z.StreamBankPSum[y] = z.n15
z.AnimalFCSum[y] = z.n145
z.UrbOrgsSum[y] = z.n148
# Get the FC reduction for monthly loads
UrbanFCFrac = 0
FarmFCFrac = 0
if z.n139 > 0:
FarmFCFrac = z.n145 / z.n139
if z.n142 > 0:
UrbanFCFrac = z.n148 / z.n142
for i in range(12):
z.AnimalFCSum[y] *= FarmFCFrac
z.UrbOrgsSum[y] *= UrbanFCFrac
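        # NOTE: unlike the averaged block earlier, which scales the monthly
        # z.AvAnimalFC / z.AvUrbOrgs arrays, this loop multiplies the yearly sums
        # by the fraction twelve times; this looks like a carry-over from the
        # original code and is left unchanged here.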
# Reduced total pathogen loads
n150 = z.n145 + z.n146 + z.n147 + z.n148 + z.n149
z.TotalOrgsSum[y] = n150
# FARM ANIMAL LOADS
z.AnimalNSum[y] = n7b
# BUG: This is a bug in the original code.
# This should be AnimalPSum
z.AnimalNSum[y] = n14b
# FOR ALL LAND USES
z.TotDisNitr = 0
z.TotTotNitr = 0
z.TotDisPhos = 0
z.TotTotPhos = 0
z.TotSedyield = 0
z.LuTotNitr_2[y] = \
LuTotNitr_1_f(z.NYrs, z.NRur, z.NUrb, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.AntMoist_0, z.CN,
z.Grow_0,
z.Area, z.NitrConc, z.ManNitr, z.ManuredAreas, z.FirstManureMonth, z.LastManureMonth,
z.FirstManureMonth2, z.LastManureMonth2, z.SedDelivRatio_0, z.KF, z.LS, z.C, z.P,
z.SedNitr, z.CNP_0, z.Imper, z.ISRR, z.ISRA,
z.Qretention, z.PctAreaInfil, z.LoadRateImp, z.LoadRatePerv, z.Storm, z.UrbBMPRed,
z.FilterWidth, z.PctStrmBuf, z.Acoef,
z.CNI_0, z.Nqual, z.ShedAreaDrainLake, z.RetentNLake, z.AttenFlowDist, z.AttenFlowVel,
z.AttenLossRateN)[y]
for l in range(z.NLU):
if z.Landuse[l] is LandUse.HAY_PAST:
z.LuSedYield[y][l] = z.n2
z.LuTotNitr_2[y][l] = z.n6
z.LuTotPhos_1[y][l] = z.n13
z.LuDisNitr[y][l] = z.n6dn
z.LuDisPhos[y][l] = z.n13dp
if z.LuDisNitr[y][l] > z.LuTotNitr_2[y][l]:
z.LuDisNitr[y][l] = z.LuTotNitr_2[y][l]
if z.LuDisPhos[y][l] > z.LuTotPhos_1[y][l]:
z.LuDisPhos[y][l] = z.LuTotPhos_1[y][l]
elif z.Landuse[l] is LandUse.CROPLAND:
if z.LuDisNitr[y][l] > 0:
z.LuDisNitr[y][l] = z.LuDisNitr[y][l] * z.n5 / z.LuTotNitr_2[y][l]
if z.LuDisPhos[y][l] > 0:
z.LuDisPhos[y][l] = z.LuDisPhos[y][l] * z.n12 / z.LuTotPhos_1[y][l]
z.LuSedYield[y][l] = z.n1
z.LuTotNitr_2[y][l] = z.n5
z.LuTotPhos_1[y][l] = z.n12
z.LuDisNitr[y][l] = z.n5dn
z.LuDisPhos[y][l] = z.n12dp
elif z.Landuse[l] is LandUse.UNPAVED_ROAD:
z.LuSedYield[y][l] = z.n2d
z.LuTotNitr_2[y][l] = z.n6d
z.LuTotPhos_1[y][l] = z.n13d
z.LuDisNitr[y][l] = z.n6ddn
z.LuDisPhos[y][l] = z.n13ddp
if z.LuDisNitr[y][l] > z.LuTotNitr_2[y][l]:
z.LuDisNitr[y][l] = z.LuTotNitr_2[y][l]
if z.LuDisPhos[y][l] > z.LuTotPhos_1[y][l]:
z.LuDisPhos[y][l] = z.LuTotPhos_1[y][l]
if z.n24b > 0 and z.Landuse[l] in [LandUse.LD_MIXED, LandUse.LD_RESIDENTIAL]:
z.LuSedYield[y][l] = z.n2c * z.Area[l] / z.n24b
z.LuTotNitr_2[y][l] = z.n6c * z.Area[l] / z.n24b
z.LuTotPhos_1[y][l] = z.n13c * z.Area[l] / z.n24b
z.LuDisNitr[y][l] = z.n6cdn * z.Area[l] / z.n24b
z.LuDisPhos[y][l] = z.n13cdp * z.Area[l] / z.n24b
if z.LuDisNitr[y][l] > z.LuTotNitr_2[y][l]:
z.LuDisNitr[y][l] = z.LuTotNitr_2[y][l]
if z.LuDisPhos[y][l] > z.LuTotPhos_1[y][l]:
z.LuDisPhos[y][l] = z.LuTotPhos_1[y][l]
elif z.n23b > 0 and z.Landuse[l] in [LandUse.MD_MIXED, LandUse.HD_MIXED,
LandUse.MD_RESIDENTIAL, LandUse.HD_RESIDENTIAL]:
z.LuSedYield[y][l] = z.n2b * z.Area[l] / z.n23b
z.LuTotNitr_2[y][l] = z.n6b * z.Area[l] / z.n23b
z.LuTotPhos_1[y][l] = z.n13b * z.Area[l] / z.n23b
z.LuDisNitr[y][l] = z.n6bdn * z.Area[l] / z.n23b
z.LuDisPhos[y][l] = z.n13bdp * z.Area[l] / z.n23b
if z.LuDisNitr[y][l] > z.LuTotNitr_2[y][l]:
z.LuDisNitr[y][l] = z.LuTotNitr_2[y][l]
if z.LuDisPhos[y][l] > z.LuTotPhos_1[y][l]:
z.LuDisPhos[y][l] = z.LuTotPhos_1[y][l]
if z.LuDisNitr[y][l] > z.LuTotNitr_2[y][l]:
z.LuDisNitr[y][l] = z.LuTotNitr_2[y][l]
if z.LuDisPhos[y][l] > z.LuTotPhos_1[y][l]:
z.LuDisPhos[y][l] = z.LuTotPhos_1[y][l]
# WRITE THE RESULTS FILES INTO THE OUTPUT DIRECTORY IN METRIC UNITS
# TODO: Skipping section that prepares and writes AnnualFile and AnnCsvFile
# Lines ~630 - 921
    # WRITE THE SUMMARY FILES TO THE OUTPUT DIRECTORY IN METRIC UNITS
# TODO: For now, we are only writing the first chunk of AvgFile
    # Sum Variables for Aggregate Summary Output Files
# if FirstRun: XXX: Commented out because we don't
# have the concept of a "first run" in the port.
SumNYrs = z.NYrs
SumNRur = z.NRur
SumNUrb = z.NUrb
SumNLU = z.NLU
SumWxYrBeg = z.WxYrBeg
SumWxYrEnd = z.WxYrEnd
# Which land use sources to include in the totals.
# These are indices of this array: https://github.com/WikiWatershed/model-my-watershed/blob/415be752ea7b66ae5e1d15afe1a11cf4051dbd5e/src/mmw/mmw/settings/gwlfe_settings.py#L23-L39
# This list matches the land type in the Loads list below
# 13 was added and 4, 5, 9 removed in https://github.com/WikiWatershed/gwlf-e/pull/84
sources = (0, 1, 2, 3, 6, 7, 8, 10, 11, 12, 13)
# ha
AreaTotal = sum(z.Area[l] for l in sources)
# kg
SumSed = sum(z.AvLuSedYield[l] for l in sources) * TONNE_TO_KG
SumSed += z.AvStreamBankErosSum
# kg
SumNitr = sum(z.AvLuTotNitr[l] for l in sources)
SumNitr += z.AvStreamBankNSum
SumNitr += AvAnimalNSum_1_f(z.NYrs, z.GrazingAnimal_0, z.NumAnimals, z.AvgAnimalWt, z.AnimalDailyN, z.NGAppNRate,
z.NGPctSoilIncRate, z.GRAppNRate, z.GRPctSoilIncRate, z.GrazingNRate, z.GRPctManApp,
z.PctGrazing, z.GRBarnNRate,
z.Prec, z.DaysMonth, z.AWMSGrPct, z.GrAWMSCoeffN, z.RunContPct, z.RunConCoeffN, z.n41b,
z.n85h, z.NGPctManApp, z.AWMSNgPct,
z.NGBarnNRate, z.NgAWMSCoeffN, z.n41d, z.n85j, z.n41f, z.n85l, z.PctStreams, z.n42,
z.n45,
z.n69, z.n43, z.n64) * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN))
SumNitr += z.AvGroundNitrSum * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN))
SumNitr += YrPointNitr * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN))
SumNitr += z.AvSeptNitr * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN))
# kg
SumPhos = sum(z.AvLuTotPhos[l] for l in sources)
SumPhos += z.AvStreamBankPSum
SumPhos += z.AvAnimalPSum * z.RetentFactorP * (1 - z.AttenP)
SumPhos += z.AvGroundPhosSum * z.RetentFactorP * (1 - z.AttenP)
SumPhos += YrPointPhos * z.RetentFactorP * (1 - z.AttenP)
SumPhos += z.AvSeptPhos * z.RetentFactorP * (1 - z.AttenP)
# m^3/year
MeanFlow = (z.AvStreamFlowSum * CM_TO_M) * (AreaTotal * HA_TO_M2)
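    # z.AvStreamFlowSum appears to be a flow depth in cm; converting it to m and
    # multiplying by the watershed area in m^2 yields an annual volume in m^3.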
# Find index of month with lowest mean flow.
LowFlowMonth = z.AvStreamFlow.tolist().index(min(z.AvStreamFlow))
# m^3/year
MeanLowFlow = (z.AvStreamFlow[LowFlowMonth] * CM_TO_M) * (AreaTotal * HA_TO_M2)
# m^3/second
MeanFlowPS = MeanFlow / 31536000
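    # 31536000 = 365 days * 24 hours * 3600 seconds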
# kg/ha
if AreaTotal > 0:
LoadingRateSed = SumSed / AreaTotal
LoadingRateN = SumNitr / AreaTotal
LoadingRateP = SumPhos / AreaTotal
else:
LoadingRateSed = 0
LoadingRateN = 0
LoadingRateP = 0
# mg/l
if MeanFlow > 0:
ConcSed = (SumSed * KG_TO_MG) / (MeanFlow * M3_TO_L)
ConcN = (SumNitr * KG_TO_MG) / (MeanFlow * M3_TO_L)
ConcP = (SumPhos * KG_TO_MG) / (MeanFlow * M3_TO_L)
else:
ConcSed = 0
ConcN = 0
ConcP = 0
# mg/l
if MeanLowFlow > 0:
LFConcSed = ((AvSedYield(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV,
z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0, z.RecessionCoef, z.SeepCoef,
z.Qretention, z.PctAreaInfil, z.n25b, z.Landuse, z.TileDrainDensity, z.PointFlow,
z.StreamWithdrawal, z.GroundWithdrawal, z.NumAnimals, z.AvgAnimalWt,
z.StreamFlowVolAdj, z.SedAFactor_0, z.AvKF, z.AvSlope, z.SedAAdjust, z.StreamLength,
z.n42b, z.n46c, z.n85d, z.AgLength, z.n42, z.n45, z.n85, z.UrbBankStab, z.Acoef, z.KF,
z.LS, z.C, z.P, z.SedDelivRatio_0)[LowFlowMonth] * TONNE_TO_KG * KG_TO_MG) / (
MeanLowFlow * M3_TO_L))
LFConcN = ((z.AvTotNitr[LowFlowMonth] * KG_TO_MG) /
(MeanLowFlow * M3_TO_L))
LFConcP = ((z.AvTotPhos[LowFlowMonth] * KG_TO_MG) /
(MeanLowFlow * M3_TO_L))
else:
LFConcSed = 0
LFConcN = 0
LFConcP = 0
output = {}
# Equivalent to Line 927 of source
output['meta'] = {
'NYrs': z.NYrs,
'NRur': z.NRur,
'NUrb': z.NUrb,
'NLU': z.NLU,
'SedDelivRatio': SedDelivRatio(z.SedDelivRatio_0),
'WxYrBeg': z.WxYrBeg,
'WxYrEnd': z.WxYrEnd,
}
output['AreaTotal'] = AreaTotal
output['MeanFlow'] = MeanFlow
output['MeanFlowPerSecond'] = MeanFlowPS
# Equivalent to lines 965 - 988 of source
av_evapo_trans = AvEvapoTrans_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV,
z.PcntET, z.DayHrs,
z.MaxWaterCap) # TODO: once all of the monthly variables have been extracted, rewrite how this works
av_tile_drain = AvTileDrain_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper,
z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap,
z.SatStor_0, z.RecessionCoef, z.SeepCoef,
z.Landuse, z.TileDrainDensity)
av_withdrawal = AvWithdrawal_f(z.NYrs, z.StreamWithdrawal, z.GroundWithdrawal)
av_ground_water = AvGroundWater_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0,
z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap,
z.SatStor_0, z.RecessionCoef, z.SeepCoef, z.Landuse, z.TileDrainDensity)
av_runoff = AvRunoff_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.Qretention, z.PctAreaInfil,
z.n25b, z.CN, z.Landuse, z.TileDrainDensity)
output['monthly'] = []
for i in range(0, 12):
output['monthly'].append({
'AvPrecipitation': z.AvPrecipitation[i],
'AvEvapoTrans': av_evapo_trans[i],
'AvGroundWater': av_ground_water[i],
'AvRunoff': av_runoff[i],
'AvStreamFlow': z.AvStreamFlow[i],
'AvPtSrcFlow': z.AvPtSrcFlow[i],
'AvTileDrain': av_tile_drain[i],
'AvWithdrawal': av_withdrawal[i],
})
output['Loads'] = []
output['Loads'].append({
'Source': 'Hay/Pasture',
'Sediment': z.AvLuSedYield[0] * TONNE_TO_KG,
'TotalN': z.AvLuTotNitr[0],
'TotalP': z.AvLuTotPhos[0],
})
output['Loads'].append({
'Source': 'Cropland',
'Sediment': z.AvLuSedYield[1] * TONNE_TO_KG,
'TotalN': z.AvLuTotNitr[1],
'TotalP': z.AvLuTotPhos[1],
})
# Forest
output['Loads'].append({
'Source': 'Wooded Areas',
'Sediment': z.AvLuSedYield[2] * TONNE_TO_KG,
'TotalN': z.AvLuTotNitr[2],
'TotalP': z.AvLuTotPhos[2],
})
output['Loads'].append({
'Source': 'Wetlands',
'Sediment': z.AvLuSedYield[3] * TONNE_TO_KG,
'TotalN': z.AvLuTotNitr[3],
'TotalP': z.AvLuTotPhos[3],
})
output['Loads'].append({
'Source': 'Open Land',
'Sediment': z.AvLuSedYield[6] * TONNE_TO_KG,
'TotalN': z.AvLuTotNitr[6],
'TotalP': z.AvLuTotPhos[6],
})
# Bare Rock, Sandy Areas
output['Loads'].append({
'Source': 'Barren Areas',
'Sediment': sum(z.AvLuSedYield[l] * TONNE_TO_KG for l in (7, 8)),
'TotalN': sum(z.AvLuTotNitr[l] for l in (7, 8)),
'TotalP': sum(z.AvLuTotPhos[l] for l in (7, 8)),
})
output['Loads'].append({
'Source': 'Low-Density Mixed',
'Sediment': z.AvLuSedYield[10] * TONNE_TO_KG,
'TotalN': z.AvLuTotNitr[10],
'TotalP': z.AvLuTotPhos[10],
})
output['Loads'].append({
'Source': 'Medium-Density Mixed',
'Sediment': z.AvLuSedYield[11] * TONNE_TO_KG,
'TotalN': z.AvLuTotNitr[11],
'TotalP': z.AvLuTotPhos[11],
})
output['Loads'].append({
'Source': 'High-Density Mixed',
'Sediment': z.AvLuSedYield[12] * TONNE_TO_KG,
'TotalN': z.AvLuTotNitr[12],
'TotalP': z.AvLuTotPhos[12],
})
output['Loads'].append({
'Source': 'Low-Density Open Space',
'Sediment': z.AvLuSedYield[13] * TONNE_TO_KG,
'TotalN': z.AvLuTotNitr[13],
'TotalP': z.AvLuTotPhos[13],
})
output['Loads'].append({
'Source': 'Farm Animals',
'Sediment': 0,
'TotalN': AvAnimalNSum_1_f(z.NYrs, z.GrazingAnimal_0, z.NumAnimals, z.AvgAnimalWt, z.AnimalDailyN, z.NGAppNRate,
z.NGPctSoilIncRate, z.GRAppNRate, z.GRPctSoilIncRate, z.GrazingNRate, z.GRPctManApp,
z.PctGrazing, z.GRBarnNRate,
z.Prec, z.DaysMonth, z.AWMSGrPct, z.GrAWMSCoeffN, z.RunContPct, z.RunConCoeffN,
z.n41b,
z.n85h, z.NGPctManApp, z.AWMSNgPct,
z.NGBarnNRate, z.NgAWMSCoeffN, z.n41d, z.n85j, z.n41f, z.n85l, z.PctStreams, z.n42,
z.n45, z.n69, z.n43, z.n64) * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN)),
'TotalP': z.AvAnimalPSum * z.RetentFactorP * (1 - z.AttenP),
})
output['Loads'].append({
'Source': 'Stream Bank Erosion',
'Sediment': z.AvStreamBankErosSum,
'TotalN': z.AvStreamBankNSum,
'TotalP': z.AvStreamBankPSum,
})
output['Loads'].append({
'Source': 'Subsurface Flow',
'Sediment': 0,
'TotalN': z.AvGroundNitrSum * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN)),
'TotalP': z.AvGroundPhosSum * z.RetentFactorP * (1 - z.AttenP),
})
output['Loads'].append({
'Source': 'Point Sources',
'Sediment': 0,
'TotalN': YrPointNitr * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN)),
'TotalP': YrPointPhos * z.RetentFactorP * (1 - z.AttenP),
})
output['Loads'].append({
'Source': 'Septic Systems',
'Sediment': 0,
'TotalN': z.AvSeptNitr * RetentFactorN(z.ShedAreaDrainLake, z.RetentNLake) * (
1 - AttenN(z.AttenFlowDist, z.AttenFlowVel, z.AttenLossRateN)),
'TotalP': z.AvSeptPhos * z.RetentFactorP * (1 - z.AttenP),
})
output['SummaryLoads'] = []
output['SummaryLoads'].append({
'Source': 'Total Loads',
'Unit': 'kg',
'Sediment': SumSed,
'TotalN': SumNitr,
'TotalP': SumPhos,
})
output['SummaryLoads'].append({
'Source': 'Loading Rates',
'Unit': 'kg/ha',
'Sediment': LoadingRateSed,
'TotalN': LoadingRateN,
'TotalP': LoadingRateP,
})
output['SummaryLoads'].append({
'Source': 'Mean Annual Concentration',
'Unit': 'mg/l',
'Sediment': ConcSed,
'TotalN': ConcN,
'TotalP': ConcP,
})
output['SummaryLoads'].append({
'Source': 'Mean Low-Flow Concentration',
'Unit': 'mg/l',
'Sediment': LFConcSed,
'TotalN': LFConcN,
'TotalP': LFConcP,
})
return output
def WriteOutputSumFiles():
pass
def UrbanAreasOutput():
pass
|
WikiWatershed/gwlf-e
|
gwlfe/WriteOutputFiles.py
|
Python
|
apache-2.0
| 49,171
|
# Copyright (c) 2017 Huawei Technologies India Pvt.Limited.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutronclient.osc.v2.sfc import sfc_flow_classifier
from neutronclient.tests.unit.osc.v2.sfc import fakes
get_id = 'neutronclient.osc.v2.sfc.sfc_flow_classifier._get_id'
def _get_id(client, id_or_name, resource):
return id_or_name
class TestCreateSfcFlowClassifier(fakes.TestNeutronClientOSCV2):
_fc = fakes.FakeSfcFlowClassifier.create_flow_classifier()
columns = ('Description',
'Destination IP',
'Destination Port Range Max',
'Destination Port Range Min',
'Ethertype',
'ID',
'L7 Parameters',
'Logical Destination Port',
'Logical Source Port',
'Name',
'Project',
'Protocol',
'Source IP',
'Source Port Range Max',
'Source Port Range Min')
def get_data(self):
return (
self._fc['description'],
self._fc['destination_ip_prefix'],
self._fc['destination_port_range_max'],
self._fc['destination_port_range_min'],
self._fc['ethertype'],
self._fc['id'],
self._fc['l7_parameters'],
self._fc['logical_destination_port'],
self._fc['logical_source_port'],
self._fc['name'],
self._fc['project_id'],
self._fc['protocol'],
self._fc['source_ip_prefix'],
self._fc['source_port_range_max'],
self._fc['source_port_range_min']
)
def setUp(self):
super(TestCreateSfcFlowClassifier, self).setUp()
mock.patch(get_id, new=_get_id).start()
self.neutronclient.create_sfc_flow_classifier = mock.Mock(
return_value={'flow_classifier': self._fc})
self.data = self.get_data()
# Get the command object to test
self.cmd = sfc_flow_classifier.CreateSfcFlowClassifier(self.app,
self.namespace)
def test_create_flow_classifier_default_options(self):
arglist = [
"--logical-source-port", self._fc['logical_source_port'],
"--ethertype", self._fc['ethertype'],
self._fc['name'],
]
verifylist = [
('logical_source_port', self._fc['logical_source_port']),
('ethertype', self._fc['ethertype']),
('name', self._fc['name']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.neutronclient.create_sfc_flow_classifier.assert_called_once_with({
'flow_classifier': {
'name': self._fc['name'],
'logical_source_port': self._fc['logical_source_port'],
'ethertype': self._fc['ethertype']}
})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
def test_create_flow_classifier(self):
arglist = [
"--description", self._fc['description'],
"--ethertype", self._fc['ethertype'],
"--protocol", self._fc['protocol'],
"--source-ip-prefix", self._fc['source_ip_prefix'],
"--destination-ip-prefix", self._fc['destination_ip_prefix'],
"--logical-source-port", self._fc['logical_source_port'],
"--logical-destination-port", self._fc['logical_destination_port'],
self._fc['name'],
"--l7-parameters", 'url=path',
]
param = 'url=path'
verifylist = [
('description', self._fc['description']),
('name', self._fc['name']),
('ethertype', self._fc['ethertype']),
('protocol', self._fc['protocol']),
('source_ip_prefix', self._fc['source_ip_prefix']),
('destination_ip_prefix', self._fc['destination_ip_prefix']),
('logical_source_port', self._fc['logical_source_port']),
('logical_destination_port',
self._fc['logical_destination_port']),
('l7_parameters', param)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.neutronclient.create_sfc_flow_classifier.assert_called_once_with({
'flow_classifier': {
'name': self._fc['name'],
'description': self._fc['description'],
'ethertype': self._fc['ethertype'],
'protocol': self._fc['protocol'],
'source_ip_prefix': self._fc['source_ip_prefix'],
'destination_ip_prefix': self._fc['destination_ip_prefix'],
'logical_source_port': self._fc['logical_source_port'],
'logical_destination_port':
self._fc['logical_destination_port'],
'l7_parameters': param
}
})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
class TestDeleteSfcFlowClassifier(fakes.TestNeutronClientOSCV2):
_flow_classifier = \
fakes.FakeSfcFlowClassifier.create_flow_classifiers(count=1)
def setUp(self):
super(TestDeleteSfcFlowClassifier, self).setUp()
mock.patch(get_id, new=_get_id).start()
self.neutronclient.delete_sfc_flow_classifier = mock.Mock(
return_value=None)
self.cmd = sfc_flow_classifier.DeleteSfcFlowClassifier(self.app,
self.namespace)
def test_delete_flow_classifier(self):
client = self.app.client_manager.neutronclient
mock_flow_classifier_delete = client.delete_sfc_flow_classifier
arglist = [
self._flow_classifier[0]['id'],
]
verifylist = [
('flow_classifier', self._flow_classifier[0]['id']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
mock_flow_classifier_delete.assert_called_once_with(
self._flow_classifier[0]['id'])
self.assertIsNone(result)
class TestSetSfcFlowClassifier(fakes.TestNeutronClientOSCV2):
_flow_classifier = fakes.FakeSfcFlowClassifier.create_flow_classifier()
_flow_classifier_name = _flow_classifier['name']
_flow_classifier_id = _flow_classifier['id']
def setUp(self):
super(TestSetSfcFlowClassifier, self).setUp()
mock.patch(get_id, new=_get_id).start()
self.neutronclient.update_sfc_flow_classifier = mock.Mock(
return_value=None)
self.cmd = sfc_flow_classifier.SetSfcFlowClassifier(self.app,
self.namespace)
def test_set_flow_classifier(self):
client = self.app.client_manager.neutronclient
mock_flow_classifier_update = client.update_sfc_flow_classifier
arglist = [
self._flow_classifier_name,
'--name', 'name_updated',
'--description', 'desc_updated'
]
verifylist = [
('flow_classifier', self._flow_classifier_name),
('name', 'name_updated'),
('description', 'desc_updated'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {'flow_classifier': {
'name': 'name_updated',
'description': 'desc_updated'}}
mock_flow_classifier_update.assert_called_once_with(
self._flow_classifier_name, attrs)
self.assertIsNone(result)
class TestShowSfcFlowClassifier(fakes.TestNeutronClientOSCV2):
_fc = fakes.FakeSfcFlowClassifier.create_flow_classifier()
data = (
_fc['description'],
_fc['destination_ip_prefix'],
_fc['destination_port_range_max'],
_fc['destination_port_range_min'],
_fc['ethertype'],
_fc['id'],
_fc['l7_parameters'],
_fc['logical_destination_port'],
_fc['logical_source_port'],
_fc['name'],
_fc['project_id'],
_fc['protocol'],
_fc['source_ip_prefix'],
_fc['source_port_range_max'],
_fc['source_port_range_min']
)
_flow_classifier = {'flow_classifier': _fc}
_flow_classifier_id = _fc['id']
columns = ('Description',
'Destination IP',
'Destination Port Range Max',
'Destination Port Range Min',
'Ethertype',
'ID',
'L7 Parameters',
'Logical Destination Port',
'Logical Source Port',
'Name',
'Project',
'Protocol',
'Source IP',
'Source Port Range Max',
'Source Port Range Min')
def setUp(self):
super(TestShowSfcFlowClassifier, self).setUp()
mock.patch(get_id, new=_get_id).start()
self.neutronclient.show_sfc_flow_classifier = mock.Mock(
return_value=self._flow_classifier
)
# Get the command object to test
self.cmd = sfc_flow_classifier.ShowSfcFlowClassifier(self.app,
self.namespace)
def test_show_flow_classifier(self):
client = self.app.client_manager.neutronclient
mock_flow_classifier_show = client.show_sfc_flow_classifier
arglist = [
self._flow_classifier_id,
]
verifylist = [
('flow_classifier', self._flow_classifier_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
mock_flow_classifier_show.assert_called_once_with(
self._flow_classifier_id)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
class TestListSfcFlowClassifier(fakes.TestNeutronClientOSCV2):
_fc = fakes.FakeSfcFlowClassifier.create_flow_classifiers(count=1)
columns = ('ID', 'Name', 'Summary')
columns_long = ('ID', 'Name', 'Protocol', 'Ethertype', 'Source IP',
'Destination IP', 'Logical Source Port',
'Logical Destination Port', 'Source Port Range Min',
'Source Port Range Max', 'Destination Port Range Min',
'Destination Port Range Max', 'L7 Parameters',
'Description', 'Project')
_flow_classifier = _fc[0]
data = [
_flow_classifier['id'],
_flow_classifier['name'],
_flow_classifier['protocol'],
_flow_classifier['source_ip_prefix'],
_flow_classifier['destination_ip_prefix'],
_flow_classifier['logical_source_port'],
_flow_classifier['logical_destination_port']
]
data_long = [
_flow_classifier['id'],
_flow_classifier['name'],
_flow_classifier['protocol'],
_flow_classifier['ethertype'],
_flow_classifier['source_ip_prefix'],
_flow_classifier['destination_ip_prefix'],
_flow_classifier['logical_source_port'],
_flow_classifier['logical_destination_port'],
_flow_classifier['source_port_range_min'],
_flow_classifier['source_port_range_max'],
_flow_classifier['destination_port_range_min'],
_flow_classifier['destination_port_range_max'],
_flow_classifier['l7_parameters'],
_flow_classifier['description']
]
_flow_classifier1 = {'flow_classifiers': _flow_classifier}
_flow_classifier_id = _flow_classifier['id']
def setUp(self):
super(TestListSfcFlowClassifier, self).setUp()
mock.patch(get_id, new=_get_id).start()
self.neutronclient.list_sfc_flow_classifiers = mock.Mock(
return_value={'flow_classifiers': self._fc}
)
# Get the command object to test
self.cmd = sfc_flow_classifier.ListSfcFlowClassifier(self.app,
self.namespace)
def test_list_flow_classifiers(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns = self.cmd.take_action(parsed_args)
fcs = self.neutronclient \
.list_sfc_flow_classifiers()['flow_classifiers']
fc = fcs[0]
data = [
fc['id'],
fc['name'],
fc['protocol'],
fc['source_ip_prefix'],
fc['destination_ip_prefix'],
fc['logical_source_port'],
fc['logical_destination_port']
]
self.assertEqual(list(self.columns), columns[0])
self.assertEqual(self.data, data)
def test_list_with_long_option(self):
arglist = ['--long']
verifylist = [('long', True)]
fcs = self.neutronclient \
.list_sfc_flow_classifiers()['flow_classifiers']
fc = fcs[0]
data = [
fc['id'],
fc['name'],
fc['protocol'],
fc['ethertype'],
fc['source_ip_prefix'],
fc['destination_ip_prefix'],
fc['logical_source_port'],
fc['logical_destination_port'],
fc['source_port_range_min'],
fc['source_port_range_max'],
fc['destination_port_range_min'],
fc['destination_port_range_max'],
fc['l7_parameters'],
fc['description']
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns_long = self.cmd.take_action(parsed_args)[0]
self.assertEqual(list(self.columns_long), columns_long)
self.assertEqual(self.data_long, data)
|
openstack/python-neutronclient
|
neutronclient/tests/unit/osc/v2/sfc/test_flow_classifier.py
|
Python
|
apache-2.0
| 14,512
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid.framework as framework
class ConstantTest(unittest.TestCase):
def test_const_value(self):
self.assertEqual(framework.GRAD_VAR_SUFFIX, "@GRAD")
self.assertEqual(framework.TEMP_VAR_NAME, "@TEMP@")
self.assertEqual(framework.GRAD_VAR_SUFFIX, "@GRAD")
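        # Note: this repeats the GRAD_VAR_SUFFIX assertion above and may have been
        # intended for a different framework constant.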
self.assertEqual(framework.ZERO_VAR_SUFFIX, "@ZERO")
if __name__ == '__main__':
unittest.main()
|
jacquesqiao/Paddle
|
python/paddle/fluid/tests/unittests/test_const_value.py
|
Python
|
apache-2.0
| 1,037
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test suite for Matter devices with door lock endpoint."""
from typing import Type
from gazoo_device.tests.functional_tests.mixins import door_lock_cluster_suite
from gazoo_device.tests.functional_tests.utils import gdm_test_base
class DoorLockTestSuite(door_lock_cluster_suite.DoorLockTestSuite):
"""Tests for the door lock endpoint capability."""
def setup_class(self) -> None:
"""Sets the endpoint instance."""
super().setup_class()
self.endpoint = self.device.door_lock
@classmethod
def requires_pairing(cls) -> bool:
"""Returns True if the device must be paired to run this test suite."""
return False
@classmethod
def is_applicable_to(cls, device_type: str,
device_class: Type[gdm_test_base.DeviceType],
device_name: str) -> bool:
"""Determines if this test suite can run on the given device."""
return device_class.has_capabilities(["door_lock"])
|
google/gazoo-device
|
gazoo_device/tests/functional_tests/door_lock_test_suite.py
|
Python
|
apache-2.0
| 1,523
|
"""Tests for the project model."""
import unittest
from ctc.models import project as project_model
from ctc.models import user as user_model
from ctc.testing import model_helpers
from ctc.testing import testutil
# Tests don't need docstrings, so pylint: disable=C0111
class ProjectTests(testutil.CtcTestCase):
def test_populate(self):
fields = {
'name': 'name', 'overview': 'overview',
'organization_name': 'organization_name',
'organization_mission': 'organization_mission',
'organization_contact': 'organization_contact',
'details': 'details', 'collaboration_link': 'collaboration_link',
'code_link': 'code_link'}
project = project_model.Project()
populated_project = project.populate(fields)
self.assertEqual(project, populated_project)
self.assertEqual(project.name, 'name')
self.assertEqual(project.overview, 'overview')
self.assertEqual(project.organization_name, 'organization_name')
self.assertEqual(project.organization_mission, 'organization_mission')
self.assertEqual(project.organization_contact, 'organization_contact')
self.assertEqual(project.details, 'details')
self.assertEqual(project.collaboration_link, 'collaboration_link')
self.assertEqual(project.code_link, 'code_link')
def test_get_by_owner(self):
user_key = user_model.User(email='owner@codethechange.org').put()
self.assertEqual(project_model.get_by_owner(user_key), [])
project1 = model_helpers.create_project(owner_key=user_key)
project2 = model_helpers.create_project(owner_key=user_key)
other_user = user_model.User(
email='nottheowner@codethechange.org').put()
model_helpers.create_project(owner_key=other_user)
# Ordered by most recent. Doesn't include the other user's project.
expected_projects = [project2, project1]
actual_projects = project_model.get_by_owner(user_key)
self.assertEqual(expected_projects, actual_projects)
if __name__ == '__main__':
unittest.main()
|
samking/code-the-change-projects
|
ctc/models/project_test.py
|
Python
|
apache-2.0
| 2,135
|
# ccm node
from __future__ import absolute_import, with_statement
import os
import re
import shutil
import signal
import stat
import subprocess
import time
import yaml
from six import iteritems, print_
from ccmlib import common, extension, repository
from ccmlib.node import (Node, NodeError, ToolError,
handle_external_tool_process)
class DseNode(Node):
"""
    Provides interactions with a DSE node.
"""
def __init__(self, name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save=True, binary_interface=None, byteman_port='0', environment_variables=None):
super(DseNode, self).__init__(name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save, binary_interface, byteman_port, environment_variables=environment_variables)
self.get_cassandra_version()
self._dse_config_options = {}
if self.cluster.hasOpscenter():
self._copy_agent()
def get_install_cassandra_root(self):
return os.path.join(self.get_install_dir(), 'resources', 'cassandra')
def get_node_cassandra_root(self):
return os.path.join(self.get_path(), 'resources', 'cassandra')
def get_conf_dir(self):
"""
        Returns the path to the directory where the Cassandra config files are located
"""
return os.path.join(self.get_path(), 'resources', 'cassandra', 'conf')
def get_tool(self, toolname):
return common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', toolname)
def get_tool_args(self, toolname):
return [common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', 'dse'), toolname]
def get_env(self):
(node_ip, _) = self.network_interfaces['binary']
return common.make_dse_env(self.get_install_dir(), self.get_path(), node_ip)
def get_cassandra_version(self):
return common.get_dse_cassandra_version(self.get_install_dir())
def node_setup(self, version, verbose):
dir, v = repository.setup_dse(version, self.cluster.dse_username, self.cluster.dse_password, verbose=verbose)
return dir
def set_workloads(self, workloads):
self.workloads = workloads
self._update_config()
if 'solr' in self.workloads:
self.__generate_server_xml()
if 'graph' in self.workloads:
(node_ip, _) = self.network_interfaces['binary']
conf_file = os.path.join(self.get_path(), 'resources', 'dse', 'conf', 'dse.yaml')
with open(conf_file, 'r') as f:
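                # yaml.load without an explicit Loader argument is deprecated in
                # newer PyYAML releases; yaml.safe_load(f) is the safer equivalent.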
                data = yaml.safe_load(f)
graph_options = data['graph']
graph_options['gremlin_server']['host'] = node_ip
self.set_dse_configuration_options({'graph': graph_options})
self.__update_gremlin_config_yaml()
if 'dsefs' in self.workloads:
dsefs_options = {'dsefs_options': {'enabled': True,
'work_dir': os.path.join(self.get_path(), 'dsefs'),
'data_directories': [{'dir': os.path.join(self.get_path(), 'dsefs', 'data')}]}}
self.set_dse_configuration_options(dsefs_options)
if 'spark' in self.workloads:
self._update_spark_env()
def set_dse_configuration_options(self, values=None):
if values is not None:
self._dse_config_options = common.merge_configuration(self._dse_config_options, values)
self.import_dse_config_files()
def watch_log_for_alive(self, nodes, from_mark=None, timeout=720, filename='system.log'):
"""
Watch the log of this node until it detects that the provided other
nodes are marked UP. This method works similarly to watch_log_for_death.
We want to provide a higher default timeout when this is called on DSE.
"""
super(DseNode, self).watch_log_for_alive(nodes, from_mark=from_mark, timeout=timeout, filename=filename)
def get_launch_bin(self):
cdir = self.get_install_dir()
launch_bin = common.join_bin(cdir, 'bin', 'dse')
        # Copy back the dse script since profiling may have modified it the previous time
shutil.copy(launch_bin, self.get_bin_dir())
return common.join_bin(self.get_path(), 'bin', 'dse')
def add_custom_launch_arguments(self, args):
args.append('cassandra')
for workload in self.workloads:
if 'hadoop' in workload:
args.append('-t')
if 'solr' in workload:
args.append('-s')
if 'spark' in workload:
args.append('-k')
if 'cfs' in workload:
args.append('-c')
if 'graph' in workload:
args.append('-g')
def start(self,
join_ring=True,
no_wait=False,
verbose=False,
update_pid=True,
wait_other_notice=True,
replace_token=None,
replace_address=None,
jvm_args=None,
wait_for_binary_proto=False,
profile_options=None,
use_jna=False,
quiet_start=False,
allow_root=False,
set_migration_task=True):
process = super(DseNode, self).start(join_ring, no_wait, verbose, update_pid, wait_other_notice, replace_token,
replace_address, jvm_args, wait_for_binary_proto, profile_options, use_jna,
quiet_start, allow_root, set_migration_task)
if self.cluster.hasOpscenter():
            self._start_agent()
        return process
def _start_agent(self):
agent_dir = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_dir):
self._write_agent_address_yaml(agent_dir)
self._write_agent_log4j_properties(agent_dir)
args = [os.path.join(agent_dir, 'bin', common.platform_binary('datastax-agent'))]
subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def stop(self, wait=True, wait_other_notice=False, signal_event=signal.SIGTERM, **kwargs):
if self.cluster.hasOpscenter():
self._stop_agent()
return super(DseNode, self).stop(wait=wait, wait_other_notice=wait_other_notice, signal_event=signal_event, **kwargs)
def _stop_agent(self):
agent_dir = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_dir):
pidfile = os.path.join(agent_dir, 'datastax-agent.pid')
if os.path.exists(pidfile):
with open(pidfile, 'r') as f:
pid = int(f.readline().strip())
if pid is not None:
try:
os.kill(pid, signal.SIGKILL)
except OSError:
pass
os.remove(pidfile)
def nodetool(self, cmd, username=None, password=None, capture_output=True, wait=True):
if password is not None:
cmd = '-pw {} '.format(password) + cmd
if username is not None:
cmd = '-u {} '.format(username) + cmd
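        # For illustration (values assumed): a call such as
        # nodetool('status', username='cassandra', password='cassandra') forwards the
        # command to the base class as '-u cassandra -pw cassandra status'.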
return super(DseNode, self).nodetool(cmd)
def dsetool(self, cmd):
env = self.get_env()
extension.append_to_client_env(self, env)
node_ip, binary_port = self.network_interfaces['binary']
dsetool = common.join_bin(self.get_install_dir(), 'bin', 'dsetool')
args = [dsetool, '-h', node_ip, '-j', str(self.jmx_port), '-c', str(binary_port)]
args += cmd.split()
p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return handle_external_tool_process(p, args)
def dse(self, dse_options=None):
if dse_options is None:
dse_options = []
env = self.get_env()
extension.append_to_client_env(self, env)
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse]
args += dse_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def hadoop(self, hadoop_options=None):
if hadoop_options is None:
hadoop_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'hadoop']
args += hadoop_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def hive(self, hive_options=None):
if hive_options is None:
hive_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'hive']
args += hive_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def pig(self, pig_options=None):
if pig_options is None:
pig_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'pig']
args += pig_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def sqoop(self, sqoop_options=None):
if sqoop_options is None:
sqoop_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'sqoop']
args += sqoop_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def spark(self, spark_options=None):
if spark_options is None:
spark_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'spark']
args += spark_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def import_dse_config_files(self):
self._update_config()
if not os.path.isdir(os.path.join(self.get_path(), 'resources', 'dse', 'conf')):
os.makedirs(os.path.join(self.get_path(), 'resources', 'dse', 'conf'))
common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'dse', 'conf'), os.path.join(self.get_path(), 'resources', 'dse', 'conf'))
self.__update_yaml()
def copy_config_files(self):
for product in ['dse', 'cassandra', 'hadoop', 'hadoop2-client', 'sqoop', 'hive', 'tomcat', 'spark', 'shark', 'mahout', 'pig', 'solr', 'graph']:
src_conf = os.path.join(self.get_install_dir(), 'resources', product, 'conf')
dst_conf = os.path.join(self.get_path(), 'resources', product, 'conf')
if not os.path.isdir(src_conf):
continue
if os.path.isdir(dst_conf):
common.rmdirs(dst_conf)
shutil.copytree(src_conf, dst_conf)
if product == 'solr':
src_web = os.path.join(self.get_install_dir(), 'resources', product, 'web')
dst_web = os.path.join(self.get_path(), 'resources', product, 'web')
if os.path.isdir(dst_web):
common.rmdirs(dst_web)
shutil.copytree(src_web, dst_web)
if product == 'tomcat':
src_lib = os.path.join(self.get_install_dir(), 'resources', product, 'lib')
dst_lib = os.path.join(self.get_path(), 'resources', product, 'lib')
if os.path.isdir(dst_lib):
common.rmdirs(dst_lib)
if os.path.exists(src_lib):
shutil.copytree(src_lib, dst_lib)
src_webapps = os.path.join(self.get_install_dir(), 'resources', product, 'webapps')
dst_webapps = os.path.join(self.get_path(), 'resources', product, 'webapps')
if os.path.isdir(dst_webapps):
common.rmdirs(dst_webapps)
shutil.copytree(src_webapps, dst_webapps)
src_lib = os.path.join(self.get_install_dir(), 'resources', product, 'gremlin-console', 'conf')
dst_lib = os.path.join(self.get_path(), 'resources', product, 'gremlin-console', 'conf')
if os.path.isdir(dst_lib):
common.rmdirs(dst_lib)
if os.path.exists(src_lib):
shutil.copytree(src_lib, dst_lib)
def import_bin_files(self):
common.copy_directory(os.path.join(self.get_install_dir(), 'bin'), self.get_bin_dir())
cassandra_bin_dir = os.path.join(self.get_path(), 'resources', 'cassandra', 'bin')
shutil.rmtree(cassandra_bin_dir, ignore_errors=True)
os.makedirs(cassandra_bin_dir)
common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'bin'), cassandra_bin_dir)
if os.path.exists(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'tools')):
cassandra_tools_dir = os.path.join(self.get_path(), 'resources', 'cassandra', 'tools')
shutil.rmtree(cassandra_tools_dir, ignore_errors=True)
shutil.copytree(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'tools'), cassandra_tools_dir)
self.export_dse_home_in_dse_env_sh()
def export_dse_home_in_dse_env_sh(self):
'''
        Due to the way CCM lays out files, separating the repository
        from the node configurations, the `dse-env.sh` script of each
        node needs to have its DSE_HOME variable set and exported.
        Since DSE 4.5.x, the stock `dse-env.sh` file includes a
        commented-out placeholder for exactly this, intended for
        installers.
        Basically: read in the file, write it back out, and add the
        two lines.
        'sstableloader' is an example of a node script that depends on
        this when used in a CCM-built cluster.
'''
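        # For illustration (the install path shown is made up): given the stock line
        #   # This is here so the installer can force set DSE_HOME
        # the rewritten dse-env.sh gains, immediately after it,
        #   DSE_HOME=/path/to/the/dse/install_dir
        #   export DSE_HOME
        # where the actual path comes from self.get_install_dir().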
with open(self.get_bin_dir() + "/dse-env.sh", "r") as dse_env_sh:
buf = dse_env_sh.readlines()
with open(self.get_bin_dir() + "/dse-env.sh", "w") as out_file:
for line in buf:
out_file.write(line)
if line == "# This is here so the installer can force set DSE_HOME\n":
out_file.write("DSE_HOME=" + self.get_install_dir() + "\nexport DSE_HOME\n")
def _update_log4j(self):
super(DseNode, self)._update_log4j()
conf_file = os.path.join(self.get_conf_dir(), common.LOG4J_CONF)
append_pattern = 'log4j.appender.V.File='
log_file = os.path.join(self.get_path(), 'logs', 'solrvalidation.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
append_pattern = 'log4j.appender.A.File='
log_file = os.path.join(self.get_path(), 'logs', 'audit.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
append_pattern = 'log4j.appender.B.File='
log_file = os.path.join(self.get_path(), 'logs', 'audit', 'dropped-events.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
def __update_yaml(self):
conf_file = os.path.join(self.get_path(), 'resources', 'dse', 'conf', 'dse.yaml')
with open(conf_file, 'r') as f:
            data = yaml.safe_load(f)
data['system_key_directory'] = os.path.join(self.get_path(), 'keys')
# Get a map of combined cluster and node configuration with the node
# configuration taking precedence.
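        # For example (values made up): if the cluster sets {'foo': 1} and this node
        # sets {'foo': 2}, the merged map keeps the node's value, {'foo': 2}.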
full_options = common.merge_configuration(
self.cluster._dse_config_options,
self._dse_config_options, delete_empty=False)
# Merge options with original yaml data.
data = common.merge_configuration(data, full_options)
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
def __generate_server_xml(self):
server_xml = os.path.join(self.get_path(), 'resources', 'tomcat', 'conf', 'server.xml')
if os.path.isfile(server_xml):
os.remove(server_xml)
with open(server_xml, 'w+') as f:
f.write('<Server port="8005" shutdown="SHUTDOWN">\n')
f.write(' <Service name="Solr">\n')
f.write(' <Connector port="8983" address="%s" protocol="HTTP/1.1" connectionTimeout="20000" maxThreads = "200" URIEncoding="UTF-8"/>\n' % self.network_interfaces['thrift'][0])
f.write(' <Engine name="Solr" defaultHost="localhost">\n')
f.write(' <Host name="localhost" appBase="../solr/web"\n')
f.write(' unpackWARs="true" autoDeploy="true"\n')
f.write(' xmlValidation="false" xmlNamespaceAware="false">\n')
f.write(' </Host>\n')
f.write(' </Engine>\n')
f.write(' </Service>\n')
f.write('</Server>\n')
def __update_gremlin_config_yaml(self):
(node_ip, _) = self.network_interfaces['binary']
conf_file = os.path.join(self.get_path(), 'resources', 'graph', 'gremlin-console', 'conf', 'remote.yaml')
with open(conf_file, 'r') as f:
            data = yaml.safe_load(f)
data['hosts'] = [node_ip]
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
def _get_directories(self):
dirs = []
for i in ['data', 'commitlogs', 'saved_caches', 'logs', 'bin', 'keys', 'resources', os.path.join('data', 'hints')]:
dirs.append(os.path.join(self.get_path(), i))
return dirs
def _copy_agent(self):
agent_source = os.path.join(self.get_install_dir(), 'datastax-agent')
agent_target = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_source) and not os.path.exists(agent_target):
shutil.copytree(agent_source, agent_target)
def _write_agent_address_yaml(self, agent_dir):
address_yaml = os.path.join(agent_dir, 'conf', 'address.yaml')
if not os.path.exists(address_yaml):
with open(address_yaml, 'w+') as f:
(ip, port) = self.network_interfaces['thrift']
jmx = self.jmx_port
f.write('stomp_interface: 127.0.0.1\n')
f.write('local_interface: %s\n' % ip)
f.write('agent_rpc_interface: %s\n' % ip)
f.write('agent_rpc_broadcast_address: %s\n' % ip)
f.write('cassandra_conf: %s\n' % os.path.join(self.get_path(), 'resources', 'cassandra', 'conf', 'cassandra.yaml'))
f.write('cassandra_install: %s\n' % self.get_path())
f.write('cassandra_logs: %s\n' % os.path.join(self.get_path(), 'logs'))
f.write('thrift_port: %s\n' % port)
f.write('jmx_port: %s\n' % jmx)
def _write_agent_log4j_properties(self, agent_dir):
log4j_properties = os.path.join(agent_dir, 'conf', 'log4j.properties')
with open(log4j_properties, 'w+') as f:
f.write('log4j.rootLogger=INFO,R\n')
f.write('log4j.logger.org.apache.http=OFF\n')
f.write('log4j.logger.org.eclipse.jetty.util.log=WARN,R\n')
f.write('log4j.appender.R=org.apache.log4j.RollingFileAppender\n')
f.write('log4j.appender.R.maxFileSize=20MB\n')
f.write('log4j.appender.R.maxBackupIndex=5\n')
f.write('log4j.appender.R.layout=org.apache.log4j.PatternLayout\n')
f.write('log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %m%n\n')
f.write('log4j.appender.R.File=./log/agent.log\n')
def _update_spark_env(self):
try:
            node_num = re.search(r'node(\d+)', self.name).group(1)
except AttributeError:
node_num = 0
conf_file = os.path.join(self.get_path(), 'resources', 'spark', 'conf', 'spark-env.sh')
env = self.get_env()
content = []
with open(conf_file, 'r') as f:
for line in f.readlines():
for spark_var in env.keys():
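                    # The offset-2 startswith also matches exports commented out with a
                    # two-character prefix, e.g. '# export SPARK_WORKER_DIR=...'.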
if line.startswith('export %s=' % spark_var) or line.startswith('export %s=' % spark_var, 2):
line = 'export %s=%s\n' % (spark_var, env[spark_var])
break
content.append(line)
with open(conf_file, 'w') as f:
f.writelines(content)
# set unique spark.shuffle.service.port for each node; this is only needed for DSE 5.0.x;
# starting in 5.1 this setting is no longer needed
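        # (With the base port of 7737 used below, node1 gets 7738, node2 gets 7739, and so on.)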
if self.cluster.version() > '5.0' and self.cluster.version() < '5.1':
defaults_file = os.path.join(self.get_path(), 'resources', 'spark', 'conf', 'spark-defaults.conf')
with open(defaults_file, 'a') as f:
port_num = 7737 + int(node_num)
f.write("\nspark.shuffle.service.port %s\n" % port_num)
# create Spark working dirs; starting with DSE 5.0.10/5.1.3 these are no longer automatically created
for e in ["SPARK_WORKER_DIR", "SPARK_LOCAL_DIRS"]:
dir = env[e]
if not os.path.exists(dir):
os.makedirs(dir)
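# A minimal usage sketch (not part of the original module; `my_cluster` and the
# interface/port values are illustrative). Assuming a ccmlib DSE cluster object
# already exists, a node could be created and started roughly like this:
#
#     node = DseNode('node1', my_cluster, auto_bootstrap=True,
#                    thrift_interface=('127.0.0.1', 9160),
#                    storage_interface=('127.0.0.1', 7000),
#                    jmx_port='7100', remote_debug_port='0',
#                    initial_token=None, binary_interface=('127.0.0.1', 9042))
#     node.set_workloads(['solr'])            # also regenerates the Solr server.xml
#     node.start(wait_for_binary_proto=True)  # starts the agent too if OpsCenter is configured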
| bcantoni/ccm | ccmlib/dse_node.py | Python | apache-2.0 | 22,234 |