code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
"""
In this package, You can find test environment for Ella taggit unittest project.
As only true unittest and "unittest" (test testing programming unit, but using
database et al) are there, there is not much setup around.
"""
import os
test_runner = None
old_config = None
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_ella_taggit.settings'
def setup():
    """Create the Django test environment and the test database.

    The runner and the database configuration it returns are kept in
    module globals so that teardown() can undo this work later.
    """
    global test_runner, old_config
    from django.test.simple import DjangoTestSuiteRunner
    test_runner = DjangoTestSuiteRunner()
    test_runner.setup_test_environment()
    old_config = test_runner.setup_databases()
def teardown():
    """Destroy the test database and restore the original environment."""
    runner = test_runner
    runner.teardown_databases(old_config)
    runner.teardown_test_environment()
| ella/ella-taggit | test_ella_taggit/__init__.py | Python | mit | 700 |
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The arg spec for the vyos facts module.
"""
from __future__ import absolute_import, division, print_function
# Make all classes in this module new-style under Python 2 (no-op on Python 3).
__metaclass__ = type
class FactsArgs(object):  # pylint: disable=R0903
    """The arg spec for the vyos facts module."""

    def __init__(self, **kwargs):
        pass

    # Resource subsets accepted by gather_network_resources; a leading
    # '!' excludes that subset from fact collection.
    choices = [
        'all',
        'interfaces',
        '!interfaces',
        'l3_interfaces',
        '!l3_interfaces',
        'lag_interfaces',
        '!lag_interfaces',
        'lldp_global',
        '!lldp_global',
        'lldp_interfaces',
        '!lldp_interfaces'
    ]

    argument_spec = dict(
        gather_subset=dict(default=['!config'], type='list'),
        gather_network_resources=dict(choices=choices, type='list'),
    )
| amenonsen/ansible | lib/ansible/module_utils/network/vyos/argspec/facts/facts.py | Python | gpl-3.0 | 863 |
import urllib, urllib2, re
def doit(op, **kw):
try:
kw['op'] = op
kw['pass'] = 'PASSWORD'
data = urllib.urlencode(kw)
req = urllib2.Request("http://console.existencia.org/ss/status.php", data)
response = urllib2.urlopen(req, timeout=5)
return response.read()
except:
print "Cannot perform status %s." % op
def getip():
try:
response = urllib2.urlopen('http://checkip.dyndns.com/')
data = str(response.read())
return re.compile(r'Address: (\d+\.\d+\.\d+\.\d+)').search(data).group(1)
except:
print "Cannot get IP address."
def register(server, name):
    """Register *name* on *server*; returns the server response."""
    params = {'server': server, 'name': name}
    return doit('register', **params)

def update(id, msgtype, message):
    """Post a progress update for status entry *id*."""
    params = {'id': id, 'msgtype': msgtype, 'message': message}
    return doit('update', **params)

def complete(id):
    """Mark status entry *id* as finished."""
    # Completion is expressed as an update carrying the 'done' sentinel.
    return update(id, 'done', 'done')

def notify(id, msgtype, message):
    """Send a notification message for status entry *id*."""
    params = {'id': id, 'msgtype': msgtype, 'message': message}
    return doit('notify', **params)
| jrising/console | local/status.py | Python | gpl-2.0 | 972 |
#!/usr/bin/env python
# 12.01.2007, c
from __future__ import absolute_import
from argparse import ArgumentParser
import sfepy
from sfepy.base.base import output
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.homogenization.band_gaps_app import AcousticBandGapsApp
from sfepy.base.plotutils import plt
# Help strings for the command-line options built in main().
helps = {
    'debug':
    'automatically start debugger when an exception is raised',
    'filename' :
    'basename of output file(s) [default: <basename of input file>]',
    'detect_band_gaps' :
    'detect frequency band gaps',
    'analyze_dispersion' :
    'analyze dispersion properties (low frequency domain)',
    'plot' :
    'plot frequency band gaps, assumes -b',
    'phase_velocity' :
    # Fixed typo in user-visible help text: 'independet' -> 'independent'.
    'compute phase velocity (frequency-independent mass only)'
}
def main():
    """Command-line entry point for the acoustic band gaps application.

    Parses the options, loads the problem configuration and runs
    AcousticBandGapsApp, optionally wrapped in a parametric study hook.
    """
    parser = ArgumentParser()
    parser.add_argument("--version", action="version",
                        version="%(prog)s " + sfepy.__version__)
    parser.add_argument('--debug',
                        action='store_true', dest='debug',
                        default=False, help=helps['debug'])
    parser.add_argument("-o", metavar='filename',
                        action="store", dest="output_filename_trunk",
                        default=None, help=helps['filename'])
    parser.add_argument("-b", "--band-gaps",
                        action="store_true", dest="detect_band_gaps",
                        default=False, help=helps['detect_band_gaps'])
    parser.add_argument("-d", "--dispersion",
                        action="store_true", dest="analyze_dispersion",
                        default=False, help=helps['analyze_dispersion'])
    parser.add_argument("-p", "--plot",
                        action="store_true", dest="plot",
                        default=False, help=helps['plot'])
    parser.add_argument("--phase-velocity",
                        action="store_true", dest="phase_velocity",
                        default=False, help=helps['phase_velocity'])
    parser.add_argument("filename_in")
    options = parser.parse_args()

    if options.debug:
        from sfepy.base.base import debug_on_error; debug_on_error()

    if options.plot:
        if plt is None:
            # Plotting requested but matplotlib is unavailable: degrade
            # gracefully instead of failing later.
            output('matplotlib.pyplot cannot be imported, ignoring option -p!')
            options.plot = False
        elif options.analyze_dispersion == False:
            # -p implies -b (see helps['plot']).
            options.detect_band_gaps = True

    # The band-gap problem description has no equations section; solver and
    # EBC sections are only required when the corresponding analysis runs.
    required, other = get_standard_keywords()
    required.remove('equations')
    if not options.analyze_dispersion:
        required.remove('solver_[0-9]+|solvers')
    if options.phase_velocity:
        required.remove('ebc_[0-9]+|ebcs')
    conf = ProblemConf.from_file(options.filename_in, required, other)

    app = AcousticBandGapsApp(conf, options, 'phonon:')
    opts = conf.options
    if hasattr(opts, 'parametric_hook'):  # Parametric study.
        parametric_hook = conf.get_function(opts.parametric_hook)
        app.parametrize(parametric_hook)
    app()
# Run the application when invoked as a script.
if __name__ == '__main__':
    main()
| lokik/sfepy | phonon.py | Python | bsd-3-clause | 3,037 |
# Static Ansible inventory expressed as a Python dictionary:
# group name -> {'hosts': [...], 'vars': {...}}.
# NOTE(review): this module consists of a bare dict literal with no
# assignment; presumably it is read and eval'd by a dynamic-inventory
# wrapper -- confirm against the caller.
{
    'cisco': {
        'hosts': ['al-switch1', 'al-switch2', 'l2-l3-1', 'l2-l3-2', 'provider', 'provider-bu', 'internet'],
        'vars': {
            'ansible_connection': 'local',
            'hostname': '1.2.3.4',
            # NOTE(review): credentials are hard-coded placeholders --
            # move to a vault or environment variables.
            'password': 'cisco',
            'username': 'cisco'
        }
    },
    'local': {
        'hosts': ['localhost'],
        'vars': {'ansible_connection': 'local'}
    }
}
| joergullrich/virl-lab | dyn_inv.py | Python | gpl-3.0 | 319 |
'''
TelegramAPy
Copyright (C) 2015 Giove Andrea
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
import re
import requests
from .exception import TelegramException, InvalidTokenException
from .types import Message, User, UserProfilePhotos, File, Update
class TelegramAPy:
    """Client for the Telegram Bot API.

    Method names mirror the Telegram Bot API method names and allow you
    to send messages, upload media objects (photos, videos, ...) and
    receive updates through the Telegram interface.

    :param str token: Token generated by Telegram (BotFather)

    Usage::

        >>> from telegramapy.api import TelegramAPy
        >>> api = TelegramAPy('TokenGeneratedByTelegram')
        <TelegramAPy>
    """

    TOKEN_REGEX = r'^[0-9]+\:[a-zA-Z0-9_\-]+$'
    TELEGRAM_URL = 'https://api.telegram.org/'

    METHOD_GETME = 'getMe'
    METHOD_SENDMESSAGE = 'sendMessage'
    METHOD_FORWARDMESSAGE = 'forwardMessage'
    METHOD_SENDPHOTO = 'sendPhoto'
    METHOD_SENDAUDIO = 'sendAudio'
    METHOD_SENDDOCUMENT = 'sendDocument'
    METHOD_SENDSTICKER = 'sendSticker'
    METHOD_SENDVIDEO = 'sendVideo'
    METHOD_SENDVOICE = 'sendVoice'
    METHOD_SENDLOCATION = 'sendLocation'
    METHOD_SENDCHATACTION = 'sendChatAction'
    METHOD_GETUSERPROFILEPHOTOS = 'getUserProfilePhotos'
    METHOD_GETFILE = 'getFile'
    METHOD_GETUPDATES = 'getUpdates'
    METHOD_SETWEBHOOK = 'setWebhook'

    MARKDOWN_MODE = "markdown"

    def __init__(self, token):
        # Fail fast on tokens that cannot possibly be valid.
        if re.match(TelegramAPy.TOKEN_REGEX, token) is None:
            raise InvalidTokenException()
        self._token = token
        self._url = "%sbot%s/" % (TelegramAPy.TELEGRAM_URL, self._token)
        self._file_url = "%sfile/bot%s/" % (TelegramAPy.TELEGRAM_URL,
                                            self._token)

    def __repr__(self):
        return '<TelegramAPy>'

    def getMe(self):
        """Return a :class:`User <User>` with basic information about the bot.

        :return: :class:`User <User>` object
        :rtype: telegramapy.types.User
        """
        # Use the method-name constant for consistency with the other calls.
        j = TelegramAPy._sendRequest(self._getUrl(TelegramAPy.METHOD_GETME))
        return User.decode(j)

    def sendMessage(self, chat_id, text, parse_mode=None,
                    disable_web_page_preview=None, reply_to_message_id=None,
                    reply_markup=None):
        """Send a text message to the specified chat.

        :param chat_id: Unique identifier for the target chat or username
            of the target channel.
        :type chat_id: int or str
        :param str text: Text of the message to be sent.
        :param str parse_mode: Pass :attr:`TelegramAPy.MARKDOWN_MODE` to
            show bold, italic and inline URLs in the message.
        :param bool disable_web_page_preview: Disable link previews for
            links in this message.
        :param int reply_to_message_id: If the message is a reply, ID of
            the original message.
        :param reply_markup: Additional interface options (keyboard
            markup / force-reply object).
        :return: :class:`Message <Message>` object
        :rtype: telegramapy.types.Message
        """
        rep_markup = reply_markup.encode() if reply_markup else None
        j = TelegramAPy._sendRequest(
            self._getUrl(TelegramAPy.METHOD_SENDMESSAGE),
            chat_id=chat_id, text=text, parse_mode=parse_mode,
            reply_to_message_id=reply_to_message_id,
            disable_web_page_preview=disable_web_page_preview,
            reply_markup=rep_markup)
        return Message.decode(j)

    def forwardMessage(self, chat_id, from_chat_id, message_id):
        """Forward a message.

        :param chat_id: Unique identifier for the target chat or username
            of the target channel.
        :type chat_id: int or str
        :param from_chat_id: Unique identifier for the chat where the
            original message was sent.
        :type from_chat_id: int or str
        :param int message_id: Unique message identifier.
        :return: :class:`Message <Message>` object
        :rtype: telegramapy.types.Message
        """
        j = TelegramAPy._sendRequest(
            self._getUrl(TelegramAPy.METHOD_FORWARDMESSAGE),
            chat_id=chat_id, from_chat_id=from_chat_id, message_id=message_id)
        return Message.decode(j)

    def sendPhoto(self, chat_id, photo, is_path=True, caption=None,
                  reply_to_message_id=None, replay_markup=None):
        """Send a photo to the specified chat.

        :param chat_id: Unique identifier for the target chat or username
            of the target channel.
        :type chat_id: int or str
        :param str photo: Path of a photo to upload, or the *file_id* of
            a photo already on the Telegram servers.
        :param bool is_path: True when *photo* is a path; False when it
            is a *file_id*.
        :param str caption: Photo caption.
        :param int reply_to_message_id: If the message is a reply, ID of
            the original message.
        :param replay_markup: Additional interface options (keyboard
            markup / force-reply object). The historical keyword
            spelling is kept for backward compatibility.
        :return: :class:`Message <Message>` object
        :rtype: telegramapy.types.Message
        """
        files = {'photo': open(photo, 'rb')} if is_path else {'photo': photo}
        # Telegram expects the parameter 'reply_markup'; encode the markup
        # object the same way sendMessage does.
        rep_markup = replay_markup.encode() if replay_markup else None
        j = TelegramAPy._sendRequest(self._getUrl(TelegramAPy.METHOD_SENDPHOTO),
                                     chat_id=chat_id, files=files,
                                     is_file_path=is_path, caption=caption,
                                     reply_markup=rep_markup,
                                     reply_to_message_id=reply_to_message_id)
        return Message.decode(j)

    def sendAudio(self, chat_id, audio, is_path=True, duration=None,
                  performer=None, title=None,
                  reply_to_message_id=None, replay_markup=None):
        """Send an audio file to the specified chat.

        :param chat_id: Unique identifier for the target chat or username
            of the target channel.
        :type chat_id: int or str
        :param str audio: Path of an audio file to upload, or the
            *file_id* of one already on the Telegram servers.
        :param bool is_path: True when *audio* is a path; False when it
            is a *file_id*.
        :param int duration: Duration of the audio in seconds.
        :param str performer: Performer.
        :param str title: Track name.
        :param int reply_to_message_id: If the message is a reply, ID of
            the original message.
        :param replay_markup: Additional interface options (see
            :meth:`sendPhoto`).
        :return: :class:`Message <Message>` object
        :rtype: telegramapy.types.Message
        """
        files = {'audio': open(audio, 'rb')} if is_path else {'audio': audio}
        rep_markup = replay_markup.encode() if replay_markup else None
        j = TelegramAPy._sendRequest(self._getUrl(TelegramAPy.METHOD_SENDAUDIO),
                                     chat_id=chat_id, files=files,
                                     is_file_path=is_path, duration=duration,
                                     performer=performer, title=title,
                                     reply_markup=rep_markup,
                                     reply_to_message_id=reply_to_message_id)
        return Message.decode(j)

    def sendDocument(self, chat_id, document, is_path=True,
                     reply_to_message_id=None, replay_markup=None):
        """Send a general file to the specified chat.

        :param chat_id: Unique identifier for the target chat or username
            of the target channel.
        :type chat_id: int or str
        :param str document: Path of a file to upload, or the *file_id*
            of one already on the Telegram servers.
        :param bool is_path: True when *document* is a path; False when
            it is a *file_id*.
        :param int reply_to_message_id: If the message is a reply, ID of
            the original message.
        :param replay_markup: Additional interface options (see
            :meth:`sendPhoto`).
        :return: :class:`Message <Message>` object
        :rtype: telegramapy.types.Message
        """
        if is_path:
            files = {'document': open(document, 'rb')}
        else:
            files = {'document': document}
        rep_markup = replay_markup.encode() if replay_markup else None
        j = TelegramAPy._sendRequest(
            self._getUrl(TelegramAPy.METHOD_SENDDOCUMENT),
            files=files, is_file_path=is_path, chat_id=chat_id,
            reply_markup=rep_markup,
            reply_to_message_id=reply_to_message_id)
        return Message.decode(j)

    def sendSticker(self, chat_id, sticker, is_path=True,
                    reply_to_message_id=None, replay_markup=None):
        """Send a sticker to the specified chat.

        :param chat_id: Unique identifier for the target chat or username
            of the target channel.
        :type chat_id: int or str
        :param str sticker: Path of a sticker to upload, or the *file_id*
            of one already on the Telegram servers.
        :param bool is_path: True when *sticker* is a path; False when it
            is a *file_id*.
        :param int reply_to_message_id: If the message is a reply, ID of
            the original message.
        :param replay_markup: Additional interface options (see
            :meth:`sendPhoto`).
        :return: :class:`Message <Message>` object
        :rtype: telegramapy.types.Message
        """
        if is_path:
            files = {'sticker': open(sticker, 'rb')}
        else:
            files = {'sticker': sticker}
        rep_markup = replay_markup.encode() if replay_markup else None
        j = TelegramAPy._sendRequest(
            self._getUrl(TelegramAPy.METHOD_SENDSTICKER), files=files,
            is_file_path=is_path, chat_id=chat_id, reply_markup=rep_markup,
            reply_to_message_id=reply_to_message_id)
        return Message.decode(j)

    def sendVideo(self, chat_id, video, is_path=True, duration=None,
                  caption=None, reply_to_message_id=None, replay_markup=None):
        """Send a video to the specified chat.

        :param chat_id: Unique identifier for the target chat or username
            of the target channel.
        :type chat_id: int or str
        :param str video: Path of a video to upload, or the *file_id* of
            one already on the Telegram servers.
        :param bool is_path: True when *video* is a path; False when it
            is a *file_id*.
        :param int duration: Duration of the video in seconds.
        :param str caption: Video caption.
        :param int reply_to_message_id: If the message is a reply, ID of
            the original message.
        :param replay_markup: Additional interface options (see
            :meth:`sendPhoto`).
        :return: :class:`Message <Message>` object
        :rtype: telegramapy.types.Message
        """
        files = {'video': open(video, 'rb')} if is_path else {'video': video}
        rep_markup = replay_markup.encode() if replay_markup else None
        j = TelegramAPy._sendRequest(self._getUrl(TelegramAPy.METHOD_SENDVIDEO),
                                     files=files, is_file_path=is_path,
                                     chat_id=chat_id, duration=duration,
                                     caption=caption,
                                     reply_markup=rep_markup,
                                     reply_to_message_id=reply_to_message_id)
        return Message.decode(j)

    def sendVoice(self, chat_id, voice, is_path=True, duration=None,
                  reply_to_message_id=None, replay_markup=None):
        """Send a voice message to the specified chat.

        :param chat_id: Unique identifier for the target chat or username
            of the target channel.
        :type chat_id: int or str
        :param str voice: Path of a voice file to upload, or the
            *file_id* of one already on the Telegram servers.
        :param bool is_path: True when *voice* is a path; False when it
            is a *file_id*.
        :param int duration: Duration of the voice message in seconds.
        :param int reply_to_message_id: If the message is a reply, ID of
            the original message.
        :param replay_markup: Additional interface options (see
            :meth:`sendPhoto`).
        :return: :class:`Message <Message>` object
        :rtype: telegramapy.types.Message
        """
        files = {'voice': open(voice, 'rb')} if is_path else {'voice': voice}
        rep_markup = replay_markup.encode() if replay_markup else None
        j = TelegramAPy._sendRequest(self._getUrl(TelegramAPy.METHOD_SENDVOICE),
                                     files=files, is_file_path=is_path,
                                     chat_id=chat_id, duration=duration,
                                     reply_markup=rep_markup,
                                     reply_to_message_id=reply_to_message_id)
        return Message.decode(j)

    def sendLocation(self, chat_id, latitude, longitude,
                     reply_to_message_id=None, replay_markup=None):
        """Send a location to the specified chat.

        :param chat_id: Unique identifier for the target chat or username
            of the target channel.
        :type chat_id: int or str
        :param int latitude: Latitude of the location.
        :param int longitude: Longitude of the location.
        :param int reply_to_message_id: If the message is a reply, ID of
            the original message.
        :param replay_markup: Additional interface options (see
            :meth:`sendPhoto`).
        :return: :class:`Message <Message>` object
        :rtype: telegramapy.types.Message
        """
        rep_markup = replay_markup.encode() if replay_markup else None
        j = TelegramAPy._sendRequest(
            self._getUrl(TelegramAPy.METHOD_SENDLOCATION), chat_id=chat_id,
            latitude=latitude, longitude=longitude,
            reply_markup=rep_markup,
            reply_to_message_id=reply_to_message_id)
        return Message.decode(j)

    def sendChatAction(self, chat_id, action):
        """Broadcast a chat action (e.g. 'typing') to the specified chat.

        :param chat_id: Unique identifier for the target chat or username
            of the target channel.
        :type chat_id: int or str
        :param str action: Type of action to broadcast.
        """
        TelegramAPy._sendRequest(
            self._getUrl(TelegramAPy.METHOD_SENDCHATACTION), chat_id=chat_id,
            action=action)

    def getUserProfilePhotos(self, user_id, offset=None, limit=None):
        """Return a list of profile pictures for a user.

        :param int user_id: Unique identifier of the target user.
        :param int offset: Sequential number of the first photo to be
            returned. By default, all photos are returned.
        :param int limit: Limits the number of photos to be retrieved.
            Values between 1 - 100 are accepted. Defaults to 100.
        :return: :class:`UserProfilePhotos <UserProfilePhotos>` object
        :rtype: telegramapy.types.UserProfilePhotos
        """
        j = TelegramAPy._sendRequest(
            self._getUrl(TelegramAPy.METHOD_GETUSERPROFILEPHOTOS),
            user_id=user_id, offset=offset, limit=limit)
        return UserProfilePhotos.decode(j)

    def getFile(self, file_id):
        """Get basic info about a file and prepare it for downloading.

        :param str file_id: File identifier to get info about.
        :return: :class:`File <File>` object
        :rtype: telegramapy.types.File
        """
        j = TelegramAPy._sendRequest(self._getUrl(TelegramAPy.METHOD_GETFILE),
                                     file_id=file_id)
        return File.decode(j)

    def getUpdates(self, offset=None, limit=None, timeout=None):
        """Receive incoming updates using long polling.

        :param int offset: Identifier of the first update to be returned.
        :param int limit: Limits the number of updates to be retrieved.
            Values between 1 - 100 are accepted. Defaults to 100.
        :param int timeout: Timeout in seconds for long polling.
        :return: List of :class:`Update <Update>` objects
        :rtype: [telegramapy.types.Update]
        """
        j = TelegramAPy._sendRequest(
            self._getUrl(TelegramAPy.METHOD_GETUPDATES), offset=offset,
            limit=limit, timeout=timeout)
        return [Update.decode(el) for el in j]

    def setWebhook(self, url=None, certificate_path=None):
        """Specify a url to receive incoming updates via an outgoing webhook.

        :param str url: HTTPS url to send updates to. Use an empty string
            to remove webhook integration.
        :param str certificate_path: Path of the certificate that
            Telegram will use to validate the connection.
        """
        if certificate_path:
            files = {'certificate': open(certificate_path, 'rb')}
        else:
            files = {}
        TelegramAPy._sendRequest(self._getUrl(TelegramAPy.METHOD_SETWEBHOOK),
                                 files=files, url=url)

    def downloadFile(self, file_, file_path):
        """Download a file from the Telegram servers.

        :param file_: :class:`File <File>` object to download.
        :param file_path: Path where to save the downloaded file.
        """
        req = requests.get(self._file_url + file_.file_path, stream=True)
        if req.status_code == 200:
            with open(file_path, 'wb') as f:
                for chunk in req:
                    f.write(chunk)
        else:
            raise TelegramException('Unable to download file.')

    def _getUrl(self, method):
        # Full endpoint URL for a Bot API method name.
        return self._url + method

    @staticmethod
    def _sendRequest(url_, files=None, is_file_path=True, **kwargs):
        """POST a request to the Bot API and return the decoded 'result'.

        :param str url_: Full endpoint URL.
        :param dict files: Either file objects to upload (multipart) or
            *file_id* strings (regular form fields), depending on
            *is_file_path*.
        :param bool is_file_path: True when *files* holds file objects.
        :raises TelegramException: when the API response is not ok.
        """
        # Avoid a mutable default argument: the previous files={} default
        # was shared across all calls.
        files = {} if files is None else files
        data = {}
        # .items() instead of the Python-2-only .iteritems(); drop
        # None/empty values so they are not sent to the API.
        for key, value in kwargs.items():
            if value:
                data[key] = value
        if is_file_path:
            req = requests.post(url_, files=files, data=data)
        else:
            # file_id strings are plain form fields, not uploads. The old
            # code merged them into kwargs *after* data had been built,
            # silently dropping them from the request.
            data.update(files)
            req = requests.post(url_, data=data)
        j = req.json()
        if 'ok' not in j or not j['ok']:
            raise TelegramException(j)
        return j['result']
| aadeg/TelegramAPy | telegramapy/api.py | Python | gpl-2.0 | 21,062 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Dan Eicher.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# <pep8 compliant>
import bpy
from pie_menu import MenuItem, PieMenu, PiePropSlider
import pie_menu_utils as pmu
import modal_behavior
import math
import blf
import bgl
import random
# Key driving the pie menu; the stock Blender binding on this key is
# disabled while the add-on is enabled (see register()/setBind()).
default_keybind = 'TAB'

# Standard Blender add-on metadata block.
# NOTE(review): "location" says Q-key but default_keybind is TAB -- confirm.
bl_info = {
    "name": "Pie: Mode Menu",
    "author": "Dan Eicher, Sean Olson",
    "version": (0, 1, 0),
    "blender": (2, 6, 4),
    "location": "View3D > Q-key",
    "description": "3d View modes pie menu",
    "warning": "",
    "wiki_url": "",
    "tracker_url": "",
    "category": "3D View"
}
class VIEW3D_MT_Mode_Menu(bpy.types.Operator):
    '''Mode Menu'''
    bl_idname = "view3d.mode_menu"
    bl_label = "Pie Mode Menu"

    @classmethod
    def poll(cls, context):
        # Only available inside a 3D View area.
        return context.area.type == 'VIEW_3D'

    def modal(self, context, event):
        context.area.tag_redraw()
        # All event handling is delegated to the shared slider modal behavior.
        ret_val = modal_behavior.slider_modal(self, context, event)
        return ret_val

    def invoke(self, context, event):
        if context.selected_objects:
            self.current = None
            # NOTE(review): mouse_drag might belong on PieMenu rather than
            # on the operator -- revisit.
            self.mouse_drag = False
            # Show the key the user actually bound (may differ from default_keybind).
            current_keybind = bpy.context.window_manager.keyconfigs.user.keymaps['Object Non-modal'].keymap_items['view3d.mode_menu'].type
            # Generate menu content, centered on the mouse position.
            self.menu = menu_init(PieMenu(context, x=event.mouse_region_x,
                                          y=event.mouse_region_y,
                                          keybind=current_keybind,
                                          layout_radius=80,
                                          text_size=11,
                                          text_dpi=72,
                                          center_radius_squared=225,
                                          max_radius_squared=62500  # was 22500
                                          ))
            context.window_manager.modal_handler_add(self)
            pmu.callback_register(self, context)
            return {'RUNNING_MODAL'}
        else:
            # Nothing selected: no mode to switch.
            return {'CANCELLED'}
def menu_init(menu):
    """Populate *menu* with the mode-switch items and lay it out."""
    # (item class, label, x offset, y offset, icon name)
    entries = (
        (Sculpt, "Sculpt Mode", 0, 55, "SCULPTMODE_HLT"),
        (Pose, "Pose Mode", 75, 30, "POSE_HLT"),
        (Edit, "Edit Mode", 100, 0, "EDITMODE_HLT"),
        (WeightPaint, "Weight Paint", 75, -30, "WPAINT_HLT"),
        (TexturePaint, "Texture Paint", 0, -55, "TPAINT_HLT"),
        (VertexPaint, "Vertex Paint", -75, -30, "VPAINT_HLT"),
        (Object, "Object Mode", -100, 0, "OBJECT_DATAMODE"),
        (Particle, "Particle Mode", -75, 30, "PARTICLEMODE"),
    )
    for cls, label, x, y, icon in entries:
        menu.menu_items.append(cls(label, x, y, icon=icon))
    # No sliders for this menu.
    # Measure the label text once, size the boxes, then lay everything out
    # (this also converts local box coords to screen coords).
    menu.calc_text()
    menu.calc_boxes()
    menu.layout_predefined(auto_slice=True)
    return menu
class Object(MenuItem):
    """Menu entry switching back to Object Mode (always available)."""
    def op(self, parent, context):
        bpy.ops.object.mode_set(mode='OBJECT')
        bpy.ops.ed.undo_push(message="Enter Object Mode")
class Edit(MenuItem):
    """Menu entry switching the object into Edit Mode."""
    def op(self, parent, context):
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.ed.undo_push(message="Enter Edit Mode")
    def poll(self, context):
        # Edit Mode only exists for these object types.
        editable = ('LATTICE', 'MESH', 'CURVE', 'SURFACE',
                    'META', 'FONT', 'ARMATURE')
        return bpy.context.object.type in editable
class Sculpt(MenuItem):
    """Menu entry switching a mesh object into Sculpt Mode."""
    def op(self, parent, context):
        bpy.ops.object.mode_set(mode='SCULPT')
    def poll(self, context):
        # Sculpting applies to meshes only.
        return bpy.context.object.type == 'MESH'
class VertexPaint(MenuItem):
    """Menu entry switching a mesh object into Vertex Paint mode."""
    def op(self, parent, context):
        bpy.ops.object.mode_set(mode='VERTEX_PAINT')
    def poll(self, context):
        return bpy.context.object.type == 'MESH'
class TexturePaint(MenuItem):
    """Menu entry switching a mesh object into Texture Paint mode."""
    def op(self, parent, context):
        bpy.ops.object.mode_set(mode='TEXTURE_PAINT')
    def poll(self, context):
        return bpy.context.object.type == 'MESH'
class WeightPaint(MenuItem):
    """Menu entry switching a mesh object into Weight Paint mode."""
    def op(self, parent, context):
        bpy.ops.object.mode_set(mode='WEIGHT_PAINT')
    def poll(self, context):
        return bpy.context.object.type == 'MESH'
class Pose(MenuItem):
    """Menu entry switching an armature into Pose Mode."""
    def op(self, parent, context):
        bpy.ops.object.mode_set(mode='POSE')
    def poll(self, context):
        # Pose Mode only makes sense for armatures.
        return bpy.context.object.type == 'ARMATURE'
class Particle(MenuItem):
    """Menu entry toggling Particle Edit mode."""
    def op(self, parent, context):
        if bpy.ops.particle.particle_edit_toggle.poll():
            bpy.ops.particle.particle_edit_toggle()
    def poll(self, context):
        # Only meaningful when the object owns at least one particle system.
        return len(bpy.context.object.particle_systems.items()) > 0
def setBind():
    """Route the key to the pie menu: deactivate Blender's default
    ``object.mode_set`` binding and activate ``view3d.mode_menu``."""
    def _toggle(idname, active):
        # Flip the plain (no-modifier) default_keybind entry for idname.
        km = bpy.context.window_manager.keyconfigs.active.keymaps['Object Non-modal']
        for kmi in km.keymap_items:
            if (kmi.idname == idname and kmi.type == default_keybind
                    and not (kmi.ctrl or kmi.alt or kmi.shift
                             or kmi.oskey or kmi.any)
                    and kmi.key_modifier == 'NONE'):
                kmi.active = active
                return
    _toggle('object.mode_set', False)
    _toggle('view3d.mode_menu', True)
def removeBind():
    """Give the key back to Blender: reactivate the default
    ``object.mode_set`` binding and deactivate ``view3d.mode_menu``."""
    def _toggle(idname, active):
        # Flip the plain (no-modifier) default_keybind entry for idname.
        km = bpy.context.window_manager.keyconfigs.active.keymaps['Object Non-modal']
        for kmi in km.keymap_items:
            if (kmi.idname == idname and kmi.type == default_keybind
                    and not (kmi.ctrl or kmi.alt or kmi.shift
                             or kmi.oskey or kmi.any)
                    and kmi.key_modifier == 'NONE'):
                kmi.active = active
                return
    _toggle('object.mode_set', True)
    _toggle('view3d.mode_menu', False)
def register():
    """Register the operator, bind it to default_keybind and disable the
    stock binding on the same key."""
    bpy.utils.register_class(VIEW3D_MT_Mode_Menu)
    km = bpy.context.window_manager.keyconfigs.active.keymaps['Object Non-modal']
    # Bind the pie menu.
    km.keymap_items.new('view3d.mode_menu', default_keybind, 'PRESS')
    # Disable Blender's plain (no-modifier) default binding on that key.
    for kmi in km.keymap_items:
        if (kmi.idname == 'object.mode_set' and kmi.type == default_keybind
                and not (kmi.ctrl or kmi.alt or kmi.shift
                         or kmi.oskey or kmi.any)
                and kmi.key_modifier == 'NONE'):
            kmi.active = False
            break
def unregister():
    """Restore Blender's default binding, remove the pie menu binding and
    unregister the operator."""
    km = bpy.context.window_manager.keyconfigs.active.keymaps['Object Non-modal']
    # Re-enable the stock plain (no-modifier) binding.
    for kmi in km.keymap_items:
        if (kmi.idname == 'object.mode_set' and kmi.type == default_keybind
                and not (kmi.ctrl or kmi.alt or kmi.shift
                         or kmi.oskey or kmi.any)
                and kmi.key_modifier == 'NONE'):
            kmi.active = True
            break
    # Drop the pie menu's own binding.
    for kmi in km.keymap_items:
        if kmi.idname == 'view3d.mode_menu':
            km.keymap_items.remove(kmi)
            break
    bpy.utils.unregister_class(VIEW3D_MT_Mode_Menu)
# Allow running the add-on directly from Blender's text editor.
if __name__ == "__main__":
    register()
| jemandez/creaturas-magicas | Configuraciones básicas/scripts/addons/piemenus/pie_modeMenu.py | Python | gpl-3.0 | 10,349 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The code is partially taken (and modified) from django rest framework
# that is licensed under the following terms:
#
# Copyright (c) 2011-2014, Tom Christie
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from . import views
from . import mixins
from . import pagination
from .settings import api_settings
from .utils import get_object_or_404
class GenericAPIView(pagination.PaginationMixin,
                     views.APIView):
    """
    Base class for all other generic views.

    Combines the pagination mixin with the base APIView and provides the
    queryset / serializer / validator plumbing shared by all generic views.
    """
    # You'll need to either set these attributes,
    # or override `get_queryset()`/`get_serializer_class()`.
    queryset = None
    serializer_class = None
    validator_class = None
    # This shortcut may be used instead of setting either or both
    # of the `queryset`/`serializer_class` attributes, although using
    # the explicit style is generally preferred.
    model = None
    # If you want to use object lookups other than pk, set this attribute.
    # For more complex lookup requirements override `get_object()`.
    lookup_field = 'pk'
    lookup_url_kwarg = None
    # The filter backend classes to use for queryset filtering
    filter_backends = api_settings.DEFAULT_FILTER_BACKENDS
    # The following attributes may be subject to change,
    # and should be considered private API.
    model_serializer_class = api_settings.DEFAULT_MODEL_SERIALIZER_CLASS
    model_validator_class = api_settings.DEFAULT_MODEL_VALIDATOR_CLASS
    ######################################
    # These are pending deprecation...
    pk_url_kwarg = 'pk'
    slug_url_kwarg = 'slug'
    slug_field = 'slug'
    allow_empty = True
    def get_extra_context(self):
        """
        Extra context provided to the serializer class.
        """
        return {
            'request': self.request,
            'format': self.format_kwarg,
            'view': self
        }
    def get_serializer(self, instance=None, data=None,
                       files=None, many=False, partial=False):
        """
        Return the serializer instance that should be used for deserializing
        input, and for serializing output.
        """
        serializer_class = self.get_serializer_class()
        context = self.get_extra_context()
        return serializer_class(instance, data=data, files=files,
                                many=many, partial=partial, context=context)
    def get_validator(self, instance=None, data=None,
                      files=None, many=False, partial=False):
        """
        Return the validator instance that should be used for validating the
        input, and for serializing output.
        """
        validator_class = self.get_validator_class()
        context = self.get_extra_context()
        return validator_class(instance, data=data, files=files,
                               many=many, partial=partial, context=context)
    def filter_queryset(self, queryset, filter_backends=None):
        """
        Given a queryset, filter it with whichever filter backend is in use.
        You are unlikely to want to override this method, although you may need
        to call it either from a list view, or from a custom `get_object`
        method if you want to apply the configured filtering backend to the
        default queryset.
        """
        # NOTE TAIGA: Added filter_backends to overwrite the default behavior.
        backends = filter_backends or self.get_filter_backends()
        for backend in backends:
            queryset = backend().filter_queryset(self.request, queryset, self)
        return queryset
    def get_filter_backends(self):
        """
        Returns the list of filter backends that this view requires.
        """
        filter_backends = self.filter_backends or []
        # Fail loudly for the old single-backend attribute spelling.
        if not filter_backends and hasattr(self, 'filter_backend'):
            raise RuntimeError('The `filter_backend` attribute and `FILTER_BACKEND` setting '
                               'are due to be deprecated in favor of a `filter_backends` '
                               'attribute and `DEFAULT_FILTER_BACKENDS` setting, that take '
                               'a *list* of filter backend classes.')
        return filter_backends
    ###########################################################
    # The following methods provide default implementations   #
    # that you may want to override for more complex cases.   #
    ###########################################################
    def get_serializer_class(self):
        # `list_serializer_class` (when defined) takes precedence, but only
        # for the "list" action.
        if self.action == "list" and hasattr(self, "list_serializer_class"):
            return self.list_serializer_class
        serializer_class = self.serializer_class
        if serializer_class is not None:
            return serializer_class
        assert self.model is not None, ("'%s' should either include a 'serializer_class' attribute, "
                                        "or use the 'model' attribute as a shortcut for "
                                        "automatically generating a serializer class." % self.__class__.__name__)
        # Fall back to an auto-generated serializer bound to `self.model`.
        class DefaultSerializer(self.model_serializer_class):
            class Meta:
                model = self.model
        return DefaultSerializer
    def get_validator_class(self):
        validator_class = self.validator_class
        serializer_class = self.get_serializer_class()
        # Situations where the validator is the rest framework serializer
        if validator_class is None and serializer_class is not None:
            return serializer_class
        if validator_class is not None:
            return validator_class
        # Fall back to an auto-generated validator bound to `self.model`.
        class DefaultValidator(self.model_validator_class):
            class Meta:
                model = self.model
        return DefaultValidator
    def get_queryset(self):
        """
        Get the list of items for this view.
        This must be an iterable, and may be a queryset.
        Defaults to using `self.queryset`.
        You may want to override this if you need to provide different
        querysets depending on the incoming request.
        (Eg. return a list of items that is specific to the user)
        """
        if self.queryset is not None:
            # _clone() so per-request filtering never mutates the class-level
            # queryset.
            return self.queryset._clone()
        if self.model is not None:
            return self.model._default_manager.all()
        raise ImproperlyConfigured(("'%s' must define 'queryset' or 'model'" % self.__class__.__name__))
    def get_object(self, queryset=None):
        """
        Returns the object the view is displaying.
        You may want to override this if you need to provide non-standard
        queryset lookups. Eg if objects are referenced using multiple
        keyword arguments in the url conf.
        """
        # Determine the base queryset to use.
        if queryset is None:
            queryset = self.filter_queryset(self.get_queryset())
        else:
            # NOTE: explicit exception for avoid and fix
            # usage of deprecated way of get_object
            raise RuntimeError("DEPRECATED")
        # Perform the lookup filtering.
        # Note that `pk` and `slug` are deprecated styles of lookup filtering.
        lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
        lookup = self.kwargs.get(lookup_url_kwarg, None)
        pk = self.kwargs.get(self.pk_url_kwarg, None)
        slug = self.kwargs.get(self.slug_url_kwarg, None)
        if lookup is not None:
            filter_kwargs = {self.lookup_field: lookup}
        elif pk is not None and self.lookup_field == 'pk':
            raise RuntimeError(('The `pk_url_kwarg` attribute is due to be deprecated. '
                                'Use the `lookup_field` attribute instead'))
        elif slug is not None and self.lookup_field == 'pk':
            raise RuntimeError(('The `slug_url_kwarg` attribute is due to be deprecated. '
                                'Use the `lookup_field` attribute instead'))
        else:
            raise ImproperlyConfigured(('Expected view %s to be called with a URL keyword argument '
                                        'named "%s". Fix your URL conf, or set the `.lookup_field` '
                                        'attribute on the view correctly.' %
                                        (self.__class__.__name__, self.lookup_field)))
        obj = get_object_or_404(queryset, **filter_kwargs)
        return obj
    def get_object_or_none(self):
        # Convenience wrapper: None instead of a 404 response.
        try:
            return self.get_object()
        except Http404:
            return None
    ###################################################
    # The following are placeholder methods,          #
    # and are intended to be overridden.              #
    #                                                 #
    # The are not called by GenericAPIView directly,  #
    # but are used by the mixin methods.              #
    ###################################################
    def pre_conditions_on_save(self, obj):
        """
        Placeholder method called by mixins before save for check
        some conditions before save.
        """
        pass
    def pre_conditions_on_delete(self, obj):
        """
        Placeholder method called by mixins before delete for check
        some conditions before delete.
        """
        pass
    def pre_save(self, obj):
        """
        Placeholder method for calling before saving an object.
        May be used to set attributes on the object that are implicit
        in either the request, or the url.
        """
        pass
    def post_save(self, obj, created=False):
        """
        Placeholder method for calling after saving an object.
        """
        pass
    def pre_delete(self, obj):
        """
        Placeholder method for calling before deleting an object.
        """
        pass
    def post_delete(self, obj):
        """
        Placeholder method for calling after deleting an object.
        """
        pass
######################################################
# Concrete view classes that provide method handlers #
# by composing the mixin classes with the base view. #
# NOTE: not used by taiga. #
######################################################
class CreateAPIView(mixins.CreateModelMixin,
                    GenericAPIView):
    """
    Concrete view for creating a model instance.
    """
    # POST -> create a new instance.
    def post(self, request, *args, **kwargs):
        return self.create(request, *args, **kwargs)
class ListAPIView(mixins.ListModelMixin,
                  GenericAPIView):
    """
    Concrete view for listing a queryset.
    """
    # GET -> list the (filtered, paginated) queryset.
    def get(self, request, *args, **kwargs):
        return self.list(request, *args, **kwargs)
class RetrieveAPIView(mixins.RetrieveModelMixin,
                      GenericAPIView):
    """
    Concrete view for retrieving a model instance.
    """
    # GET -> retrieve a single instance by lookup field.
    def get(self, request, *args, **kwargs):
        return self.retrieve(request, *args, **kwargs)
class DestroyAPIView(mixins.DestroyModelMixin,
                     GenericAPIView):
    """
    Concrete view for deleting a model instance.
    """
    # DELETE -> destroy a single instance.
    def delete(self, request, *args, **kwargs):
        return self.destroy(request, *args, **kwargs)
class UpdateAPIView(mixins.UpdateModelMixin,
                    GenericAPIView):
    """
    Concrete view for updating a model instance.
    """
    # PUT -> full update; PATCH -> partial update.
    def put(self, request, *args, **kwargs):
        return self.update(request, *args, **kwargs)
    def patch(self, request, *args, **kwargs):
        return self.partial_update(request, *args, **kwargs)
class ListCreateAPIView(mixins.ListModelMixin,
                        mixins.CreateModelMixin,
                        GenericAPIView):
    """
    Concrete view for listing a queryset or creating a model instance.
    """
    # GET -> list; POST -> create.
    def get(self, request, *args, **kwargs):
        return self.list(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        return self.create(request, *args, **kwargs)
class RetrieveUpdateAPIView(mixins.RetrieveModelMixin,
                            mixins.UpdateModelMixin,
                            GenericAPIView):
    """
    Concrete view for retrieving, updating a model instance.
    """
    # GET -> retrieve; PUT -> update; PATCH -> partial update.
    def get(self, request, *args, **kwargs):
        return self.retrieve(request, *args, **kwargs)
    def put(self, request, *args, **kwargs):
        return self.update(request, *args, **kwargs)
    def patch(self, request, *args, **kwargs):
        return self.partial_update(request, *args, **kwargs)
class RetrieveDestroyAPIView(mixins.RetrieveModelMixin,
                             mixins.DestroyModelMixin,
                             GenericAPIView):
    """
    Concrete view for retrieving or deleting a model instance.
    """
    # GET -> retrieve; DELETE -> destroy.
    def get(self, request, *args, **kwargs):
        return self.retrieve(request, *args, **kwargs)
    def delete(self, request, *args, **kwargs):
        return self.destroy(request, *args, **kwargs)
class RetrieveUpdateDestroyAPIView(mixins.RetrieveModelMixin,
                                   mixins.UpdateModelMixin,
                                   mixins.DestroyModelMixin,
                                   GenericAPIView):
    """
    Concrete view for retrieving, updating or deleting a model instance.
    """
    # GET -> retrieve; PUT -> update; PATCH -> partial update; DELETE -> destroy.
    def get(self, request, *args, **kwargs):
        return self.retrieve(request, *args, **kwargs)
    def put(self, request, *args, **kwargs):
        return self.update(request, *args, **kwargs)
    def patch(self, request, *args, **kwargs):
        return self.partial_update(request, *args, **kwargs)
    def delete(self, request, *args, **kwargs):
        return self.destroy(request, *args, **kwargs)
| dayatz/taiga-back | taiga/base/api/generics.py | Python | agpl-3.0 | 15,996 |
#!/usr/bin/env python
#
# Documentation:
# Purpose: This script helps generate migration file for ts-events child tables.
# Usage: ./scripts/migrations/create-ts-events.py 2017 > ./migrations/core/0033_add-ts-events-2017.up.sql
# Arguments:
# 1. The year. Example: 2017
#
# Examples:
# ./scripts/migrations/create-ts-events.py 2017 > ./migrations/core/0033_add-ts-events-2017.up.sql
# ./scripts/migrations/create-ts-events.py 2017 > ./migrations/ts-events/0033_add-ts-events-2017.up.sql
import os
import sys
if __name__ == '__main__':
    # Require the target year argument (e.g. 2017); fail with a usage
    # message instead of an IndexError.
    if len(sys.argv) < 2:
        sys.stderr.write('Usage: %s <year>\n' % sys.argv[0])
        sys.exit(1)
    # The 2016 migration file is used as the template for the new year.
    template_up = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'migrations', 'pg', 'up', '0004_ts-events-2016.sql')
    # `with` closes the file handle promptly instead of leaking it.
    with open(template_up, 'r') as template_file:
        content_up = template_file.read()
    # print(...) with a single argument behaves the same under Python 2 and 3.
    print(content_up.replace('2016', sys.argv[1]))
| resourced/resourced-master | scripts/migrations/create-ts-events.py | Python | mit | 771 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 Language Technology, Technische Universitaet Darmstadt (author: Benjamin Milde)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import StringIO
import ctypes
import sys
import traceback
#
# A simple MARY TTS client (5.x) in Python, only generates intermidate MARY TTS phoneme format.
# This class reuses connections with the request framework (esp. helpful for bulk processing), but you have to install it:
# pip install requests
#
# Try http://mary.dfki.de:59125/ with a sentence of your choice and set OUTPUT TYPE to PHONEMES to see the format this client returns.
#
class maryclient:
    """Minimal HTTP client for a MARY TTS 5.x server.

    Only requests the intermediate PHONEMES output format.  HTTP
    connections are reused through a pool of `requests.Session`
    objects, which helps when processing text in bulk.
    """
    def __init__(self, connections = 2):
        # Server defaults: local MARY instance, German locale,
        # plain text in -> phonemes out.
        self.host = '127.0.0.1'
        self.port = 59125
        self.input_type = 'TEXT'
        self.output_type = 'PHONEMES'
        self.audio = 'WAVE_FILE'
        self.locale = 'de'
        self.voice = ''
        self.reserve(connections)
    def reserve(self,num_conn):
        """Create `num_conn` reusable HTTP sessions (the connection pool)."""
        self.connection_pool = [requests.Session() for x in xrange(num_conn)]
    def generate(self, message, connection_pool_num=0):
        '''Sends the text string in message to the MARY server and
        returns the server response in the configured output
        format (PHONEMES by default).'''
        assert(len(self.connection_pool) > connection_pool_num)
        params = {'INPUT_TEXT': message.encode('utf-8'),
                  'INPUT_TYPE': self.input_type,
                  'OUTPUT_TYPE': self.output_type,
                  'LOCALE': self.locale
                  #'AUDIO': self.audio,
                  #'VOICE': self.voice,
                  }
        r = self.connection_pool[connection_pool_num].post('http://'+self.host+':'+str(self.port)+'/process',data=params,timeout=(10.0,10.0))
        if r.status_code != requests.codes.ok:
            raise RuntimeError('error in http request:'+str(r.status_code))
        returnbuffer = r.text
        return returnbuffer
| tudarmstadt-lt/kaldi-tuda-de | s5/local/maryclient.py | Python | apache-2.0 | 2,370 |
# -*- coding: utf-8 -*-
# Import the AbstractUser model
from django.contrib.auth.models import AbstractUser
# Import the basic Django ORM models library
from django.db import models
from django.utils.translation import ugettext_lazy as _
# A simple chart record identified by its title.
class Chart(models.Model):
    # Display title of the chart; also used as its string representation.
    title = models.CharField(max_length=100)
    def __unicode__(self):
        # Python 2 string representation (used e.g. by the Django admin).
        return self.title
| lewchuk/chartio_embed | chartio_embed/charts/models.py | Python | bsd-3-clause | 392 |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Panic mode handler."""
from google.appengine.api import users
from simian import settings
from simian.mac import admin
from simian.mac.common import mail
from simian.mac.munki import common
class AdminPanic(admin.AdminHandler):
  """Handler for /admin/panic.

  GET renders the list of panic modes and their current state; POST
  toggles a mode after an explicit verification step and notifies the
  admin mailing list.
  """
  def get(self):
    """GET handler."""
    if not self.IsAdminUser():
      return
    modes = []
    for mode in common.PANIC_MODES:
      d = {
          'name': mode,
          'enabled': common.IsPanicMode(mode),
      }
      modes.append(d)
    self.Render(
        'panic.html', {'modes': modes, 'report_type': 'panic'})
  def post(self):
    """POST handler."""
    if not self.IsAdminUser():
      return
    mode = self.request.get('mode')
    enabled = self.request.get('enabled')
    verify = self.request.get('verify')
    if not verify:
      # First submission: ask the admin to confirm before changing anything.
      self.Render(
          'panic_set_verify.html',
          {'mode': {'name': mode, 'enabled': enabled}, 'report_type': 'panic'})
    else:
      # Map the action keyword to a boolean; anything else is rejected.
      if enabled == 'disable':
        enabled = False
      elif enabled == 'enable':
        enabled = True
      else:
        enabled = None
      if enabled is None:
        self.error(400)
      else:
        try:
          common.SetPanicMode(mode, enabled)
          if mail:
            # NOTE(review): `mail` is a module import and is always truthy;
            # presumably intended as an optional-dependency guard — confirm.
            user = users.get_current_user()
            subject = 'Panic Mode Update by %s' % user
            body = '%s has set \'%s\' for Panic Mode.\n' % (user, enabled)
            mail.SendMail(settings.EMAIL_ADMIN_LIST, subject, body)
          self.redirect('/admin/panic')
        except ValueError:
          # SetPanicMode rejected the mode name.
          self.error(400)
| alexandregz/simian | src/simian/mac/admin/panic.py | Python | apache-2.0 | 2,215 |
"""
pip._vendor is for vendoring dependencies of pip to prevent needing pip to
depend on something external.
Files inside of pip._vendor should be considered immutable and should only be
updated to versions from upstream.
"""
from __future__ import absolute_import
| Orav/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/__init__.py | Python | lgpl-3.0 | 274 |
# -*- coding: utf-8 -*-
#
##################################################################################
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
import os
import osrframework.utils.configuration as configuration
import osrframework.utils.general as general
class OSRFrameworkException(Exception):
    """
    Generic OSrframework Exception

    It will be used to show warnings, i. e., any operation which throws an
    exception but which does not stop OSRFramework from running.
    Messages will be printed as warnings, in orange.
    """
    def __init__(self, msg, *args, **kwargs):
        # Apply the warning colouring once, here, before delegating.
        Exception.__init__(self, general.warning(msg))
        self.generic = "Generic OSRFramework exception."
class NoCredentialsException(OSRFrameworkException):
    """Warning raised when no valid credentials exist for a platform.

    Points the user at the accounts.cfg file and at the option of
    excluding the platform with `-x <platform>`.
    """
    def __init__(self, platform, *args, **kwargs):
        # Four placeholders for the four format arguments: the original
        # template had only three, silently dropping the `-x` hint.
        msg = """
[*] Warning:\t{}. Details:
    No valid credentials provided for '{}'.
    Update the configuration file at: '{}'.
    Otherwise, exclude the platform with '{}'.
    """.format(
            self.__class__.__name__,
            platform,
            os.path.join(configuration.getConfigPath()["appPath"], "accounts.cfg"),
            general.emphasis("-x " + platform)
        )
        # The parent constructor already applies general.warning(); passing
        # the plain message avoids wrapping/colouring it twice.
        OSRFrameworkException.__init__(self, msg)
        self.generic = "The credentials for some platforms were NOT provided."
class OSRFrameworkError(Exception):
    """
    Generic OSrframework Error

    It will be used to show errors, i. e., any operation which throws an error
    from which OSRFramework cannot get recovered.
    Messages will be printed as errors, in red.
    """
    def __init__(self, msg, *args, **kwargs):
        # Apply the error colouring once, here, before delegating.
        Exception.__init__(self, "{}".format(general.error(msg)))
        self.generic = "Generic OSRFramework error."
class NotImplementedModeError(OSRFrameworkError):
    def __init__(self, platform, mode, *args, **kwargs):
        """Error raised when a wrapper invokes a mode it does not implement."""
        msg = """
[*] Error:\t{}. Details:
    The '{}' wrapper has tried to call 'self.do_{}(...)'.
    The method seems be implemented wrongly or not implemented.""".format(
            self.__class__.__name__,
            platform,
            mode
        )
        OSRFrameworkError.__init__(self, msg)
        self.generic = "A wrapper has tried to launch a mode which is not yet implemented. This error should not be happening unless you have added a new method out of the standard ones for mailfy, phonefy, searchfy or usufy."
class BadImplementationError(OSRFrameworkError):
    def __init__(self, original_message, *args, **kwargs):
        """Error raised for unexpected implementation problems in a wrapper."""
        msg = """
[*] Error:\t{}. Details:
    {}.
    {}""".format(
            self.__class__.__name__,
            original_message,
            "The wrapper may be missing an attribute like self.creds empty list in its constructor."
        )
        OSRFrameworkError.__init__(self, msg)
        self.generic = "A wrapper has launched an unexpected implementation error."
| i3visio/osrframework | osrframework/utils/exceptions.py | Python | agpl-3.0 | 3,670 |
# -*- coding: utf-8 -*-
"""Tests for nacelle's mail functionality
"""
# marty mcfly imports
from __future__ import absolute_import
# stdlib imports
import base64
# third-party imports
import webapp2
from nacelle.conf import settings
from nacelle.test.testcases import NacelleTestCase
# local imports
from nacelle.contrib import mail
from nacelle.contrib.mail import routes
# Minimal WSGI app exposing only the mail routes; a throwaway session
# secret is required by webapp2_extras.sessions.
wsgi = webapp2.WSGIApplication(routes.ROUTES, debug=True, config={
    'webapp2_extras.sessions': {'secret_key': 'xxxxxxxxxxxxxxxxxxxxxx'},
})
# attach dispatcher and error_handler to the WSGI app
dispatcher = webapp2.import_string(settings.DISPATCHER_MODULE)
wsgi.router.set_dispatcher(dispatcher)
def _make_test_request(url, post_data=None, headers=None):
    """Make a test request against the app

    Builds a blank webapp2 request (a POST when `post_data` is given) and
    dispatches it through the module-level `wsgi` application, returning
    the response object.
    """
    request = webapp2.Request.blank(url, POST=post_data, headers=headers)
    return request.get_response(wsgi)
def _run_tasks(taskq_stub, q_name):
    """Since nose runs our tests single threaded, appengine can't run tests in
    the background, thus we need to run them manually at the appropriate point
    during our tests.

    Yields the response of each executed task; draining repeats until the
    queue stays empty, so tasks that enqueue further tasks are also run.
    """
    tasks = taskq_stub.GetTasks(q_name)
    taskq_stub.FlushQueue(q_name)
    while tasks:
        for task in tasks:
            # Task bodies are stored base64-encoded by the taskqueue stub.
            params = base64.b64decode(task["body"])
            yield _make_test_request(task["url"], post_data=params, headers=[('X-AppEngine-TaskName', 'task1')])
        tasks = taskq_stub.GetTasks(q_name)
        taskq_stub.FlushQueue(q_name)
def _test_data(**overrides):
    """Build a complete, valid payload for the mail API tests.

    Keyword arguments override (or extend) entries of the default
    payload, so individual tests only spell out what differs.
    """
    payload = {
        'sender': 'someemail@somedomain.com',
        'to': 'someotheremail@somedomain.com',
        'subject': 'Just a test email',
        'body': 'Just some test content'
    }
    payload.update(overrides)
    return payload
class MailTests(NacelleTestCase):
    """Test nacelle's email functionality

    Uses the App Engine mail stub to inspect sent messages and the
    taskqueue stub (via _run_tasks) for the async path.
    """
    def test_valid_email(self):
        """Test that sending a valid email succeeds
        """
        mail.send(**_test_data())
        messages = self.mail_stub.get_sent_messages()
        self.assertEqual(1, len(messages))
        self.assertEqual('someotheremail@somedomain.com', messages[0].to)
    def test_valid_email_empty_body(self):
        """Test that sending a valid email (with an empty body) succeeds
        """
        mail.send(**_test_data(body=''))
        messages = self.mail_stub.get_sent_messages()
        self.assertEqual(1, len(messages))
        self.assertEqual('someotheremail@somedomain.com', messages[0].to)
    def test_valid_email_no_body(self):
        """Test that sending a valid email (with no body) succeeds
        """
        _td = _test_data()
        del _td['body']
        mail.send(**_td)
        messages = self.mail_stub.get_sent_messages()
        self.assertEqual(1, len(messages))
        self.assertEqual('someotheremail@somedomain.com', messages[0].to)
    def test_invalid_email_empty_sender(self):
        """Test that sending an invalid email (empty sender) raises the appropriate exception
        """
        _td = _test_data(sender='')
        with self.assertRaises(mail.InvalidEmailError):
            mail.send(**_td)
    def test_invalid_email_empty_to(self):
        """Test that sending an invalid email (empty to) raises the appropriate exception
        """
        _td = _test_data(to='')
        with self.assertRaises(mail.InvalidEmailError):
            mail.send(**_td)
    def test_invalid_no_sender(self):
        """Test that sending an invalid email (no sender) raises the appropriate exception
        """
        _td = _test_data()
        del _td['sender']
        with self.assertRaises(mail.MissingSenderError):
            mail.send(**_td)
    def test_invalid_no_to(self):
        """Test that sending an invalid email (no to) raises the appropriate exception
        """
        _td = _test_data()
        del _td['to']
        with self.assertRaises(mail.MissingRecipientsError):
            mail.send(**_td)
    def test_valid_email_async(self):
        """Test that sending a valid email asynchronously succeeds
        """
        mail.send_async(**_test_data())
        # check that mail hasn't been sent yet
        messages = self.mail_stub.get_sent_messages()
        self.assertEqual(0, len(messages))
        # run the queued tasks and check they succeed
        for response in _run_tasks(self.taskq_stub, settings.EMAIL_QUEUE):
            self.assertEqual(response.status_int, 200)
        # check that mail was actually sent after the queue's been run
        messages = self.mail_stub.get_sent_messages()
        self.assertEqual(1, len(messages))
        self.assertEqual('someotheremail@somedomain.com', messages[0].to)
| nacelle/nacelle | nacelle/contrib/mail/tests.py | Python | mit | 4,967 |
import hashlib
import json
import os
import uuid
from django import forms
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.fields import ParentalKey, ParentalManyToManyField
from modelcluster.models import ClusterableModel
from taggit.managers import TaggableManager
from taggit.models import ItemBase, TagBase, TaggedItemBase
from wagtail.admin.edit_handlers import (
FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel,
TabbedInterface)
from wagtail.admin.forms import WagtailAdminPageForm
from wagtail.admin.mail import send_mail
from wagtail.contrib.forms.forms import FormBuilder
from wagtail.contrib.forms.models import (
FORM_FIELD_CHOICES, AbstractEmailForm, AbstractFormField, AbstractFormSubmission)
from wagtail.contrib.forms.views import SubmissionsListView
from wagtail.contrib.settings.models import BaseSetting, register_setting
from wagtail.contrib.sitemaps import Sitemap
from wagtail.contrib.table_block.blocks import TableBlock
from wagtail.core.blocks import CharBlock, RawHTMLBlock, RichTextBlock, StructBlock
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Orderable, Page, PageManager, PageQuerySet, Task
from wagtail.documents.edit_handlers import DocumentChooserPanel
from wagtail.documents.models import AbstractDocument, Document
from wagtail.images.blocks import ImageChooserBlock
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.images.models import AbstractImage, AbstractRendition, Image
from wagtail.search import index
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from wagtail.snippets.models import register_snippet
from wagtail.utils.decorators import cached_classmethod
from .forms import FormClassAdditionalFieldPageForm, ValidatedPageForm
# Choices for event audience fields.
EVENT_AUDIENCE_CHOICES = (
    ('public', "Public"),
    ('private', "Private"),
)
# Promote-tab panels shared by several page types below.
COMMON_PANELS = (
    FieldPanel('slug'),
    FieldPanel('seo_title'),
    FieldPanel('show_in_menus'),
    FieldPanel('search_description'),
)
# Link fields
class LinkFields(models.Model):
    """Abstract mixin: a link to an external URL, a Page, or a Document.

    The `link` property resolves the page URL first, then the document
    URL, then the raw external URL.
    """
    link_external = models.URLField("External link", blank=True)
    link_page = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        related_name='+',
        on_delete=models.CASCADE
    )
    link_document = models.ForeignKey(
        'wagtaildocs.Document',
        null=True,
        blank=True,
        related_name='+',
        on_delete=models.CASCADE
    )
    @property
    def link(self):
        # Precedence: page > document > external URL.
        if self.link_page:
            return self.link_page.url
        elif self.link_document:
            return self.link_document.url
        else:
            return self.link_external
    panels = [
        FieldPanel('link_external'),
        PageChooserPanel('link_page'),
        DocumentChooserPanel('link_document'),
    ]
    class Meta:
        abstract = True
# Carousel items
class CarouselItem(LinkFields):
    """Abstract carousel entry: an image or embed with a caption, plus the
    inherited link fields."""
    image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    embed_url = models.URLField("Embed URL", blank=True)
    caption = models.CharField(max_length=255, blank=True)
    panels = [
        ImageChooserPanel('image'),
        FieldPanel('embed_url'),
        FieldPanel('caption'),
        MultiFieldPanel(LinkFields.panels, "Link"),
    ]
    class Meta:
        abstract = True
# Related links
class RelatedLink(LinkFields):
    """Abstract titled link: a title plus the inherited link fields."""
    title = models.CharField(max_length=255, help_text="Link title")
    panels = [
        FieldPanel('title'),
        MultiFieldPanel(LinkFields.panels, "Link"),
    ]
    class Meta:
        abstract = True
# Simple page
class SimplePage(Page):
    """Minimal concrete page type with a single text body."""
    content = models.TextField()
    content_panels = [
        FieldPanel('title', classname="full title"),
        FieldPanel('content'),
    ]
    def get_admin_display_title(self):
        # Suffix makes this page type identifiable in admin listings.
        return "%s (simple page)" % super().get_admin_display_title()
# Page with Excluded Fields when copied
class PageWithExcludedCopyField(Page):
    """Page type exercising `exclude_fields_in_copy`: `special_field` keeps
    its default value on copies instead of being duplicated."""
    content = models.TextField()
    # Exclude this field from being copied
    special_field = models.CharField(
        blank=True, max_length=255, default='Very Special')
    exclude_fields_in_copy = ['special_field']
    content_panels = [
        FieldPanel('title', classname="full title"),
        FieldPanel('special_field'),
        FieldPanel('content'),
    ]
class PageWithOldStyleRouteMethod(Page):
    """
    Prior to Wagtail 0.4, the route() method on Page returned an HttpResponse
    rather than a Page instance. As subclasses of Page may override route,
    we need to continue accepting this convention (albeit as a deprecated API).
    """
    content = models.TextField()
    template = 'tests/simple_page.html'
    def route(self, request, path_components):
        # Old-style contract: return the HttpResponse directly.
        return self.serve(request)
# File page
class FilePage(Page):
    # Page exposing a single uploaded file.
    file_field = models.FileField()


FilePage.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('file_field'),
]


# Event page
class EventPageCarouselItem(Orderable, CarouselItem):
    # Carousel entries attached to an EventPage.
    page = ParentalKey('tests.EventPage', related_name='carousel_items', on_delete=models.CASCADE)


class EventPageRelatedLink(Orderable, RelatedLink):
    # Related links attached to an EventPage.
    page = ParentalKey('tests.EventPage', related_name='related_links', on_delete=models.CASCADE)


class EventPageSpeakerAward(Orderable, models.Model):
    # An award won by a speaker; edited inline on the speaker record.
    speaker = ParentalKey('tests.EventPageSpeaker', related_name='awards', on_delete=models.CASCADE)
    name = models.CharField("Award name", max_length=255)
    date_awarded = models.DateField(null=True, blank=True)

    panels = [
        FieldPanel('name'),
        FieldPanel('date_awarded'),
    ]
class EventPageSpeaker(Orderable, LinkFields, ClusterableModel):
    # A speaker at an event: name, optional image, link via LinkFields,
    # plus inline awards.
    page = ParentalKey('tests.EventPage', related_name='speakers', related_query_name='speaker', on_delete=models.CASCADE)
    first_name = models.CharField("Name", max_length=255, blank=True)
    last_name = models.CharField("Surname", max_length=255, blank=True)
    image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    @property
    def name_display(self):
        # Display name; keeps the joining space even if one part is blank.
        return self.first_name + " " + self.last_name

    panels = [
        FieldPanel('first_name'),
        FieldPanel('last_name'),
        ImageChooserPanel('image'),
        MultiFieldPanel(LinkFields.panels, "Link"),
        InlinePanel('awards', label="Awards"),
    ]


class EventCategory(models.Model):
    # Simple named category assignable to events.
    name = models.CharField("Name", max_length=255)

    def __str__(self):
        return self.name
# Override the standard WagtailAdminPageForm to add validation on start/end dates
# that appears as a non-field error
class EventPageForm(WagtailAdminPageForm):
    """Page form that rejects events whose end date precedes the start date."""

    def clean(self):
        cleaned_data = super().clean()

        # Use .get() rather than indexing: if either date field failed its
        # own field-level validation it is absent from cleaned_data, and
        # indexing would raise KeyError instead of surfacing form errors.
        start_date = cleaned_data.get('date_from')
        end_date = cleaned_data.get('date_to')

        # Make sure that the event starts before it ends
        if start_date and end_date and start_date > end_date:
            raise ValidationError('The end date must be after the start date')

        return cleaned_data
class EventPage(Page):
    # Start/end dates; single-day events may leave date_to blank.
    date_from = models.DateField("Start date", null=True)
    date_to = models.DateField(
        "End date",
        null=True,
        blank=True,
        help_text="Not required if event is on a single day"
    )
    time_from = models.TimeField("Start time", null=True, blank=True)
    time_to = models.TimeField("End time", null=True, blank=True)
    audience = models.CharField(max_length=255, choices=EVENT_AUDIENCE_CHOICES)
    location = models.CharField(max_length=255)
    body = RichTextField(blank=True)
    cost = models.CharField(max_length=255)
    signup_link = models.URLField(blank=True)
    feed_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    categories = ParentalManyToManyField(EventCategory, blank=True)

    search_fields = [
        index.SearchField('get_audience_display'),
        index.SearchField('location'),
        index.SearchField('body'),
        index.FilterField('url_path'),
    ]

    # Custom template shown when the page is password-protected.
    password_required_template = 'tests/event_page_password_required.html'

    # Adds start/end date cross-validation (see EventPageForm.clean).
    base_form_class = EventPageForm


EventPage.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('date_from'),
    FieldPanel('date_to'),
    FieldPanel('time_from'),
    FieldPanel('time_to'),
    FieldPanel('location'),
    FieldPanel('audience'),
    FieldPanel('cost'),
    FieldPanel('signup_link'),
    InlinePanel('carousel_items', label="Carousel items"),
    FieldPanel('body', classname="full"),
    InlinePanel('speakers', label="Speakers", heading="Speaker lineup"),
    InlinePanel('related_links', label="Related links"),
    FieldPanel('categories'),
    # InlinePanel related model uses `pk` not `id`
    InlinePanel('head_counts', label='Head Counts'),
]

EventPage.promote_panels = [
    MultiFieldPanel(COMMON_PANELS, "Common page configuration"),
    ImageChooserPanel('feed_image'),
]


class HeadCountRelatedModelUsingPK(models.Model):
    """Related model that uses a custom primary key (pk) not id"""
    custom_id = models.AutoField(primary_key=True)
    event_page = ParentalKey(
        EventPage,
        on_delete=models.CASCADE,
        related_name='head_counts'
    )
    head_count = models.IntegerField()
    panels = [FieldPanel('head_count')]
# Override the standard WagtailAdminPageForm to add field that is not in model
# so that we can test additional potential issues like comparing versions
class FormClassAdditionalFieldPage(Page):
    location = models.CharField(max_length=255)
    body = RichTextField(blank=True)

    content_panels = [
        FieldPanel('title', classname="full title"),
        FieldPanel('location'),
        FieldPanel('body'),
        FieldPanel('code'),  # not in model, see set base_form_class
    ]

    # The form class is expected to define the extra 'code' field.
    base_form_class = FormClassAdditionalFieldPageForm
# Just to be able to test multi table inheritance
class SingleEventPage(EventPage):
    excerpt = models.TextField(
        max_length=255,
        blank=True,
        null=True,
        help_text="Short text to describe what is this action about"
    )

    # Give this page model a custom URL routing scheme
    def get_url_parts(self, request=None):
        url_parts = super().get_url_parts(request=request)

        if url_parts is None:
            return None
        else:
            site_id, root_url, page_path = url_parts
            # Canonical URL carries a fixed extra path segment.
            return (site_id, root_url, page_path + 'pointless-suffix/')

    def route(self, request, path_components):
        if path_components == ['pointless-suffix']:
            # treat this as equivalent to a request for this page
            return super().route(request, [])
        else:
            # fall back to default routing rules
            return super().route(request, path_components)

    def get_admin_display_title(self):
        return "%s (single event)" % super().get_admin_display_title()


SingleEventPage.content_panels = [FieldPanel('excerpt')] + EventPage.content_panels
# "custom" sitemap object
class EventSitemap(Sitemap):
pass
# Event index (has a separate AJAX template, and a custom template context)
class EventIndex(Page):
intro = RichTextField(blank=True)
ajax_template = 'tests/includes/event_listing.html'
def get_events(self):
return self.get_children().live().type(EventPage)
def get_paginator(self):
return Paginator(self.get_events(), 4)
def get_context(self, request, page=1):
# Pagination
paginator = self.get_paginator()
try:
events = paginator.page(page)
except PageNotAnInteger:
events = paginator.page(1)
except EmptyPage:
events = paginator.page(paginator.num_pages)
# Update context
context = super().get_context(request)
context['events'] = events
return context
def route(self, request, path_components):
if self.live and len(path_components) == 1:
try:
return self.serve(request, page=int(path_components[0]))
except (TypeError, ValueError):
pass
return super().route(request, path_components)
def get_static_site_paths(self):
# Get page count
page_count = self.get_paginator().num_pages
# Yield a path for each page
for page in range(page_count):
yield '/%d/' % (page + 1)
# Yield from superclass
for path in super().get_static_site_paths():
yield path
def get_sitemap_urls(self, request=None):
# Add past events url to sitemap
return super().get_sitemap_urls(request=request) + [
{
'location': self.full_url + 'past/',
'lastmod': self.latest_revision_created_at
}
]
def get_cached_paths(self):
return super().get_cached_paths() + [
'/past/'
]
EventIndex.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('intro', classname="full"),
]
class FormField(AbstractFormField):
    # Form fields attached to FormPage.
    page = ParentalKey('FormPage', related_name='form_fields', on_delete=models.CASCADE)


class FormPage(AbstractEmailForm):
    def get_context(self, request):
        context = super().get_context(request)
        context['greeting'] = "hello world"
        return context

    # This is redundant (SubmissionsListView is the default view class), but importing
    # SubmissionsListView in this models.py helps us to confirm that this recipe
    # https://docs.wagtail.io/en/stable/reference/contrib/forms/customisation.html#customise-form-submissions-listing-in-wagtail-admin
    # works without triggering circular dependency issues -
    # see https://github.com/wagtail/wagtail/issues/6265
    submissions_list_view_class = SubmissionsListView


FormPage.content_panels = [
    FieldPanel('title', classname="full title"),
    InlinePanel('form_fields', label="Form fields"),
    MultiFieldPanel([
        FieldPanel('to_address', classname="full"),
        FieldPanel('from_address', classname="full"),
        FieldPanel('subject', classname="full"),
    ], "Email")
]


# FormPage with a non-HTML extension
class JadeFormField(AbstractFormField):
    page = ParentalKey('JadeFormPage', related_name='form_fields', on_delete=models.CASCADE)


class JadeFormPage(AbstractEmailForm):
    # Exercises template resolution for a non-.html template extension.
    template = "tests/form_page.jade"


JadeFormPage.content_panels = [
    FieldPanel('title', classname="full title"),
    InlinePanel('form_fields', label="Form fields"),
    MultiFieldPanel([
        FieldPanel('to_address', classname="full"),
        FieldPanel('from_address', classname="full"),
        FieldPanel('subject', classname="full"),
    ], "Email")
]
# Form page that redirects to a different page
class RedirectFormField(AbstractFormField):
    # Form fields attached to FormPageWithRedirect.
    page = ParentalKey('FormPageWithRedirect', related_name='form_fields', on_delete=models.CASCADE)
class FormPageWithRedirect(AbstractEmailForm):
    """Form page that redirects to a 'thank you' page after submission."""
    thank_you_redirect_page = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )

    def get_context(self, request):
        # Zero-argument super(), consistent with the rest of this module.
        context = super().get_context(request)
        context['greeting'] = "hello world"
        return context

    def render_landing_page(self, request, form_submission=None, *args, **kwargs):
        """
        Renders the landing page OR if a receipt_page_redirect is chosen redirects to this page.
        """
        if self.thank_you_redirect_page:
            return redirect(self.thank_you_redirect_page.url, permanent=False)

        return super().render_landing_page(request, form_submission, *args, **kwargs)


FormPageWithRedirect.content_panels = [
    FieldPanel('title', classname="full title"),
    PageChooserPanel('thank_you_redirect_page'),
    InlinePanel('form_fields', label="Form fields"),
    MultiFieldPanel([
        FieldPanel('to_address', classname="full"),
        FieldPanel('from_address', classname="full"),
        FieldPanel('subject', classname="full"),
    ], "Email")
]
# FormPage with a custom FormSubmission
class FormPageWithCustomSubmission(AbstractEmailForm):
    """
    This Form page:
        * Have custom submission model
        * Have custom related_name (see `FormFieldWithCustomSubmission.page`)
        * Saves reference to a user
        * Doesn't render html form, if submission for current user is present
    """
    intro = RichTextField(blank=True)
    thank_you_text = RichTextField(blank=True)

    def get_context(self, request, *args, **kwargs):
        context = super().get_context(request)
        context['greeting'] = "hello world"
        return context

    def get_form_fields(self):
        # Fields live under the custom related_name, not 'form_fields'.
        return self.custom_form_fields.all()

    def get_data_fields(self):
        data_fields = [
            ('useremail', 'User email'),
        ]
        data_fields += super().get_data_fields()

        return data_fields

    def get_submission_class(self):
        return CustomFormPageSubmission

    def process_form_submission(self, form):
        # Create the submission, recording the submitting user.
        form_submission = self.get_submission_class().objects.create(
            form_data=json.dumps(form.cleaned_data, cls=DjangoJSONEncoder),
            page=self, user=form.user
        )

        if self.to_address:
            addresses = [x.strip() for x in self.to_address.split(',')]
            content = '\n'.join([x[1].label + ': ' + str(form.data.get(x[0])) for x in form.fields.items()])
            send_mail(self.subject, content, addresses, self.from_address,)

        # process_form_submission should now return the created form_submission
        return form_submission

    def serve(self, request, *args, **kwargs):
        # One submission per user: if the current user already submitted,
        # render the page without the form.
        if self.get_submission_class().objects.filter(page=self, user__pk=request.user.pk).exists():
            return TemplateResponse(
                request,
                self.template,
                self.get_context(request)
            )

        return super().serve(request, *args, **kwargs)


FormPageWithCustomSubmission.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('intro', classname="full"),
    InlinePanel('custom_form_fields', label="Form fields"),
    FieldPanel('thank_you_text', classname="full"),
    MultiFieldPanel([
        FieldPanel('to_address', classname="full"),
        FieldPanel('from_address', classname="full"),
        FieldPanel('subject', classname="full"),
    ], "Email")
]


class FormFieldWithCustomSubmission(AbstractFormField):
    page = ParentalKey(FormPageWithCustomSubmission, on_delete=models.CASCADE, related_name='custom_form_fields')


class CustomFormPageSubmission(AbstractFormSubmission):
    # Submission that additionally records the submitting user.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

    def get_data(self):
        form_data = super().get_data()
        form_data.update({
            'useremail': self.user.email,
        })

        return form_data
# Custom form page with custom submission listing view and form submission
class FormFieldForCustomListViewPage(AbstractFormField):
    page = ParentalKey(
        'FormPageWithCustomSubmissionListView',
        related_name='form_fields',
        on_delete=models.CASCADE
    )


class FormPageWithCustomSubmissionListView(AbstractEmailForm):
    """Form Page with customised submissions listing view"""
    intro = RichTextField(blank=True)
    thank_you_text = RichTextField(blank=True)

    def get_submissions_list_view_class(self):
        # Imported lazily to avoid a circular import at module load time.
        from .views import CustomSubmissionsListView
        return CustomSubmissionsListView

    def get_submission_class(self):
        return CustomFormPageSubmission

    def get_data_fields(self):
        data_fields = [
            ('useremail', 'User email'),
        ]
        data_fields += super().get_data_fields()

        return data_fields

    content_panels = [
        FieldPanel('title', classname="full title"),
        FieldPanel('intro', classname="full"),
        InlinePanel('form_fields', label="Form fields"),
        FieldPanel('thank_you_text', classname="full"),
        MultiFieldPanel([
            FieldPanel('to_address', classname="full"),
            FieldPanel('from_address', classname="full"),
            FieldPanel('subject', classname="full"),
        ], "Email")
    ]
# FormPage with custom FormBuilder
# Extend the stock form-field choices with a custom 'ipaddress' type.
EXTENDED_CHOICES = FORM_FIELD_CHOICES + (('ipaddress', 'IP Address'),)


class ExtendedFormField(AbstractFormField):
    """Override the field_type field with extended choices."""
    page = ParentalKey(
        'FormPageWithCustomFormBuilder',
        related_name='form_fields',
        on_delete=models.CASCADE)
    # Redefined so the extra 'ipaddress' choice is accepted.
    field_type = models.CharField(
        verbose_name='field type', max_length=16, choices=EXTENDED_CHOICES)
class CustomFormBuilder(FormBuilder):
    """
    A custom FormBuilder that has an 'ipaddress' field with
    customised create_singleline_field with shorter max_length
    """

    def create_singleline_field(self, field, options):
        # NOTE(review): mutates the options dict supplied by the caller —
        # presumably FormBuilder builds a fresh dict per field; confirm.
        options['max_length'] = 120  # usual default is 255
        return forms.CharField(**options)

    def create_ipaddress_field(self, field, options):
        # Backs the extra 'ipaddress' choice added via EXTENDED_CHOICES.
        return forms.GenericIPAddressField(**options)
class FormPageWithCustomFormBuilder(AbstractEmailForm):
    """
    A Form page that has a custom form builder and uses a custom
    form field model with additional field_type choices.
    """
    form_builder = CustomFormBuilder
    content_panels = [
        FieldPanel('title', classname="full title"),
        InlinePanel('form_fields', label="Form fields"),
        MultiFieldPanel([
            FieldPanel('to_address', classname="full"),
            FieldPanel('from_address', classname="full"),
            FieldPanel('subject', classname="full"),
        ], "Email")
    ]
# Snippets
class AdvertPlacement(models.Model):
    # Joins an Advert to a Page with a per-placement colour.
    page = ParentalKey('wagtailcore.Page', related_name='advert_placements', on_delete=models.CASCADE)
    advert = models.ForeignKey('tests.Advert', related_name='+', on_delete=models.CASCADE)
    colour = models.CharField(max_length=255)


class AdvertTag(TaggedItemBase):
    content_object = ParentalKey('Advert', related_name='tagged_items', on_delete=models.CASCADE)


class Advert(ClusterableModel):
    # Basic taggable snippet.
    url = models.URLField(null=True, blank=True)
    text = models.CharField(max_length=255)

    tags = TaggableManager(through=AdvertTag, blank=True)

    panels = [
        FieldPanel('url'),
        FieldPanel('text'),
        FieldPanel('tags'),
    ]

    def __str__(self):
        return self.text


register_snippet(Advert)


class AdvertWithCustomPrimaryKey(ClusterableModel):
    # Snippet with a user-supplied string primary key.
    advert_id = models.CharField(max_length=255, primary_key=True)
    url = models.URLField(null=True, blank=True)
    text = models.CharField(max_length=255)

    panels = [
        FieldPanel('url'),
        FieldPanel('text'),
    ]

    def __str__(self):
        return self.text


register_snippet(AdvertWithCustomPrimaryKey)


class AdvertWithCustomUUIDPrimaryKey(ClusterableModel):
    # Snippet with an auto-generated UUID primary key.
    advert_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    url = models.URLField(null=True, blank=True)
    text = models.CharField(max_length=255)

    panels = [
        FieldPanel('url'),
        FieldPanel('text'),
    ]

    def __str__(self):
        return self.text


register_snippet(AdvertWithCustomUUIDPrimaryKey)


class AdvertWithTabbedInterface(models.Model):
    # Snippet whose admin form is split across two tabs.
    url = models.URLField(null=True, blank=True)
    text = models.CharField(max_length=255)
    something_else = models.CharField(max_length=255)

    advert_panels = [
        FieldPanel('url'),
        FieldPanel('text'),
    ]

    other_panels = [
        FieldPanel('something_else'),
    ]

    edit_handler = TabbedInterface([
        ObjectList(advert_panels, heading='Advert'),
        ObjectList(other_panels, heading='Other'),
    ])

    def __str__(self):
        return self.text

    class Meta:
        ordering = ('text',)


register_snippet(AdvertWithTabbedInterface)
class StandardIndex(Page):
    """ Index for the site """
    parent_page_types = [Page]


# A custom panel setup where all Promote fields are placed in the Content tab instead;
# we use this to test that the 'promote' tab is left out of the output when empty
StandardIndex.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('seo_title'),
    FieldPanel('slug'),
    InlinePanel('advert_placements', label="Adverts"),
]
StandardIndex.promote_panels = []


class StandardChild(Page):
    pass


# Test overriding edit_handler with a custom one
StandardChild.edit_handler = TabbedInterface([
    ObjectList(StandardChild.content_panels, heading='Content'),
    ObjectList(StandardChild.promote_panels, heading='Promote'),
    ObjectList(StandardChild.settings_panels, heading='Settings', classname='settings'),
    ObjectList([], heading='Dinosaurs'),
], base_form_class=WagtailAdminPageForm)
class BusinessIndex(Page):
    """ Can be placed anywhere, can only have Business children """
    subpage_types = ['tests.BusinessChild', 'tests.BusinessSubIndex']


class BusinessSubIndex(Page):
    """ Can be placed under BusinessIndex, and have BusinessChild children """
    # BusinessNowherePage is 'incorrectly' added here as a possible child.
    # The rules on BusinessNowherePage prevent it from being a child here though.
    subpage_types = ['tests.BusinessChild', 'tests.BusinessNowherePage']
    parent_page_types = ['tests.BusinessIndex', 'tests.BusinessChild']


class BusinessChild(Page):
    """ Can only be placed under Business indexes, no children allowed """
    subpage_types = []
    # Deliberately mixes a string reference with a direct class reference.
    parent_page_types = ['tests.BusinessIndex', BusinessSubIndex]


class BusinessNowherePage(Page):
    """ Not allowed to be placed anywhere """
    parent_page_types = []
class TaggedPageTag(TaggedItemBase):
    content_object = ParentalKey('tests.TaggedPage', related_name='tagged_items', on_delete=models.CASCADE)


class TaggedPage(Page):
    # Page with free tagging via django-taggit / modelcluster.
    tags = ClusterTaggableManager(through=TaggedPageTag, blank=True)


TaggedPage.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('tags'),
]
class SingletonPage(Page):
    """Page type of which at most one instance may ever be created."""

    @classmethod
    def can_create_at(cls, parent):
        # You can only create one of these!
        # Zero-argument super(), consistent with the rest of this module.
        return super().can_create_at(parent) \
            and not cls.objects.exists()
class SingletonPageViaMaxCount(Page):
    # Same singleton behaviour as SingletonPage, via the max_count API.
    max_count = 1


class PageChooserModel(models.Model):
    page = models.ForeignKey('wagtailcore.Page', help_text='help text', on_delete=models.CASCADE)


class EventPageChooserModel(models.Model):
    page = models.ForeignKey('tests.EventPage', help_text='more help text', on_delete=models.CASCADE)


class SnippetChooserModel(models.Model):
    advert = models.ForeignKey(Advert, help_text='help text', on_delete=models.CASCADE)

    panels = [
        SnippetChooserPanel('advert'),
    ]


class SnippetChooserModelWithCustomPrimaryKey(models.Model):
    advertwithcustomprimarykey = models.ForeignKey(AdvertWithCustomPrimaryKey, help_text='help text', on_delete=models.CASCADE)

    panels = [
        SnippetChooserPanel('advertwithcustomprimarykey'),
    ]
class CustomImage(AbstractImage):
    # Custom image model with caption fields plus one field deliberately
    # left out of the admin form.
    caption = models.CharField(max_length=255, blank=True)
    fancy_caption = RichTextField(blank=True)
    not_editable_field = models.CharField(max_length=255, blank=True)

    admin_form_fields = Image.admin_form_fields + (
        'caption',
        'fancy_caption',
    )

    class Meta:
        unique_together = [
            ('title', 'collection')
        ]


class CustomRendition(AbstractRendition):
    image = models.ForeignKey(CustomImage, related_name='renditions', on_delete=models.CASCADE)

    class Meta:
        # Standard uniqueness constraint required of rendition models.
        unique_together = (
            ('image', 'filter_spec', 'focal_point_key'),
        )


# Custom image model with a required field
class CustomImageWithAuthor(AbstractImage):
    author = models.CharField(max_length=255)

    admin_form_fields = Image.admin_form_fields + (
        'author',
    )


class CustomRenditionWithAuthor(AbstractRendition):
    image = models.ForeignKey(CustomImageWithAuthor, related_name='renditions', on_delete=models.CASCADE)

    class Meta:
        unique_together = (
            ('image', 'filter_spec', 'focal_point_key'),
        )


class CustomDocument(AbstractDocument):
    # Custom document model with extra description fields.
    description = models.TextField(blank=True)
    fancy_description = RichTextField(blank=True)
    admin_form_fields = Document.admin_form_fields + (
        'description',
        'fancy_description'
    )

    class Meta:
        unique_together = [
            ('title', 'collection')
        ]
class StreamModel(models.Model):
    # Non-page model exposing a StreamField.
    body = StreamField([
        ('text', CharBlock()),
        ('rich_text', RichTextBlock()),
        ('image', ImageChooserBlock()),
    ])
class ExtendedImageChooserBlock(ImageChooserBlock):
    """
    Example of Block with custom get_api_representation method.
    If the request has an 'extended' query param, it returns a dict of id and title,
    otherwise, it returns the default value.
    """

    def get_api_representation(self, value, context=None):
        image_id = super().get_api_representation(value, context=context)
        # Guard against context=None (the parameter's default): without the
        # truthiness check, `'request' in context` raises TypeError when no
        # context is supplied.
        if context and 'request' in context and context['request'].query_params.get('extended', False):
            return {
                'id': image_id,
                'title': value.title
            }
        return image_id
class StreamPage(Page):
    body = StreamField([
        ('text', CharBlock()),
        ('rich_text', RichTextBlock()),
        ('image', ExtendedImageChooserBlock()),
        ('product', StructBlock([
            ('name', CharBlock()),
            ('price', CharBlock()),
        ])),
        ('raw_html', RawHTMLBlock()),
    ])

    api_fields = ('body',)

    content_panels = [
        FieldPanel('title'),
        StreamFieldPanel('body'),
    ]

    # No preview modes: exercises pages without a preview.
    preview_modes = []


class DefaultStreamPage(Page):
    body = StreamField([
        ('text', CharBlock()),
        ('rich_text', RichTextBlock()),
        ('image', ImageChooserBlock()),
    ], default='')

    content_panels = [
        FieldPanel('title'),
        StreamFieldPanel('body'),
    ]
class MTIBasePage(Page):
    # Base page excluded from the page-type creation list.
    is_creatable = False

    class Meta:
        verbose_name = "MTI Base page"


class MTIChildPage(MTIBasePage):
    # Should be creatable by default, no need to set anything
    pass


class AbstractPage(Page):
    class Meta:
        abstract = True
@register_setting
class TestSetting(BaseSetting):
    title = models.CharField(max_length=100)
    email = models.EmailField(max_length=50)


@register_setting
class ImportantPages(BaseSetting):
    # Site-wide pointers to key pages.
    sign_up_page = models.ForeignKey(
        'wagtailcore.Page', related_name="+", null=True, on_delete=models.SET_NULL)
    general_terms_page = models.ForeignKey(
        'wagtailcore.Page', related_name="+", null=True, on_delete=models.SET_NULL)
    privacy_policy_page = models.ForeignKey(
        'wagtailcore.Page', related_name="+", null=True, on_delete=models.SET_NULL)


@register_setting(icon="tag")
class IconSetting(BaseSetting):
    pass


class NotYetRegisteredSetting(BaseSetting):
    # Deliberately left unregistered; tests register it on demand.
    pass


@register_setting
class FileUploadSetting(BaseSetting):
    file = models.FileField()
class BlogCategory(models.Model):
    name = models.CharField(unique=True, max_length=80)


class BlogCategoryBlogPage(models.Model):
    # Explicit through model for the blog_categories M2M below.
    category = models.ForeignKey(BlogCategory, related_name="+", on_delete=models.CASCADE)
    page = ParentalKey('ManyToManyBlogPage', related_name='categories', on_delete=models.CASCADE)
    panels = [
        FieldPanel('category'),
    ]


class ManyToManyBlogPage(Page):
    """
    A page type with two different kinds of M2M relation.
    We don't formally support these, but we don't want them to cause
    hard breakages either.
    """
    body = RichTextField(blank=True)
    adverts = models.ManyToManyField(Advert, blank=True)
    blog_categories = models.ManyToManyField(
        BlogCategory, through=BlogCategoryBlogPage, blank=True)

    # make first_published_at editable on this page model
    settings_panels = Page.settings_panels + [
        FieldPanel('first_published_at'),
    ]
class OneToOnePage(Page):
    """
    A Page containing a O2O relation.
    """
    # NOTE(review): RichTextBlock is a StreamField block, not a model field,
    # so `body` here is a plain class attribute rather than a DB column —
    # presumably deliberate in this test app; confirm before "fixing".
    body = RichTextBlock(blank=True)
    page_ptr = models.OneToOneField(Page, parent_link=True,
                                    related_name='+', on_delete=models.CASCADE)


class GenericSnippetPage(Page):
    """
    A page containing a reference to an arbitrary snippet (or any model for that matter)
    linked by a GenericForeignKey
    """
    snippet_content_type = models.ForeignKey(ContentType, on_delete=models.SET_NULL, null=True)
    snippet_object_id = models.PositiveIntegerField(null=True)
    snippet_content_object = GenericForeignKey('snippet_content_type', 'snippet_object_id')
class CustomImageFilePath(AbstractImage):
    def get_upload_to(self, filename):
        """Create a path that's file-system friendly.

        By hashing the file's contents we guarantee an equal distribution
        of files within our root directories. This also gives us a
        better chance of uploading images with the same filename, but
        different contents - this isn't guaranteed as we're only using
        the first three characters of the checksum.
        """
        original_filepath = super().get_upload_to(filename)
        folder_name, filename = original_filepath.split(os.path.sep)

        # Ensure that we consume the entire file, we can't guarantee that
        # the stream has not been partially (or entirely) consumed by
        # another process
        original_position = self.file.tell()
        self.file.seek(0)

        hash256 = hashlib.sha256()
        # Read in 64 KiB chunks rather than 256 bytes: identical checksum,
        # far fewer Python-level iterations for large images.
        while True:
            data = self.file.read(65536)
            if not data:
                break
            hash256.update(data)
        checksum = hash256.hexdigest()

        # Restore the stream position so other consumers are unaffected.
        self.file.seek(original_position)

        return os.path.join(folder_name, checksum[:3], filename)
class CustomPageQuerySet(PageQuerySet):
    def about_spam(self):
        # Pages whose title mentions spam.
        return self.filter(title__contains='spam')


# Manager combining PageManager with the custom queryset methods.
CustomManager = PageManager.from_queryset(CustomPageQuerySet)


class CustomManagerPage(Page):
    objects = CustomManager()


class MyBasePage(Page):
    """
    A base Page model, used to set site-wide defaults and overrides.
    """
    objects = CustomManager()

    class Meta:
        abstract = True


class MyCustomPage(MyBasePage):
    pass
class ValidatedPage(Page):
    foo = models.CharField(max_length=255)

    # Form class adds custom validation for 'foo'.
    base_form_class = ValidatedPageForm
    content_panels = Page.content_panels + [
        FieldPanel('foo'),
    ]


class DefaultRichTextFieldPage(Page):
    body = RichTextField()

    content_panels = [
        FieldPanel('title', classname="full title"),
        FieldPanel('body'),
    ]


class DefaultRichBlockFieldPage(Page):
    body = StreamField([
        ('rich_text', RichTextBlock()),
    ])

    content_panels = Page.content_panels + [
        StreamFieldPanel('body')
    ]


class CustomRichTextFieldPage(Page):
    # Uses the 'custom' rich text editor configured in settings.
    body = RichTextField(editor='custom')

    content_panels = [
        FieldPanel('title', classname="full title"),
        FieldPanel('body'),
    ]


class CustomRichBlockFieldPage(Page):
    body = StreamField([
        ('rich_text', RichTextBlock(editor='custom')),
    ])

    content_panels = [
        FieldPanel('title', classname="full title"),
        StreamFieldPanel('body'),
    ]


class RichTextFieldWithFeaturesPage(Page):
    # 'made-up-feature' exercises handling of unknown feature names.
    body = RichTextField(features=['quotation', 'embed', 'made-up-feature'])

    content_panels = [
        FieldPanel('title', classname="full title"),
        FieldPanel('body'),
    ]
# a page that only contains RichTextField within an InlinePanel,
# to test that the inline child's form media gets pulled through
class SectionedRichTextPageSection(Orderable):
    page = ParentalKey('tests.SectionedRichTextPage', related_name='sections', on_delete=models.CASCADE)
    body = RichTextField()

    panels = [
        FieldPanel('body')
    ]


class SectionedRichTextPage(Page):
    content_panels = [
        FieldPanel('title', classname="full title"),
        InlinePanel('sections')
    ]


class InlineStreamPageSection(Orderable):
    # StreamField-only inline child of InlineStreamPage.
    page = ParentalKey('tests.InlineStreamPage', related_name='sections', on_delete=models.CASCADE)
    body = StreamField([
        ('text', CharBlock()),
        ('rich_text', RichTextBlock()),
        ('image', ImageChooserBlock()),
    ])
    panels = [
        StreamFieldPanel('body')
    ]


class InlineStreamPage(Page):
    content_panels = [
        FieldPanel('title', classname="full title"),
        InlinePanel('sections')
    ]


class TableBlockStreamPage(Page):
    table = StreamField([('table', TableBlock())])

    content_panels = [StreamFieldPanel('table')]
class UserProfile(models.Model):
    # Wagtail's schema must be able to coexist alongside a custom UserProfile model
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    favourite_colour = models.CharField(max_length=255)


class PanelSettings(TestSetting):
    # Setting edited via a plain panels list.
    panels = [
        FieldPanel('title')
    ]


class TabbedSettings(TestSetting):
    # Setting edited via a tabbed edit handler.
    edit_handler = TabbedInterface([
        ObjectList([
            FieldPanel('title')
        ], heading='First tab'),
        ObjectList([
            FieldPanel('email')
        ], heading='Second tab'),
    ])


class AlwaysShowInMenusPage(Page):
    # New instances default to 'show in menus' checked.
    show_in_menus_default = True


# test for AddField migrations on StreamFields using various default values
class AddedStreamFieldWithoutDefaultPage(Page):
    body = StreamField([
        ('title', CharBlock())
    ])


class AddedStreamFieldWithEmptyStringDefaultPage(Page):
    body = StreamField([
        ('title', CharBlock())
    ], default='')


class AddedStreamFieldWithEmptyListDefaultPage(Page):
    body = StreamField([
        ('title', CharBlock())
    ], default=[])
# test customising edit handler definitions on a per-request basis
class PerUserContentPanels(ObjectList):
    """ObjectList whose children are chosen per request: superusers get
    the instance's superuser_content_panels, everyone else the basic set.
    """

    def _replace_children_with_per_user_config(self):
        if self.request.user.is_superuser:
            panels = self.instance.superuser_content_panels
        else:
            panels = self.instance.basic_content_panels
        self.children = [
            panel.bind_to(model=self.model, instance=self.instance,
                          request=self.request, form=self.form)
            for panel in panels
        ]

    def on_instance_bound(self):
        # replace list of children when both instance and request are available
        if not self.request:
            super().on_instance_bound()
            return
        self._replace_children_with_per_user_config()

    def on_request_bound(self):
        # replace list of children when both instance and request are available
        if not self.instance:
            super().on_request_bound()
            return
        self._replace_children_with_per_user_config()
class PerUserPageMixin:
    # Per-role panel definitions; consumed by PerUserContentPanels.
    basic_content_panels = []
    superuser_content_panels = []

    @cached_classmethod
    def get_edit_handler(cls):
        # Built once per class thanks to cached_classmethod.
        tabs = []

        if cls.basic_content_panels and cls.superuser_content_panels:
            tabs.append(PerUserContentPanels(heading='Content'))
        if cls.promote_panels:
            tabs.append(ObjectList(cls.promote_panels,
                                   heading='Promote'))
        if cls.settings_panels:
            tabs.append(ObjectList(cls.settings_panels,
                                   heading='Settings',
                                   classname='settings'))

        edit_handler = TabbedInterface(tabs,
                                       base_form_class=cls.base_form_class)

        return edit_handler.bind_to(model=cls)


class SecretPage(PerUserPageMixin, Page):
    # Only superusers see secret_data in the edit form.
    boring_data = models.TextField()
    secret_data = models.TextField()
    basic_content_panels = Page.content_panels + [
        FieldPanel('boring_data'),
    ]
    superuser_content_panels = basic_content_panels + [
        FieldPanel('secret_data'),
    ]
class SimpleParentPage(Page):
    # `BusinessIndex` has been added to bring it in line with other tests
    subpage_types = ['tests.SimpleChildPage', BusinessIndex]


class SimpleChildPage(Page):
    # `Page` has been added to bring it in line with other tests
    parent_page_types = ['tests.SimpleParentPage', Page]

    # At most one child of this type per parent.
    max_count_per_parent = 1


class PersonPage(Page):
    first_name = models.CharField(
        max_length=255,
        verbose_name='First Name',
    )
    last_name = models.CharField(
        max_length=255,
        verbose_name='Last Name',
    )

    content_panels = Page.content_panels + [
        MultiFieldPanel([
            FieldPanel('first_name'),
            FieldPanel('last_name'),
        ], 'Person'),
        InlinePanel('addresses', label='Address'),
    ]

    class Meta:
        verbose_name = 'Person'
        verbose_name_plural = 'Persons'
class Address(index.Indexed, ClusterableModel, Orderable):
address = models.CharField(
max_length=255,
verbose_name='Address',
)
tags = ClusterTaggableManager(
through='tests.AddressTag',
blank=True,
)
person = ParentalKey(
to='tests.PersonPage',
related_name='addresses',
verbose_name='Person'
)
panels = [
FieldPanel('address'),
FieldPanel('tags'),
]
class Meta:
verbose_name = 'Address'
verbose_name_plural = 'Addresses'
class AddressTag(TaggedItemBase):
content_object = ParentalKey(
to='tests.Address',
on_delete=models.CASCADE,
related_name='tagged_items'
)
class RestaurantPage(Page):
tags = ClusterTaggableManager(through='tests.TaggedRestaurant', blank=True)
content_panels = Page.content_panels + [
FieldPanel('tags'),
]
class RestaurantTag(TagBase):
    """Custom tag model; free_tagging=False restricts tagging to
    pre-existing tags (new tags cannot be created from the tag field)."""
    free_tagging = False
    class Meta:
        verbose_name = "Tag"
        verbose_name_plural = "Tags"
class TaggedRestaurant(ItemBase):
    """Through-model linking RestaurantTag to RestaurantPage."""
    tag = models.ForeignKey(
        RestaurantTag, related_name="tagged_restaurants", on_delete=models.CASCADE
    )
    content_object = ParentalKey(
        to='tests.RestaurantPage',
        on_delete=models.CASCADE,
        related_name='tagged_items'
    )
class SimpleTask(Task):
    """Minimal workflow Task subclass with no additional behaviour."""
    pass
| takeflight/wagtail | wagtail/tests/testapp/models.py | Python | bsd-3-clause | 43,462 |
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import tensorflow.compat.v2 as tf
class SinusoidalsDS():
  """Generator of random-sinusoid regression tasks for meta-learning.

  Each task is a sinusoid y = A * sin(x + ph) with amplitude A and phase
  ph drawn per task; for every task a train set and an eval set are
  sampled from the same underlying sinusoid.
  """

  def __init__(self):
    # Stateless; nothing to initialise.  (Was a bare `None` expression
    # statement, which did nothing -- replaced with `pass`.)
    pass

  def _create_task(self):
    """Draw random (amplitude, phase) parameters for a single task."""
    A = np.random.uniform(low=.1, high=.5)
    ph = np.random.uniform(low=0., high=np.pi)
    return A, ph

  def _create_instance(self, A, ph, inner_batch_size, num_steps):
    """Sample (x, y) pairs from the sinusoid A*sin(x + ph).

    Returns float32 arrays of shape (num_steps, inner_batch_size, 1),
    with x drawn uniformly from [-5, 5).
    """
    x = np.random.uniform(
        low=-5., high=5., size=(num_steps, inner_batch_size, 1)).astype(
            np.float32)
    y = A * np.sin(x + ph)
    return x, y

  def _generator(self, inner_batch_size, num_steps):
    """Yield (train_x, train_y, eval_x, eval_y) for fresh tasks forever."""
    while True:
      A, ph = self._create_task()
      xt, yt = self._create_instance(A, ph, inner_batch_size, num_steps)
      xe, ye = self._create_instance(A, ph, inner_batch_size, num_steps)
      yield xt, yt, xe, ye

  def create_ds(self, outer_batch_size, inner_batch_size, num_steps):
    """Build an infinite tf.data.Dataset of batched sinusoid tasks."""
    return tf.data.Dataset.from_generator(
        lambda: self._generator(inner_batch_size, num_steps),
        output_types=(tf.float32, tf.float32, tf.float32, tf.float32)
    ).batch(outer_batch_size)
| google-research/self-organising-systems | mplp/mplp/sinusoidals.py | Python | apache-2.0 | 1,573 |
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
    logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
import os.path
# Generate Python bindings from the test schema stored next to this file.
schema_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../schemas/test-include-daq.xsd'))
code = pyxb.binding.generate.GeneratePython(schema_location=schema_path)
#open('code.py', 'w').write(code)
# Execute the generated binding code in this module's namespace so the
# tests below can use the generated names (Namespace, CreateFromDocument,
# english, ...).
rv = compile(code, 'test', 'exec')
eval(rv)
class TestIncludeDD (unittest.TestCase):
    # Verifies that a document parses identically whether the schema
    # namespace is declared as the default namespace or via a prefix.
    # Namespace / CreateFromDocument / english come from the bindings
    # exec'd above.
    def testDefault (self):
        # Default-namespace form: child elements inherit xmlns.
        xmls = '<entry xmlns="%s"><from>one</from><to>single</to></entry>' % (Namespace.uri(),)
        instance = CreateFromDocument(xmls.encode('utf-8'))
        self.assertEqual(english.one, instance.from_)
    def testExplicit (self):
        # Prefixed form: every element carries an explicit ns: prefix.
        xmls = '<ns:entry xmlns:ns="%s"><ns:from>one</ns:from><ns:to>single</ns:to></ns:entry>' % (Namespace.uri(),)
        instance = CreateFromDocument(xmls.encode('utf-8'))
        self.assertEqual(english.one, instance.from_)
if __name__ == '__main__':
    unittest.main()
| CantemoInternal/pyxb | tests/drivers/test-include-daq.py | Python | apache-2.0 | 1,094 |
import json
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from student.models import Registration
from django.test import TestCase
def get_request_for_user(user):
    """Build a minimal stand-in request object attributed to ``user``."""
    req = RequestFactory()
    req.user = user
    req.method = 'GET'
    # Empty containers so code reading cookies/headers sees "nothing set".
    req.COOKIES = {}
    req.META = {}
    # Pretend this is an HTTPS request against the public host.
    req.is_secure = lambda: True
    req.get_host = lambda: "edx.org"
    return req
class LoginEnrollmentTestCase(TestCase):
    """
    Provides support for user creation,
    activation, login, and course enrollment.
    """
    def setup_user(self):
        """
        Create a user account, activate, and log in.
        """
        self.email = 'foo@test.com'
        self.password = 'bar'
        self.username = 'test'
        self.user = self.create_account(self.username,
                                        self.email, self.password)
        self.activate_user(self.email)
        self.login(self.email, self.password)
    def assert_request_status_code(self, status_code, url, method="GET", **kwargs):
        """
        Issue a request via the test client using the given HTTP method,
        assert on the response status code, and return the response.
        """
        make_request = getattr(self.client, method.lower())
        response = make_request(url, **kwargs)
        self.assertEqual(
            response.status_code, status_code,
            "{method} request to {url} returned status code {actual}, "
            "expected status code {expected}".format(
                method=method, url=url,
                actual=response.status_code, expected=status_code
            )
        )
        return response
    # ============ User creation and login ==============
    def login(self, email, password):
        """
        Login, check that the corresponding view's response has a 200 status code.
        """
        resp = self.client.post(reverse('login'),
                                {'email': email, 'password': password})
        self.assertEqual(resp.status_code, 200)
        data = json.loads(resp.content)
        self.assertTrue(data['success'])
    def logout(self):
        """
        Logout; check that the HTTP response code indicates redirection
        as expected.
        """
        # should redirect
        self.assert_request_status_code(302, reverse('logout'))
    def create_account(self, username, email, password):
        """
        Create the account and check that it worked.
        """
        url = reverse('create_account')
        request_data = {
            'username': username,
            'email': email,
            'password': password,
            'name': 'username',
            'terms_of_service': 'true',
            'honor_code': 'true',
        }
        resp = self.assert_request_status_code(200, url, method="POST", data=request_data)
        data = json.loads(resp.content)
        self.assertEqual(data['success'], True)
        # Check both that the user is created, and inactive
        user = User.objects.get(email=email)
        self.assertFalse(user.is_active)
        return user
    def activate_user(self, email):
        """
        Look up the activation key for the user, then hit the activate view.
        No error checking.
        """
        activation_key = Registration.objects.get(user__email=email).activation_key
        # and now we try to activate
        url = reverse('activate', kwargs={'key': activation_key})
        self.assert_request_status_code(200, url)
        # Now make sure that the user is now actually activated
        self.assertTrue(User.objects.get(email=email).is_active)
    def enroll(self, course, verify=False):
        """
        Try to enroll and return boolean indicating result.
        `course` is an instance of CourseDescriptor.
        `verify` is an optional boolean parameter specifying whether we
        want to verify that the student was successfully enrolled
        in the course.
        """
        resp = self.client.post(reverse('change_enrollment'), {
            'enrollment_action': 'enroll',
            'course_id': course.id.to_deprecated_string(),
        })
        # Enrollment succeeded iff the view answered 200.
        result = resp.status_code == 200
        if verify:
            self.assertTrue(result)
        return result
    def unenroll(self, course):
        """
        Unenroll the currently logged-in user, and check that it worked.
        `course` is an instance of CourseDescriptor.
        """
        url = reverse('change_enrollment')
        request_data = {
            'enrollment_action': 'unenroll',
            'course_id': course.id.to_deprecated_string(),
        }
        self.assert_request_status_code(200, url, method="POST", data=request_data)
| carsongee/edx-platform | lms/djangoapps/courseware/tests/helpers.py | Python | agpl-3.0 | 4,674 |
import re
from core.actionModule import actionModule
from core.keystore import KeyStore as kb
from core.utils import Utils
class scan_anonldap(actionModule):
    """Action module probing discovered LDAP servers for anonymous searches."""
    def __init__(self, config, display, lock):
        super(scan_anonldap, self).__init__(config, display, lock)
        self.title = "Test for Anonymous LDAP Searches"
        self.shortName = "AnonymousLDAP"
        self.description = "execute [ldapsearch -h <server> -p 389 -x -s base"
        self.requirements = ["ldapsearch"]
        self.triggers = ["newService_ldap", "newPort_tcp_389"]
        self.safeLevel = 5
    def getTargets(self):
        # Pull LDAP service hosts recorded by earlier discovery modules.
        self.targets = kb.get('service/ldap')
    def process(self):
        # load any targets we are interested in
        self.getTargets()
        callFire = False
        # loop over each target
        for t in self.targets:
            # verify we have not tested this host before
            if not self.seentarget(t):
                self.display.verbose(self.shortName + " - Connecting to " + t)
                # add the new IP to the already seen list
                self.addseentarget(t)
                # make outfile
                outfile = self.config["proofsDir"] + self.shortName + "_" + t + "_" + Utils.getRandStr(10)
                # run an anonymous (simple-auth, base-scope) ldapsearch
                command = self.config["ldapsearch"] + " -h " + t + " -p 389 -x -s base"
                result = Utils.execWait(command, outfile)
                # Any 'ref:' lines in the output indicate the server answered
                # the anonymous query; record each as a finding.
                parts = re.findall("ref: .*", result)
                for part in parts:
                    callFire = True
                    self.addVuln(t, "AnonymousLDAP", {"port": "389", "message": str(part).replace("/", "%2F"), "output": outfile.replace("/", "%2F")})
        if callFire:
            self.fire("anonymousLDAP")
        return
| MooseDojo/apt2 | modules/action/scan_anonldap.py | Python | mit | 1,841 |
# Mapping from pattern name to the order in which melody notes are assigned
# to pad indices 0-12.  Presumably indices 0-4 are the centre pads and 5-12
# the surrounding half-moon (CW/CCW = clockwise/counter-clockwise,
# Alt = alternating left/right) -- TODO confirm against the HPD-20 layout.
melody_pad_pattern = {
    "Center+Halfmoon-Alt-LR": [4, 0, 1, 2, 3, 5, 12, 6, 11, 7, 10, 8, 9],
    "Center+Halfmoon-Alt-LR (no M5)": [0, 1, 2, 3, 5, 12, 6, 11, 7, 10, 8, 9],
    "Center+Halfmoon-Alt-LR (no M5,M1,M2)": [2, 3, 5, 12, 6, 11, 7, 10, 8, 9],
    "Center+Halfmoon-Alt-LR (Handpan style)": [0, 1, 2, 3, 5, 12, 6, 11, 7, 10, 8, 9, 4],
    "Center+Halfmoon-Alt-LR (Handpan style, no M0,M1)": [2, 3, 5, 12, 6, 11, 7, 10, 8, 9, 4],
    "Center+Halfmoon-CW": [4, 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12],
    "Center+Halfmoon-CW (no M5)": [0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12],
    "Center+Halfmoon-CW (no M5,M1,M2)": [2, 3, 5, 6, 7, 8, 9, 10, 11, 12],
    "Center+Halfmoon-CCW": [4, 0, 1, 2, 3, 12, 11, 10, 9, 8, 7, 6, 5],
    "Center+Halfmoon-CCW (no M5)": [0, 1, 2, 3, 12, 11, 10, 9, 8, 7, 6, 5],
    "Center+Halfmoon-CCW (no M5,M1,M2)": [3, 12, 11, 10, 9, 8, 7, 6, 5],
    "Halfmoon-Alternate": [5, 12, 6, 11, 7, 10, 8, 9],
    "Halfmoon-Alternate-Swap": [12, 5, 11, 6, 10, 7, 9, 8],
    "Halfmoon-CW": [5, 6, 7, 8, 9, 10, 11, 12],
    "Halfmoon-CCW": [12, 11, 10, 9, 8, 7, 6, 5],
}
| scjurgen/hpd-20 | hpd20/melodypadpattern.py | Python | gpl-3.0 | 1,102 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Cisco Systems, Inc.
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import collections
import copy
import logging
import netaddr
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from neutronclient.common import exceptions as neutron_exc
from neutronclient.v2_0 import client as neutron_client
import six
from horizon import exceptions
from horizon import messages
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
from openstack_dashboard.api import nova
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
# Human-readable labels for IP version numbers.
IP_VERSION_DICT = {4: 'IPv4', 6: 'IPv6'}
# Display states used for the port MAC-learning flag.
OFF_STATE = 'OFF'
ON_STATE = 'ON'
# Port 'device_owner' values that mark a port as a router interface.
ROUTER_INTERFACE_OWNERS = (
    'network:router_interface',
    'network:router_interface_distributed',
    'network:ha_router_replicated_interface'
)
class NeutronAPIDictWrapper(base.APIDictWrapper):
    """Base wrapper for neutron API dicts; normalizes common attributes."""
    def __init__(self, apidict):
        # Map the boolean admin_state_up onto a display-friendly string.
        if 'admin_state_up' in apidict:
            if apidict['admin_state_up']:
                apidict['admin_state'] = 'UP'
            else:
                apidict['admin_state'] = 'DOWN'
        # Django cannot handle a key name with ':', so use '__'.
        apidict.update({
            key.replace(':', '__'): value
            for key, value in apidict.items()
            if ':' in key
        })
        super(NeutronAPIDictWrapper, self).__init__(apidict)
    def set_id_as_name_if_empty(self, length=8):
        # Fall back to a truncated id, e.g. '(1a2b3c4d)', when name is blank.
        try:
            if not self._apidict['name'].strip():
                id = self._apidict['id']
                if length:
                    id = id[:length]
                self._apidict['name'] = '(%s)' % id
        except KeyError:
            pass
    def items(self):
        return self._apidict.items()
    @property
    def name_or_id(self):
        # Name when non-blank, otherwise '(<first 13 chars of id>)'.
        return (self._apidict.get('name').strip() or
                '(%s)' % self._apidict['id'][:13])
class Agent(NeutronAPIDictWrapper):
    """Wrapper for neutron agents (dicts from the agent API)."""
class Network(NeutronAPIDictWrapper):
    """Wrapper for neutron Networks."""
    def to_dict(self):
        # Convert the wrapped Subnet objects too, so the result is
        # composed entirely of plain dicts.
        d = dict(super(NeutronAPIDictWrapper, self).to_dict())
        d['subnets'] = [s.to_dict() for s in d['subnets']]
        return d
class Subnet(NeutronAPIDictWrapper):
    """Wrapper for neutron subnets."""
    def __init__(self, apidict):
        # Add a human-readable IP version label ('IPv4'/'IPv6').
        apidict['ipver_str'] = get_ipver_str(apidict['ip_version'])
        super(Subnet, self).__init__(apidict)
class SubnetPool(NeutronAPIDictWrapper):
    """Wrapper for neutron subnetpools (dicts from the subnetpool API)."""
class Port(NeutronAPIDictWrapper):
    """Wrapper for neutron ports."""
    def __init__(self, apidict):
        # Expose MAC learning as an ON/OFF display state when present.
        if 'mac_learning_enabled' in apidict:
            apidict['mac_state'] = \
                ON_STATE if apidict['mac_learning_enabled'] else OFF_STATE
        pairs = apidict.get('allowed_address_pairs')
        if pairs:
            # Deep-copy so the caller's dict is not mutated with wrappers.
            apidict = copy.deepcopy(apidict)
            wrapped_pairs = [PortAllowedAddressPair(pair) for pair in pairs]
            apidict['allowed_address_pairs'] = wrapped_pairs
        super(Port, self).__init__(apidict)
class PortAllowedAddressPair(NeutronAPIDictWrapper):
    """Wrapper for neutron port allowed address pairs."""
    def __init__(self, addr_pair):
        super(PortAllowedAddressPair, self).__init__(addr_pair)
        # Horizon references id property for table operations
        self.id = addr_pair['ip_address']
class Profile(NeutronAPIDictWrapper):
    """Wrapper for neutron profiles."""
    # Attributes exposed by the profile extension (presumably the Cisco
    # N1KV network-profile extension -- TODO confirm).
    _attrs = ['profile_id', 'name', 'segment_type', 'segment_range',
              'sub_type', 'multicast_ip_index', 'multicast_ip_range']
class Router(NeutronAPIDictWrapper):
    """Wrapper for neutron routers (dicts from the router API)."""
class RouterStaticRoute(NeutronAPIDictWrapper):
    """Wrapper for neutron routes extra route.

    The synthetic id has the form '<nexthop>:<destination>'.
    """
    def __init__(self, route):
        super(RouterStaticRoute, self).__init__(route)
        # Horizon references id property for table operations
        self.id = route['nexthop'] + ":" + route['destination']
class SecurityGroup(NeutronAPIDictWrapper):
    """Wrapper for a neutron security group and its wrapped rules."""
    # Required attributes: id, name, description, tenant_id, rules
    def __init__(self, sg, sg_dict=None):
        if sg_dict is None:
            sg_dict = {sg['id']: sg['name']}
        # Wrap each raw rule, resolving remote group ids to names via sg_dict.
        sg['rules'] = [SecurityGroupRule(rule, sg_dict)
                       for rule in sg['security_group_rules']]
        super(SecurityGroup, self).__init__(sg)
    def to_dict(self):
        # Omit the wrapper-populated 'rules' key from the plain dict form.
        return {k: self._apidict[k] for k in self._apidict if k != 'rules'}
@six.python_2_unicode_compatible
class SecurityGroupRule(NeutronAPIDictWrapper):
    """Wrapper translating a neutron rule dict into Horizon's rule format."""
    # Required attributes:
    # id, parent_group_id
    # ip_protocol, from_port, to_port, ip_range, group
    # ethertype, direction (Neutron specific)
    def _get_secgroup_name(self, sg_id, sg_dict):
        # Resolve a remote security-group id to its name.
        if sg_id:
            if sg_dict is None:
                sg_dict = {}
            # If sg name not found in sg_dict,
            # first two parts of UUID is used as sg name.
            return sg_dict.get(sg_id, sg_id[:13])
        else:
            return u''
    def __init__(self, sgr, sg_dict=None):
        # In Neutron, if both remote_ip_prefix and remote_group_id are None,
        # it means all remote IP range is allowed, i.e., 0.0.0.0/0 or ::/0.
        if not sgr['remote_ip_prefix'] and not sgr['remote_group_id']:
            if sgr['ethertype'] == 'IPv6':
                sgr['remote_ip_prefix'] = '::/0'
            else:
                sgr['remote_ip_prefix'] = '0.0.0.0/0'
        # Rename neutron fields to the generic names Horizon expects.
        rule = {
            'id': sgr['id'],
            'parent_group_id': sgr['security_group_id'],
            'direction': sgr['direction'],
            'ethertype': sgr['ethertype'],
            'ip_protocol': sgr['protocol'],
            'from_port': sgr['port_range_min'],
            'to_port': sgr['port_range_max'],
        }
        cidr = sgr['remote_ip_prefix']
        rule['ip_range'] = {'cidr': cidr} if cidr else {}
        group = self._get_secgroup_name(sgr['remote_group_id'], sg_dict)
        rule['group'] = {'name': group} if group else {}
        super(SecurityGroupRule, self).__init__(rule)
    def __str__(self):
        # Render e.g. "ALLOW IPv4 22/tcp from 0.0.0.0/0".
        if 'name' in self.group:
            remote = self.group['name']
        elif 'cidr' in self.ip_range:
            remote = self.ip_range['cidr']
        else:
            remote = 'ANY'
        direction = 'to' if self.direction == 'egress' else 'from'
        if self.from_port:
            if self.from_port == self.to_port:
                proto_port = ("%s/%s" %
                              (self.from_port, self.ip_protocol.lower()))
            else:
                proto_port = ("%s-%s/%s" %
                              (self.from_port, self.to_port,
                               self.ip_protocol.lower()))
        elif self.ip_protocol:
            try:
                ip_proto = int(self.ip_protocol)
                proto_port = "ip_proto=%d" % ip_proto
            except Exception:
                # well-defined IP protocol name like TCP, UDP, ICMP.
                proto_port = self.ip_protocol
        else:
            proto_port = ''
        return (_('ALLOW %(ethertype)s %(proto_port)s '
                  '%(direction)s %(remote)s') %
                {'ethertype': self.ethertype,
                 'proto_port': proto_port,
                 'remote': remote,
                 'direction': direction})
class SecurityGroupManager(network_base.SecurityGroupManager):
    """Neutron implementation of Horizon's security-group backend."""
    backend = 'neutron'
    def __init__(self, request):
        self.request = request
        self.client = neutronclient(request)
    def _list(self, **filters):
        secgroups = self.client.list_security_groups(**filters)
        return [SecurityGroup(sg) for sg in secgroups.get('security_groups')]
    def list(self):
        # Restrict to the current tenant's groups.
        tenant_id = self.request.user.tenant_id
        return self._list(tenant_id=tenant_id)
    def _sg_name_dict(self, sg_id, rules):
        """Create a mapping dict from secgroup id to its name."""
        related_ids = set([sg_id])
        related_ids |= set(filter(None, [r['remote_group_id'] for r in rules]))
        related_sgs = self.client.list_security_groups(id=related_ids,
                                                       fields=['id', 'name'])
        related_sgs = related_sgs.get('security_groups')
        return dict((sg['id'], sg['name']) for sg in related_sgs)
    def get(self, sg_id):
        secgroup = self.client.show_security_group(sg_id).get('security_group')
        sg_dict = self._sg_name_dict(sg_id, secgroup['security_group_rules'])
        return SecurityGroup(secgroup, sg_dict)
    def create(self, name, desc):
        body = {'security_group': {'name': name,
                                   'description': desc,
                                   'tenant_id': self.request.user.project_id}}
        secgroup = self.client.create_security_group(body)
        return SecurityGroup(secgroup.get('security_group'))
    def update(self, sg_id, name, desc):
        body = {'security_group': {'name': name,
                                   'description': desc}}
        secgroup = self.client.update_security_group(sg_id, body)
        return SecurityGroup(secgroup.get('security_group'))
    def delete(self, sg_id):
        self.client.delete_security_group(sg_id)
    def rule_create(self, parent_group_id,
                    direction=None, ethertype=None,
                    ip_protocol=None, from_port=None, to_port=None,
                    cidr=None, group_id=None):
        # Normalize "unspecified" sentinel values (empty cidr, negative
        # ports/protocol) to None, which is what the neutron API expects.
        if not cidr:
            cidr = None
        if from_port < 0:
            from_port = None
        if to_port < 0:
            to_port = None
        if isinstance(ip_protocol, int) and ip_protocol < 0:
            ip_protocol = None
        body = {'security_group_rule':
                {'security_group_id': parent_group_id,
                 'direction': direction,
                 'ethertype': ethertype,
                 'protocol': ip_protocol,
                 'port_range_min': from_port,
                 'port_range_max': to_port,
                 'remote_ip_prefix': cidr,
                 'remote_group_id': group_id}}
        try:
            rule = self.client.create_security_group_rule(body)
        except neutron_exc.Conflict:
            raise exceptions.Conflict(_('Security group rule already exists.'))
        rule = rule.get('security_group_rule')
        sg_dict = self._sg_name_dict(parent_group_id, [rule])
        return SecurityGroupRule(rule, sg_dict)
    def rule_delete(self, sgr_id):
        self.client.delete_security_group_rule(sgr_id)
    def list_by_instance(self, instance_id):
        """Gets security groups of an instance."""
        ports = port_list(self.request, device_id=instance_id)
        sg_ids = []
        for p in ports:
            sg_ids += p.security_groups
        return self._list(id=set(sg_ids)) if sg_ids else []
    def update_instance_security_group(self, instance_id,
                                       new_security_group_ids):
        # Apply the new security-group set to every port of the instance.
        ports = port_list(self.request, device_id=instance_id)
        for p in ports:
            params = {'security_groups': new_security_group_ids}
            port_update(self.request, p.id, **params)
class FloatingIp(base.APIDictWrapper):
    """Wrapper aliasing neutron floating-IP fields to Horizon's names."""
    _attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id',
              'instance_type', 'pool']
    def __init__(self, fip):
        # Alias the neutron field names onto the generic names above.
        fip['ip'] = fip['floating_ip_address']
        fip['fixed_ip'] = fip['fixed_ip_address']
        fip['pool'] = fip['floating_network_id']
        super(FloatingIp, self).__init__(fip)
class FloatingIpPool(base.APIDictWrapper):
    """Wrapper for a network serving as a floating IP pool."""
    pass
class FloatingIpTarget(base.APIDictWrapper):
    """Wrapper for a port/IP combination a floating IP can attach to."""
    pass
class FloatingIpManager(network_base.FloatingIpManager):
    """Neutron implementation of Horizon's floating-IP abstraction."""
    # Maps a port 'device_owner' prefix to the instance type exposed on
    # the floating IP ('compute' for VMs, 'loadbalancer' for LBaaS VIPs).
    device_owner_map = {
        'compute:': 'compute',
        'neutron:LOADBALANCER': 'loadbalancer',
    }
    def __init__(self, request):
        self.request = request
        self.client = neutronclient(request)
    def list_pools(self):
        # In Neutron, floating IP pools are the external networks.
        search_opts = {'router:external': True}
        return [FloatingIpPool(pool) for pool
                in self.client.list_networks(**search_opts).get('networks')]
    def _get_instance_type_from_device_owner(self, device_owner):
        # Fall back to the raw device_owner when no prefix matches.
        for key, value in self.device_owner_map.items():
            if device_owner.startswith(key):
                return value
        return device_owner
    def _set_instance_info(self, fip, port=None):
        # Attach instance_id/instance_type (derived from the associated
        # port's device_id/device_owner) to the floating IP dict.
        if fip['port_id']:
            if not port:
                port = port_get(self.request, fip['port_id'])
            fip['instance_id'] = port.device_id
            fip['instance_type'] = self._get_instance_type_from_device_owner(
                port.device_owner)
        else:
            fip['instance_id'] = None
            fip['instance_type'] = None
    def list(self, all_tenants=False, **search_opts):
        if not all_tenants:
            tenant_id = self.request.user.tenant_id
            # In Neutron, list_floatingips returns Floating IPs from
            # all tenants when the API is called with admin role, so
            # we need to filter them with tenant_id.
            search_opts['tenant_id'] = tenant_id
            port_search_opts = {'tenant_id': tenant_id}
        else:
            port_search_opts = {}
        fips = self.client.list_floatingips(**search_opts)
        fips = fips.get('floatingips')
        # Get port list to add instance_id to floating IP list
        # instance_id is stored in device_id attribute
        ports = port_list(self.request, **port_search_opts)
        port_dict = collections.OrderedDict([(p['id'], p) for p in ports])
        for fip in fips:
            self._set_instance_info(fip, port_dict.get(fip['port_id']))
        return [FloatingIp(fip) for fip in fips]
    def get(self, floating_ip_id):
        fip = self.client.show_floatingip(floating_ip_id).get('floatingip')
        self._set_instance_info(fip)
        return FloatingIp(fip)
    def allocate(self, pool, tenant_id=None, **params):
        if not tenant_id:
            tenant_id = self.request.user.project_id
        create_dict = {'floating_network_id': pool,
                       'tenant_id': tenant_id}
        if 'floating_ip_address' in params:
            create_dict['floating_ip_address'] = params['floating_ip_address']
        fip = self.client.create_floatingip(
            {'floatingip': create_dict}).get('floatingip')
        self._set_instance_info(fip)
        return FloatingIp(fip)
    def release(self, floating_ip_id):
        self.client.delete_floatingip(floating_ip_id)
    def associate(self, floating_ip_id, port_id):
        # NOTE: In Neutron Horizon floating IP support, port_id is
        # "<port_id>_<ip_address>" format to identify multiple ports.
        pid, ip_address = port_id.split('_', 1)
        update_dict = {'port_id': pid,
                       'fixed_ip_address': ip_address}
        self.client.update_floatingip(floating_ip_id,
                                      {'floatingip': update_dict})
    def disassociate(self, floating_ip_id):
        update_dict = {'port_id': None}
        self.client.update_floatingip(floating_ip_id,
                                      {'floatingip': update_dict})
    def _get_reachable_subnets(self, ports):
        if not is_enabled_by_config('enable_fip_topology_check', True):
            # All subnets are reachable from external network
            return set(
                p.fixed_ips[0]['subnet_id'] for p in ports if p.fixed_ips
            )
        # Retrieve subnet list reachable from external network
        ext_net_ids = [ext_net.id for ext_net in self.list_pools()]
        gw_routers = [r.id for r in router_list(self.request)
                      if (r.external_gateway_info and
                          r.external_gateway_info.get('network_id')
                          in ext_net_ids)]
        reachable_subnets = set([p.fixed_ips[0]['subnet_id'] for p in ports
                                 if ((p.device_owner in
                                      ROUTER_INTERFACE_OWNERS)
                                     and (p.device_id in gw_routers))])
        # we have to include any shared subnets as well because we may not
        # have permission to see the router interface to infer connectivity
        shared = set([s.id for n in network_list(self.request, shared=True)
                      for s in n.subnets])
        return reachable_subnets | shared
    def list_targets(self):
        tenant_id = self.request.user.tenant_id
        ports = port_list(self.request, tenant_id=tenant_id)
        servers, has_more = nova.server_list(self.request)
        server_dict = collections.OrderedDict(
            [(s.id, s.name) for s in servers])
        reachable_subnets = self._get_reachable_subnets(ports)
        if is_service_enabled(self.request,
                              config_name='enable_lb',
                              ext_name='lbaas'):
            # Also get the loadbalancer VIPs
            vip_dict = {v['port_id']: v['name']
                        for v in self.client.list_vips().get('vips', [])}
        else:
            vip_dict = {}
        targets = []
        for p in ports:
            # Remove network ports from Floating IP targets
            if p.device_owner.startswith('network:'):
                continue
            port_id = p.id
            server_name = server_dict.get(p.device_id) or vip_dict.get(port_id)
            for ip in p.fixed_ips:
                if ip['subnet_id'] not in reachable_subnets:
                    continue
                target = {'name': '%s: %s' % (server_name, ip['ip_address']),
                          'id': '%s_%s' % (port_id, ip['ip_address']),
                          'port_id': port_id,
                          'instance_id': p.device_id}
                targets.append(FloatingIpTarget(target))
        return targets
    def _target_ports_by_instance(self, instance_id):
        if not instance_id:
            return None
        search_opts = {'device_id': instance_id}
        return port_list(self.request, **search_opts)
    def get_target_id_by_instance(self, instance_id, target_list=None):
        if target_list is not None:
            targets = [target for target in target_list
                       if target['instance_id'] == instance_id]
            if not targets:
                return None
            return targets[0]['id']
        else:
            # In Neutron one port can have multiple ip addresses, so this
            # method picks up the first one and generate target id.
            ports = self._target_ports_by_instance(instance_id)
            if not ports:
                return None
            return '{0}_{1}'.format(ports[0].id,
                                    ports[0].fixed_ips[0]['ip_address'])
    def list_target_id_by_instance(self, instance_id, target_list=None):
        if target_list is not None:
            return [target['id'] for target in target_list
                    if target['instance_id'] == instance_id]
        else:
            ports = self._target_ports_by_instance(instance_id)
            # BUG FIX: _target_ports_by_instance() returns None for a falsy
            # instance_id; the previous code then raised TypeError when
            # iterating it. Mirror get_target_id_by_instance()'s guard.
            if not ports:
                return []
            return ['{0}_{1}'.format(p['id'], p['fixed_ips'][0]['ip_address'])
                    for p in ports if p['fixed_ips']]
    def is_simple_associate_supported(self):
        # NOTE: There are two reason that simple association support
        # needs more considerations. (1) Neutron does not support the
        # default floating IP pool at the moment. It can be avoided
        # in case where only one floating IP pool exists.
        # (2) Neutron floating IP is associated with each VIF and
        # we need to check whether such VIF is only one for an instance
        # to enable simple association support.
        return False
    def is_supported(self):
        network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
        return network_config.get('enable_router', True)
def get_ipver_str(ip_version):
    """Return the human-friendly name ('IPv4'/'IPv6') for a version number,
    or an empty string for anything unrecognized."""
    try:
        return IP_VERSION_DICT[ip_version]
    except KeyError:
        return ''
@memoized
def neutronclient(request):
    """Return a neutron API client for `request` (memoized per request)."""
    # SSL behaviour is driven by the deployment's Django settings.
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    c = neutron_client.Client(token=request.user.token.id,
                              auth_url=base.url_for(request, 'identity'),
                              endpoint_url=base.url_for(request, 'network'),
                              insecure=insecure, ca_cert=cacert)
    return c
def list_resources_with_long_filters(list_method,
                                     filter_attr, filter_values, **params):
    """List neutron resources with handling RequestURITooLong exception.
    If filter parameters are long, list resources API request leads to
    414 error (URL is too long). For such case, this method split
    list parameters specified by a list_field argument into chunks
    and call the specified list_method repeatedly.
    :param list_method: Method used to retrieve resource list.
    :param filter_attr: attribute name to be filtered. The value corresponding
        to this attribute is specified by "filter_values".
        If you want to specify more attributes for a filter condition,
        pass them as keyword arguments like "attr2=values2".
    :param filter_values: values of "filter_attr" to be filtered.
        If filter_values are too long and the total URI length exceed the
        maximum length supported by the neutron server, filter_values will
        be split into sub lists if filter_values is a list.
    :param params: parameters to pass a specified listing API call
        without any changes. You can specify more filter conditions
        in addition to a pair of filter_attr and filter_values.
    """
    try:
        params[filter_attr] = filter_values
        return list_method(**params)
    except neutron_exc.RequestURITooLong as uri_len_exc:
        # The URI is too long because of too many filter values.
        # Use the excess attribute of the exception to know how many
        # filter values can be inserted into a single request.
        # We consider only the filter condition from (filter_attr,
        # filter_values) and do not consider other filter conditions
        # which may be specified in **params.
        if not isinstance(filter_values, list):  # was: type(...) != list
            filter_values = [filter_values]
        # Length of each query filter is:
        # <key>=<value>& (e.g., id=<uuid>)
        # The length will be key_len + value_maxlen + 2
        all_filter_len = sum(len(filter_attr) + len(val) + 2
                             for val in filter_values)
        allowed_filter_len = all_filter_len - uri_len_exc.excess
        val_maxlen = max(len(val) for val in filter_values)
        filter_maxlen = len(filter_attr) + val_maxlen + 2
        # Guard against a zero chunk size, which would make range() raise
        # ValueError; with chunk_size 1 any still-too-long request simply
        # propagates the server's error.
        chunk_size = max(allowed_filter_len // filter_maxlen, 1)
        resources = []
        for i in range(0, len(filter_values), chunk_size):
            params[filter_attr] = filter_values[i:i + chunk_size]
            resources.extend(list_method(**params))
        return resources
def network_list(request, **params):
    """Return Network objects matching `params`, with subnets expanded."""
    LOG.debug("network_list(): params=%s", params)
    networks = neutronclient(request).list_networks(**params).get('networks')
    # Get subnet list to expand subnet info in network list.
    subnets = subnet_list(request)
    # dict comprehension instead of dict([(k, v) for ...]) (idiom fix).
    subnet_dict = {s['id']: s for s in subnets}
    # Expand subnet list from subnet_id to values.
    for n in networks:
        # Due to potential timing issues, we can't assume the subnet_dict data
        # is in sync with the network data.
        n['subnets'] = [subnet_dict[s] for s in n.get('subnets', []) if
                        s in subnet_dict]
    return [Network(n) for n in networks]
def network_list_for_tenant(request, tenant_id, include_external=False,
                            **params):
    """Return a network list available for the tenant.
    The list contains networks owned by the tenant and public networks.
    If requested_networks specified, it searches requested_networks only.
    """
    # Lazy log args: formatting only happens when DEBUG logging is on
    # (was eager %-formatting).
    LOG.debug("network_list_for_tenant(): tenant_id=%s, params=%s",
              tenant_id, params)
    networks = []
    shared = params.get('shared')
    if shared is not None:
        del params['shared']
    if shared in (None, False):
        # If a user has admin role, network list returned by Neutron API
        # contains networks that do not belong to that tenant.
        # So we need to specify tenant_id when calling network_list().
        networks += network_list(request, tenant_id=tenant_id,
                                 shared=False, **params)
    if shared in (None, True):
        # In the current Neutron API, there is no way to retrieve
        # both owner networks and public networks in a single API call.
        networks += network_list(request, shared=True, **params)
    params['router:external'] = params.get('router:external', True)
    if params['router:external'] and include_external:
        if shared is not None:
            params['shared'] = shared
        fetched_net_ids = [n.id for n in networks]
        # Retrieves external networks when router:external is not specified
        # in (filtering) params or router:external=True filter is specified.
        # When router:external=False is specified there is no need to query
        # networking API because apparently nothing will match the filter.
        ext_nets = network_list(request, **params)
        networks += [n for n in ext_nets if
                     n.id not in fetched_net_ids]
    return networks
def network_get(request, network_id, expand_subnet=True, **params):
    """Return a Network, optionally expanding subnet ids to Subnet objects."""
    # Lazy log args (was eager %-formatting).
    LOG.debug("network_get(): netid=%s, params=%s", network_id, params)
    network = neutronclient(request).show_network(network_id,
                                                  **params).get('network')
    if expand_subnet:
        # Expand only when the subnets are visible to the requesting tenant.
        if request.user.tenant_id == network['tenant_id'] or network['shared']:
            # Since the number of subnets per network must be small,
            # call subnet_get() for each subnet instead of calling
            # subnet_list() once.
            network['subnets'] = [subnet_get(request, sid)
                                  for sid in network['subnets']]
    return Network(network)
def network_create(request, **kwargs):
    """Create a network object.
    :param request: request context
    :param tenant_id: (optional) tenant id of the network created
    :param name: (optional) name of the network created
    :returns: Network object
    """
    # Lazy log args (was eager %-formatting).
    LOG.debug("network_create(): kwargs = %s", kwargs)
    # In the case network profiles are being used, profile id is needed.
    if 'net_profile_id' in kwargs:
        kwargs['n1kv:profile'] = kwargs.pop('net_profile_id')
    if 'tenant_id' not in kwargs:
        kwargs['tenant_id'] = request.user.project_id
    body = {'network': kwargs}
    network = neutronclient(request).create_network(body=body).get('network')
    return Network(network)
def network_update(request, network_id, **kwargs):
    """Update attributes of an existing network and return it."""
    LOG.debug("network_update(): netid=%s, params=%s" % (network_id, kwargs))
    body = {'network': kwargs}
    network = neutronclient(request).update_network(network_id,
                                                    body=body).get('network')
    return Network(network)
def network_delete(request, network_id):
    """Delete the network with the given ID."""
    LOG.debug("network_delete(): netid=%s" % network_id)
    neutronclient(request).delete_network(network_id)
def subnet_list(request, **params):
    """List subnets visible to the caller, filtered by ``params``."""
    LOG.debug("subnet_list(): params=%s" % (params))
    subnets = neutronclient(request).list_subnets(**params).get('subnets')
    return [Subnet(s) for s in subnets]
def subnet_get(request, subnet_id, **params):
    """Fetch a single subnet by ID."""
    LOG.debug("subnet_get(): subnetid=%s, params=%s" % (subnet_id, params))
    subnet = neutronclient(request).show_subnet(subnet_id,
                                                **params).get('subnet')
    return Subnet(subnet)
def subnet_create(request, network_id, **kwargs):
    """Create a subnet on a specified network.

    :param request: request context
    :param network_id: network id a subnet is created on
    :param cidr: (optional) subnet IP address range
    :param ip_version: (optional) IP version (4 or 6)
    :param gateway_ip: (optional) IP address of gateway
    :param tenant_id: (optional) tenant id of the subnet created
    :param name: (optional) name of the subnet created
    :param subnetpool_id: (optional) subnetpool to allocate prefix from
    :param prefixlen: (optional) length of prefix to allocate
    :returns: Subnet object

    Although both cidr+ip_version and subnetpool_id+prefixlen are listed as
    optional you MUST pass along one of the combinations to get a successful
    result.
    """
    LOG.debug("subnet_create(): netid=%s, kwargs=%s"
              % (network_id, kwargs))
    body = {'subnet': {'network_id': network_id}}
    if 'tenant_id' not in kwargs:
        kwargs['tenant_id'] = request.user.project_id
    body['subnet'].update(kwargs)
    subnet = neutronclient(request).create_subnet(body=body).get('subnet')
    return Subnet(subnet)
def subnet_update(request, subnet_id, **kwargs):
    """Update attributes of an existing subnet and return it."""
    LOG.debug("subnet_update(): subnetid=%s, kwargs=%s" % (subnet_id, kwargs))
    body = {'subnet': kwargs}
    subnet = neutronclient(request).update_subnet(subnet_id,
                                                  body=body).get('subnet')
    return Subnet(subnet)
def subnet_delete(request, subnet_id):
    """Delete the subnet with the given ID."""
    LOG.debug("subnet_delete(): subnetid=%s" % subnet_id)
    neutronclient(request).delete_subnet(subnet_id)
def subnetpool_list(request, **params):
    """List subnet pools visible to the caller, filtered by ``params``."""
    LOG.debug("subnetpool_list(): params=%s" % (params))
    subnetpools = \
        neutronclient(request).list_subnetpools(**params).get('subnetpools')
    return [SubnetPool(s) for s in subnetpools]
def subnetpool_get(request, subnetpool_id, **params):
    """Fetch a single subnet pool by ID."""
    LOG.debug("subnetpool_get(): subnetpoolid=%s, params=%s" %
              (subnetpool_id, params))
    subnetpool = \
        neutronclient(request).show_subnetpool(subnetpool_id,
                                               **params).get('subnetpool')
    return SubnetPool(subnetpool)
def subnetpool_create(request, name, prefixes, **kwargs):
    """Create a subnetpool.

    ip_version is auto-detected in back-end.

    Parameters:
    request -- Request context
    name -- Name for subnetpool
    prefixes -- List of prefixes for pool

    Keyword Arguments (optional):
    min_prefixlen -- Minimum prefix length for allocations from pool
    max_prefixlen -- Maximum prefix length for allocations from pool
    default_prefixlen -- Default prefix length for allocations from pool
    default_quota -- Default quota for allocations from pool
    shared -- Subnetpool should be shared (Admin-only)
    tenant_id -- Owner of subnetpool; defaults to the caller's project

    Returns:
    SubnetPool object
    """
    LOG.debug("subnetpool_create(): name=%s, prefixes=%s, kwargs=%s"
              % (name, prefixes, kwargs))
    body = {'subnetpool':
            {'name': name,
             'prefixes': prefixes,
             }
            }
    if 'tenant_id' not in kwargs:
        kwargs['tenant_id'] = request.user.project_id
    body['subnetpool'].update(kwargs)
    subnetpool = \
        neutronclient(request).create_subnetpool(body=body).get('subnetpool')
    return SubnetPool(subnetpool)
def subnetpool_update(request, subnetpool_id, **kwargs):
    """Update attributes of an existing subnet pool and return it."""
    LOG.debug("subnetpool_update(): subnetpoolid=%s, kwargs=%s" %
              (subnetpool_id, kwargs))
    body = {'subnetpool': kwargs}
    subnetpool = \
        neutronclient(request).update_subnetpool(subnetpool_id,
                                                 body=body).get('subnetpool')
    return SubnetPool(subnetpool)
def subnetpool_delete(request, subnetpool_id):
    """Delete the subnet pool with the given ID."""
    LOG.debug("subnetpool_delete(): subnetpoolid=%s" % subnetpool_id)
    return neutronclient(request).delete_subnetpool(subnetpool_id)
def port_list(request, **params):
    """List ports visible to the caller, filtered by ``params``."""
    LOG.debug("port_list(): params=%s" % (params))
    ports = neutronclient(request).list_ports(**params).get('ports')
    return [Port(p) for p in ports]
def port_get(request, port_id, **params):
    """Fetch a single port by ID."""
    LOG.debug("port_get(): portid=%s, params=%s" % (port_id, params))
    port = neutronclient(request).show_port(port_id, **params).get('port')
    return Port(port)
def unescape_port_kwargs(**kwargs):
    """Convert double-underscore escaped keyword names back to colon form.

    Keys such as ``binding__vnic_type`` (escaped because ``:`` is not legal
    in a Python keyword argument) are rewritten to ``binding:vnic_type``.

    :returns: the kwargs dict with escaped keys replaced in place
    """
    # Iterate over a snapshot of the keys: the loop body pops and inserts
    # entries, and mutating a dict while iterating it directly raises
    # RuntimeError (or yields undefined iteration order) in CPython.
    for key in list(kwargs):
        if '__' in key:
            kwargs[':'.join(key.split('__'))] = kwargs.pop(key)
    return kwargs
def port_create(request, network_id, **kwargs):
    """Create a port on a specified network.

    :param request: request context
    :param network_id: network id a subnet is created on
    :param device_id: (optional) device id attached to the port
    :param tenant_id: (optional) tenant id of the port created;
        defaults to the requesting user's project
    :param name: (optional) name of the port created
    :param policy_profile_id: (optional) N1KV policy profile id
    :returns: Port object
    """
    LOG.debug("port_create(): netid=%s, kwargs=%s" % (network_id, kwargs))
    # In the case policy profiles are being used, profile id is needed.
    if 'policy_profile_id' in kwargs:
        kwargs['n1kv:profile'] = kwargs.pop('policy_profile_id')
    # Turn '__'-escaped keyword names back into ':'-separated API names.
    kwargs = unescape_port_kwargs(**kwargs)
    body = {'port': {'network_id': network_id}}
    if 'tenant_id' not in kwargs:
        kwargs['tenant_id'] = request.user.project_id
    body['port'].update(kwargs)
    port = neutronclient(request).create_port(body=body).get('port')
    return Port(port)
def port_delete(request, port_id):
    """Delete the port with the given ID."""
    LOG.debug("port_delete(): portid=%s" % port_id)
    neutronclient(request).delete_port(port_id)
def port_update(request, port_id, **kwargs):
    """Update attributes of an existing port and return it."""
    LOG.debug("port_update(): portid=%s, kwargs=%s" % (port_id, kwargs))
    # Turn '__'-escaped keyword names back into ':'-separated API names.
    kwargs = unescape_port_kwargs(**kwargs)
    body = {'port': kwargs}
    port = neutronclient(request).update_port(port_id, body=body).get('port')
    return Port(port)
def profile_list(request, type_p, **params):
    """List Cisco N1KV profiles of the requested type.

    :param request: request context
    :param type_p: profile type, either 'network' or 'policy'
    :raises ValueError: if ``type_p`` is not a recognized profile type
    :returns: list of Profile objects
    """
    LOG.debug("profile_list(): "
              "profile_type=%(profile_type)s, params=%(params)s",
              {'profile_type': type_p, 'params': params})
    if type_p == 'network':
        profiles = neutronclient(request).list_network_profiles(
            **params).get('network_profiles')
    elif type_p == 'policy':
        profiles = neutronclient(request).list_policy_profiles(
            **params).get('policy_profiles')
    else:
        # Previously an unknown type fell through and raised a confusing
        # UnboundLocalError on 'profiles'; fail fast with a clear message.
        raise ValueError("Unknown profile type: %s" % type_p)
    return [Profile(n) for n in profiles]
def profile_get(request, profile_id, **params):
    """Fetch a single N1KV network profile by ID."""
    LOG.debug("profile_get(): "
              "profileid=%(profileid)s, params=%(params)s",
              {'profileid': profile_id, 'params': params})
    profile = neutronclient(request).show_network_profile(
        profile_id, **params).get('network_profile')
    return Profile(profile)
def profile_create(request, **kwargs):
    """Create an N1KV network profile from the given attributes."""
    LOG.debug("profile_create(): kwargs=%s", kwargs)
    body = {'network_profile': {}}
    body['network_profile'].update(kwargs)
    profile = neutronclient(request).create_network_profile(
        body=body).get('network_profile')
    return Profile(profile)
def profile_delete(request, profile_id):
    """Delete the N1KV network profile with the given ID."""
    LOG.debug("profile_delete(): profile_id=%s", profile_id)
    neutronclient(request).delete_network_profile(profile_id)
def profile_update(request, profile_id, **kwargs):
    """Update attributes of an N1KV network profile and return it."""
    LOG.debug("profile_update(): "
              "profileid=%(profileid)s, kwargs=%(kwargs)s",
              {'profileid': profile_id, 'kwargs': kwargs})
    body = {'network_profile': kwargs}
    profile = neutronclient(request).update_network_profile(
        profile_id, body=body).get('network_profile')
    return Profile(profile)
def profile_bindings_list(request, type_p, **params):
    """List tenant bindings for N1KV profiles of the requested type.

    :param request: request context
    :param type_p: profile type, either 'network' or 'policy'
    :raises ValueError: if ``type_p`` is not a recognized profile type
    :returns: list of Profile objects wrapping the bindings
    """
    LOG.debug("profile_bindings_list(): "
              "profile_type=%(profile_type)s params=%(params)s",
              {'profile_type': type_p, 'params': params})
    if type_p == 'network':
        bindings = neutronclient(request).list_network_profile_bindings(
            **params).get('network_profile_bindings')
    elif type_p == 'policy':
        bindings = neutronclient(request).list_policy_profile_bindings(
            **params).get('policy_profile_bindings')
    else:
        # Previously an unknown type fell through and raised a confusing
        # UnboundLocalError on 'bindings'; fail fast with a clear message.
        raise ValueError("Unknown profile type: %s" % type_p)
    return [Profile(n) for n in bindings]
def router_create(request, **kwargs):
    """Create a router; tenant_id defaults to the caller's project."""
    LOG.debug("router_create():, kwargs=%s" % kwargs)
    body = {'router': {}}
    if 'tenant_id' not in kwargs:
        kwargs['tenant_id'] = request.user.project_id
    body['router'].update(kwargs)
    router = neutronclient(request).create_router(body=body).get('router')
    return Router(router)
def router_update(request, r_id, **kwargs):
    """Update attributes of an existing router and return it."""
    LOG.debug("router_update(): router_id=%s, kwargs=%s" % (r_id, kwargs))
    body = {'router': {}}
    body['router'].update(kwargs)
    router = neutronclient(request).update_router(r_id, body=body)
    return Router(router['router'])
def router_get(request, router_id, **params):
    """Fetch a single router by ID."""
    router = neutronclient(request).show_router(router_id,
                                                **params).get('router')
    return Router(router)
def router_list(request, **params):
    """List routers visible to the caller, filtered by ``params``."""
    routers = neutronclient(request).list_routers(**params).get('routers')
    return [Router(r) for r in routers]
def router_list_on_l3_agent(request, l3_agent_id, **params):
    """List the routers scheduled on a specific L3 agent."""
    routers = neutronclient(request).\
        list_routers_on_l3_agent(l3_agent_id,
                                 **params).get('routers')
    return [Router(r) for r in routers]
def router_delete(request, router_id):
    """Delete the router with the given ID."""
    neutronclient(request).delete_router(router_id)
def router_add_interface(request, router_id, subnet_id=None, port_id=None):
    """Attach a subnet or an existing port to a router.

    Exactly one of ``subnet_id`` or ``port_id`` is normally given; the
    Neutron API rejects requests specifying both.
    """
    body = {}
    if subnet_id:
        body['subnet_id'] = subnet_id
    if port_id:
        body['port_id'] = port_id
    client = neutronclient(request)
    return client.add_interface_router(router_id, body)
def router_remove_interface(request, router_id, subnet_id=None, port_id=None):
    """Detach a subnet or a port from a router (mirror of add_interface)."""
    body = {}
    if subnet_id:
        body['subnet_id'] = subnet_id
    if port_id:
        body['port_id'] = port_id
    neutronclient(request).remove_interface_router(router_id, body)
def router_add_gateway(request, router_id, network_id):
    """Set the router's external gateway to the given external network."""
    body = {'network_id': network_id}
    neutronclient(request).add_gateway_router(router_id, body)
def router_remove_gateway(request, router_id):
    """Clear the router's external gateway."""
    neutronclient(request).remove_gateway_router(router_id)
def router_static_route_list(request, router_id=None):
    """Return the static routes configured on a router.

    :param router_id: ID of the router to inspect
    :returns: list of RouterStaticRoute objects; an empty list when the
        router has no ``routes`` attribute (extension not enabled)
    """
    router = router_get(request, router_id)
    try:
        routes = [RouterStaticRoute(r) for r in router.routes]
    except AttributeError:
        # Pass the two values as separate lazy arguments; the original
        # passed a single tuple for two %s placeholders, which made the
        # logging call fail to format at emit time.
        LOG.debug("router_static_route_list(): router_id=%s, "
                  "router=%s", router_id, router)
        return []
    return routes
def router_static_route_remove(request, router_id, route_ids):
    """Remove the static routes with the given IDs from a router.

    :param route_ids: iterable of route IDs to drop
    :returns: the updated Router object
    """
    existing = router_static_route_list(request, router_id=router_id)
    # Keep every route that was not asked to be removed.
    remaining = [{'nexthop': route.nexthop,
                  'destination': route.destination}
                 for route in existing if route.id not in route_ids]
    return router_update(request, router_id, routes=remaining)
def router_static_route_add(request, router_id, newroute):
    """Prepend ``newroute`` to the router's existing static routes.

    :param newroute: dict with 'nexthop' and 'destination' keys
    :returns: the updated Router object
    """
    body = {}
    currentroutes = router_static_route_list(request, router_id=router_id)
    body['routes'] = [newroute] + [{'nexthop': r.nexthop,
                                    'destination': r.destination}
                                   for r in currentroutes]
    new = router_update(request, router_id, **body)
    return new
def tenant_quota_get(request, tenant_id):
    """Return the Neutron quota set for a tenant as a QuotaSet."""
    return base.QuotaSet(neutronclient(request).show_quota(tenant_id)['quota'])
def tenant_quota_update(request, tenant_id, **kwargs):
    """Update individual Neutron quota values for a tenant."""
    quotas = {'quota': kwargs}
    return neutronclient(request).update_quota(tenant_id, quotas)
def agent_list(request, **params):
    """List Neutron agents, filtered by ``params``."""
    agents = neutronclient(request).list_agents(**params)
    return [Agent(a) for a in agents['agents']]
def list_dhcp_agent_hosting_networks(request, network, **params):
    """List the DHCP agents hosting a given network."""
    agents = neutronclient(request).list_dhcp_agent_hosting_networks(network,
                                                                     **params)
    return [Agent(a) for a in agents['agents']]
def list_l3_agent_hosting_router(request, router, **params):
    """List the L3 agents hosting a given router."""
    agents = neutronclient(request).list_l3_agent_hosting_routers(router,
                                                                  **params)
    return [Agent(a) for a in agents['agents']]
def show_network_ip_availability(request, network_id):
    """Return IP usage/availability statistics for a network (raw dict)."""
    ip_availability = neutronclient(request).show_network_ip_availability(
        network_id)
    return ip_availability
def add_network_to_dhcp_agent(request, dhcp_agent, network_id):
    """Schedule a network onto the given DHCP agent."""
    body = {'network_id': network_id}
    return neutronclient(request).add_network_to_dhcp_agent(dhcp_agent, body)
def remove_network_from_dhcp_agent(request, dhcp_agent, network_id):
    """Unschedule a network from the given DHCP agent."""
    return neutronclient(request).remove_network_from_dhcp_agent(dhcp_agent,
                                                                 network_id)
def provider_list(request):
    """Return the raw list of advanced-service providers known to Neutron."""
    providers = neutronclient(request).list_service_providers()
    return providers['service_providers']
def servers_update_addresses(request, servers, all_tenants=False):
    """Retrieve servers networking information from Neutron if enabled.

    Should be used when up to date networking information is required,
    and Nova's networking info caching mechanism is not fast enough.

    Mutates each server in ``servers`` in place by setting its
    ``addresses`` attribute; returns None.
    """
    # Get all (filtered for relevant servers) information from Neutron
    try:
        ports = list_resources_with_long_filters(
            port_list, 'device_id', [instance.id for instance in servers],
            request=request)
        fips = FloatingIpManager(request)
        if fips.is_supported():
            floating_ips = list_resources_with_long_filters(
                fips.list, 'port_id', [port.id for port in ports],
                all_tenants=all_tenants)
        else:
            floating_ips = []
        networks = list_resources_with_long_filters(
            network_list, 'id', set([port.network_id for port in ports]),
            request=request)
    # Deliberately broad: any Neutron failure degrades to "no address
    # update" with a user-visible error rather than breaking the page.
    except Exception:
        error_message = _('Unable to connect to Neutron.')
        LOG.error(error_message)
        messages.error(request, error_message)
        return
    # Map instance to its ports
    instances_ports = collections.defaultdict(list)
    for port in ports:
        instances_ports[port.device_id].append(port)
    # Map port to its floating ips
    ports_floating_ips = collections.defaultdict(list)
    for fip in floating_ips:
        ports_floating_ips[fip.port_id].append(fip)
    # Map network id to its name
    network_names = dict(((network.id, network.name) for network in networks))
    for server in servers:
        try:
            addresses = _server_get_addresses(
                request,
                server,
                instances_ports,
                ports_floating_ips,
                network_names)
        except Exception as e:
            # A failure on one server must not stop the others.
            LOG.error(six.text_type(e))
        else:
            server.addresses = addresses
def _server_get_addresses(request, server, ports, floating_ips, network_names):
    """Build a Nova-style ``addresses`` dict for one server from Neutron data.

    :param ports: mapping of device_id -> list of ports
    :param floating_ips: mapping of port_id -> list of floating IPs
    :param network_names: mapping of network_id -> network name
    :returns: dict of network name -> list of address dicts in the same
        shape Nova uses (mac, version, addr, type)
    """
    def _format_address(mac, ip, type):
        # Mirrors the OS-EXT-IPS address dict format returned by Nova.
        try:
            version = netaddr.IPAddress(ip).version
        except Exception:
            error_message = _('Unable to parse IP address %s.') % ip
            LOG.error(error_message)
            messages.error(request, error_message)
            raise
        return {u'OS-EXT-IPS-MAC:mac_addr': mac,
                u'version': version,
                u'addr': ip,
                u'OS-EXT-IPS:type': type}
    addresses = collections.defaultdict(list)
    instance_ports = ports.get(server.id, [])
    for port in instance_ports:
        network_name = network_names.get(port.network_id)
        # Ports on networks we could not resolve a name for are skipped.
        if network_name is not None:
            for fixed_ip in port.fixed_ips:
                addresses[network_name].append(
                    _format_address(port.mac_address,
                                    fixed_ip['ip_address'],
                                    u'fixed'))
            port_fips = floating_ips.get(port.id, [])
            for fip in port_fips:
                addresses[network_name].append(
                    _format_address(port.mac_address,
                                    fip.floating_ip_address,
                                    u'floating'))
    return dict(addresses)
@memoized
def list_extensions(request):
    """Return the Neutron extensions enabled on the server as a tuple."""
    response = neutronclient(request).list_extensions()
    # Missing key degrades to an empty tuple rather than raising.
    return tuple(response.get('extensions', ()))
@memoized
def is_extension_supported(request, extension_alias):
    """Return True if the named Neutron extension is enabled."""
    return any(extension['alias'] == extension_alias
               for extension in list_extensions(request))
def is_enabled_by_config(name, default=True):
    """Look up *name* in the OPENSTACK_NEUTRON_NETWORK settings dict.

    Returns ``default`` when either the setting or the key is absent.
    """
    cfg = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
    if name in cfg:
        return cfg[name]
    return default
@memoized
def is_service_enabled(request, config_name, ext_name):
    """True when a service is enabled in settings AND its extension exists."""
    return (is_enabled_by_config(config_name) and
            is_extension_supported(request, ext_name))
@memoized
def is_quotas_extension_supported(request):
    """True when quota support is configured on and the extension exists."""
    return (is_enabled_by_config('enable_quotas', False) and
            is_extension_supported(request, 'quotas'))
# Using this mechanism till a better plugin/sub-plugin detection
# mechanism is available.
# When using specific plugins the profile_support can be
# turned on if needed to configure and/or use profiles.
# Since this is a temporary mechanism used to detect profile_support
# @memoized is not being used.
# TODO(absubram): Change this config variable check with
# subplugin/plugin detection API when it becomes available.
def is_port_profiles_supported():
    """Return True when the Cisco N1KV port-profile support is configured.

    Reads the 'profile_support' key of the OPENSTACK_NEUTRON_NETWORK
    setting; only the value 'cisco' (any case) enables profiles.
    """
    network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
    # Can be used to check for vendor specific plugin
    profile_support = network_config.get('profile_support', None)
    if str(profile_support).lower() == 'cisco':
        return True
    # Previously fell off the end and returned None; make the falsy
    # result explicit so the function always returns a bool.
    return False
# FEATURE_MAP is used to define:
# - related neutron extension name (key: "extension")
# - corresponding dashboard config (key: "config")
# - RBAC policies (key: "policies")
# If a key is not contained, the corresponding permission check is skipped.
# See the comment block above for the meaning of each key.
FEATURE_MAP = {
    # Distributed Virtual Router support.
    'dvr': {
        'extension': 'dvr',
        'config': {
            'name': 'enable_distributed_router',
            'default': False,
        },
        'policies': {
            'get': 'get_router:distributed',
            'create': 'create_router:distributed',
            'update': 'update_router:distributed',
        }
    },
    # Highly-available (VRRP) router support.
    'l3-ha': {
        'extension': 'l3-ha',
        'config': {'name': 'enable_ha_router',
                   'default': False},
        'policies': {
            'get': 'get_router:ha',
            'create': 'create_router:ha',
            'update': 'update_router:ha',
        }
    },
}
def get_feature_permission(request, feature, operation=None):
    """Check if a feature-specific field can be displayed.

    This method check a permission for a feature-specific field.
    Such field is usually provided through Neutron extension.

    :param request: Request Object
    :param feature: feature name defined in FEATURE_MAP
    :param operation (optional): Operation type. The valid value should be
        defined in FEATURE_MAP[feature]['policies']
        It must be specified if FEATURE_MAP[feature] has 'policies'.
    :raises ValueError: if ``feature`` or ``operation`` is unknown
    """
    network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
    feature_info = FEATURE_MAP.get(feature)
    if not feature_info:
        # Translators: Only used inside Horizon code and invisible to users
        # Apply the % substitution so the feature name actually appears in
        # the message (it was previously left as a raw '%(feature)s').
        raise ValueError(_("The requested feature '%(feature)s' is unknown. "
                           "Please make sure to specify a feature defined "
                           "in FEATURE_MAP.") % {'feature': feature})
    # Check dashboard settings
    feature_config = feature_info.get('config')
    if feature_config:
        if not network_config.get(feature_config['name'],
                                  feature_config['default']):
            return False
    # Check policy
    feature_policies = feature_info.get('policies')
    if feature_policies:
        policy_name = feature_policies.get(operation)
        if not policy_name:
            # Translators: Only used inside Horizon code and invisible to users
            raise ValueError(_("The 'operation' parameter for "
                               "get_feature_permission '%(feature)s' "
                               "is invalid. It should be one of %(allowed)s")
                             % {'feature': feature,
                                'allowed': ' '.join(feature_policies.keys())})
        role = (('network', policy_name),)
        if not policy.check(role, request):
            return False
    # Check if a required extension is enabled
    feature_extension = feature_info.get('extension')
    if feature_extension:
        try:
            return is_extension_supported(request, feature_extension)
        except Exception:
            msg = (_("Failed to check Neutron '%s' extension is not supported")
                   % feature_extension)
            LOG.info(msg)
            return False
    # If all checks are passed, now a given feature is allowed.
    return True
| sandvine/horizon | openstack_dashboard/api/neutron.py | Python | apache-2.0 | 50,711 |
import matplotlib.pyplot as plt
# Demonstrates automatic legends for three artist types stacked vertically:
# bar containers, errorbar containers, and a stem plot.
ax = plt.subplot(311)
# Two interleaved bar groups; the 'label' kwarg feeds ax.legend().
b1 = ax.bar([0, 1, 2], [0.2, 0.3, 0.1], width=0.4,
            label="Bar 1", align="center")
b2 = ax.bar([0.5, 1.5, 2.5], [0.3, 0.2, 0.2], color="red", width=0.4,
            label="Bar 2", align="center")
ax.legend()
ax = plt.subplot(312)
# Errorbars with x-only, y-only, and both-axis error extents.
err1 = ax.errorbar([0, 1, 2], [2, 3, 1], xerr=0.4, fmt="s",
                   label="test 1")
err2 = ax.errorbar([0, 1, 2], [3, 2, 4], yerr=0.3, fmt="o",
                   label="test 2")
err3 = ax.errorbar([0, 1, 2], [1, 1, 3], xerr=0.4, yerr=0.3, fmt="^",
                   label="test 3")
ax.legend()
ax = plt.subplot(313)
ll = ax.stem([0.3, 1.5, 2.7], [1, 3.6, 2.7], label="stem test")
ax.legend()
plt.show()
| lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/pylab_examples/legend_demo4.py | Python | mit | 723 |
# -*- coding: utf-8 -*-
"""
SQLpie License (MIT License)
Copyright (c) 2011-2016 André Lessa, http://sqlpie.com
See LICENSE file.
"""
from flask import Response
import json
import sqlpie
class SearchController(sqlpie.BaseController):
    """HTTP-facing controller exposing the indexing and search services."""
    @staticmethod
    @sqlpie.BaseController.controller_wrapper
    def service_index(request=None):
        """Index pending documents; optionally rebuild the index first.

        Honors an optional JSON body of the form
        ``{"options": {<REBUILD_PARAM>: true}}``.
        """
        rebuild = False
        json_data = request.get_json()
        if "options" in json_data:
            options = json_data["options"]
            if sqlpie.Indexer.REBUILD_PARAM in options:
                rebuild = options[sqlpie.Indexer.REBUILD_PARAM]
        if rebuild:
            sqlpie.Indexer.rebuild()
        sqlpie.Indexer().index_documents()
        return {'success': True}
    @staticmethod
    @sqlpie.BaseController.controller_wrapper
    def service_search(request):
        """Validate the JSON request and dispatch to the right search mode.

        Three mutually layered modes: tag-cloud search, geo (radius around
        a lat,lng target) search, and plain relevance search.
        """
        json_data = request.get_json()
        query, tagcloud_search, geo_radius_search, geo_target_search = "", "", "", ""
        geo_sort_by = sqlpie.Searcher.SORT_BY_DISTANCE
        is_tagcloud_search = False
        is_geo_search = False
        # Pagination defaults: first page of 10 hits.
        num_results = 10
        start_result = 0
        if sqlpie.Searcher.QUERY_OPERATOR in json_data:
            query = json_data[sqlpie.Searcher.QUERY_OPERATOR]
        if sqlpie.Searcher.TAGCLOUD_OPERATOR in json_data:
            tagcloud_search = json_data[sqlpie.Searcher.TAGCLOUD_OPERATOR].lower()
        if sqlpie.Searcher.GEO_RADIUS_OPERATOR in json_data:
            geo_radius_search = json_data[sqlpie.Searcher.GEO_RADIUS_OPERATOR]
        if sqlpie.Searcher.GEO_TARGET_OPERATOR in json_data:
            geo_target_search = json_data[sqlpie.Searcher.GEO_TARGET_OPERATOR].lower()
        if sqlpie.Searcher.GEO_SORT_BY in json_data:
            geo_sort_by = json_data[sqlpie.Searcher.GEO_SORT_BY].lower()
        if sqlpie.Searcher.NUM_RESULTS in json_data:
            num_results = int(json_data[sqlpie.Searcher.NUM_RESULTS])
        if sqlpie.Searcher.START_RESULT in json_data:
            start_result = int(json_data[sqlpie.Searcher.START_RESULT])
        if tagcloud_search:
            # Tag-cloud ordering must be one of the two supported modes.
            if not tagcloud_search in [sqlpie.Searcher.SORT_TAGCLOUD_BY_RELEVANCE, \
                                        sqlpie.Searcher.SORT_TAGCLOUD_BY_FREQUENCY]:
                raise sqlpie.CustomException(sqlpie.CustomException.INVALID_ARGUMENTS)
            else:
                is_tagcloud_search = True
        if geo_radius_search or geo_target_search:
            # Geo search needs a numeric radius, a "lat,lng" target pair and
            # a recognized sort mode; anything else is rejected wholesale.
            if not sqlpie.Util.is_number(geo_radius_search) or not geo_radius_search or \
                    not geo_target_search or not len(geo_target_search.split(",")) == 2 or \
                    not sqlpie.Util.is_number(geo_target_search.split(",")[0]) or \
                    not sqlpie.Util.is_number(geo_target_search.split(",")[1]) or \
                    geo_sort_by not in [sqlpie.Searcher.SORT_BY_RELEVANCE, sqlpie.Searcher.SORT_BY_DISTANCE]:
                raise sqlpie.CustomException(sqlpie.CustomException.INVALID_ARGUMENTS)
            else:
                is_geo_search = True
        engine = sqlpie.Searcher(query)
        # Tag-cloud mode wins over geo mode when both are requested.
        if is_tagcloud_search:
            results = engine.run_tagcloud(tagcloud_search, num_results)
        elif is_geo_search:
            results = engine.run_geosearch(geo_radius_search, geo_target_search, num_results, start_result, geo_sort_by)
        else:
            results = engine.run_searcher(num_results, start_result)
        return {'success': True, 'results':results}
| lessaworld/SQLpie | sqlpie/controllers/search_controller.py | Python | mit | 3,464 |
import os
from conf import settings
from util import *
class ProjectDefinition(object):
    """Bundle of per-project data sources plus derived DB table names."""
    # Geographic spatial reference id shared by all loaded shapefiles.
    SRID=settings.GEOGRAPHIC_SRID
    def __unicode__(self):
        return '<ProjectDefinition "{0}">'.format(self.title)
    def __str__(self):
        # NOTE(review): Python 2 idiom -- ``unicode`` does not exist on
        # Python 3; confirm the project targets Python 2 only.
        return unicode(self)
    def __init__(self, name, title, industrial_parcels, demography, converted_parcels_shapefile, raster_layers, fips_list):
        self.name = name
        self.title = title
        self.demography = demography
        self.industrial_parcels = industrial_parcels
        self.converted_parcels_shapefile = converted_parcels_shapefile
        self.raster_layers = raster_layers
        self.fips_list = fips_list
    def raw_data_dir(self, path=''):
        """Path under the project's raw-data directory."""
        return os.path.join(settings.RAW_DATA_DIR, self.name, path)
    def app_data_dir(self, path=''):
        """Path under the project's application-data directory."""
        return os.path.join(settings.APP_DATA_DIR, self.name, path)
    # def probabilty_names(self):
    #     return [p['name'] for p in self.industrial_parcels['probability_categories']]
    # The *_table properties derive database table names from the project
    # name; '_R_' marks raw (pre-processing) tables.
    @property
    def raw_industrial_table(self):
        return '_R_' + self.name + '_industrial'
    @property
    def raw_brownfields_table(self):
        return '_R_' + self.name + '_brownfields'
    @property
    def raw_converted_table(self):
        return '_R_' + self.name + '_converted'
    @property
    def occupation_table(self):
        return self.name + '_occupation'
    @property
    def race_table(self):
        return self.name + '_race'
    @property
    def industrial_table(self):
        return self.name + '_industrial'
    @property
    def raw_demography_table(self):
        return '_R_' + self.name + '_demography'
    def load_shapefiles(self):
        """Load the project's industrial and converted shapefiles into the
        raw tables using the shared geographic SRID."""
        load_shapefile(self.raw_industrial_table, self.industrial_parcels['shapefile'], self.SRID)
        load_shapefile(self.raw_converted_table, self.converted_parcels_shapefile, self.SRID)
# Mapping of raw shapefile occupation column names to display names.
demography_race_fields = {
    'MGMT': 'management',
    'SERVICE': 'service',
    'OFFICE': 'office',
    'CONST': 'construction',
    'PROD': 'manufacturing',
}
# Project definition for Mecklenburg County, NC.
meck = ProjectDefinition(
    name='meck',
    title='Mecklenburg County, North Carolina',
    converted_parcels_shapefile='meck_rawdata/alreadyconverted_m_P',
    industrial_parcels={
        'shapefile': 'meck_rawdata/still_industrial_m_P',
        # Each category maps a raw shapefile column ('rawname') to an
        # application field name and human-readable title.
        'probability_categories': (
            {
                'rawname': 'pcnv_l',
                'name': 'risk_main',
                'title': 'Probability of Conversion to Industrial'
            },
            {
                'rawname': 'pcnv_r',
                'name': 'risk_res',
                'title': 'Probability of Conversion to Industrial from Residential'
            },
            {
                'rawname': 'pcnv_c',
                'name': 'risk_com',
                'title': 'Probability of Conversion to Industrial from Commercial'
            },
        ),
    },
    demography=settings.demography_categories,
    raster_layers=(
        {
            'name': 'corridors-wedges',
            'title': 'Corridors & Wedges',
            'format': 'png',
            'minzoom': 10,
            'maxzoom': 13,
        },
        # TODO ...
    ),
    fips_list=('51117', '37119')
)
# Project definition for Cook County, IL (no raster overlays).
cook = ProjectDefinition(
    name='cook',
    title='Cook County, Illinois',
    industrial_parcels={
        'shapefile': 'cook_rawdata/cook_still_industrial-WGS84',
        'probability_categories': (
            {
                'rawname': 'PCnv_',
                'name': 'risk_main',
                'title': 'Probability of Conversion to Industrial'
            },
        ),
    },
    demography=settings.demography_categories,
    converted_parcels_shapefile='cook_rawdata/cook_alreadyconverted1',
    raster_layers=(),
    fips_list=('17031',)
)
# All projects known to the application.
projects = set((
    meck, cook,
))
| maackle/ILC-app | conf/projects.py | Python | gpl-3.0 | 3,813 |
__author__ = 'amarch'
# -*- coding: utf-8 -*-
from utils import strutils as infoutils
import itertools
from scipy.integrate import *
from RotationCurve import *
from Galaxy import *
from utils import strutils as infoutils
import itertools
import copy
from RadialToAzimuthalRatioHandler import *
import scipy.optimize
class RadialToVerticalRatioHandler():
    """Estimates the vertical-to-radial velocity dispersion ratio
    (sigma_Z/sigma_R) and the central radial dispersion sig_R_0 of a galaxy
    by least-squares fitting the observed line-of-sight dispersions along
    the major and minor axes.
    """
    def __init__(self, galaxy):
        self.galaxy = galaxy
        # Fitted sigma_Z / sigma_R ratio (0 until an evaluation runs).
        self.sigZ_to_sigR = 0.0
        # Fitted central radial dispersion sigma_R(0).
        self.sig_R_0 = 0.0
    def residuals(self, params, xdata, ydata):
        # Linear least-squares residuals for ydata ~ xdata . params.
        return (ydata - numpy.dot(xdata, params))
    def experimental_alpha_evaluation(self, normalize=False):
        """Fit (sig_R_0, alpha) from the raw major-axis data points.

        When ``normalize`` is True, observations are divided by the
        normalized minor-axis profile instead of folding it into xdata.
        """
        r_eff = self.galaxy.r_eff
        x0 = [0.3, 0.3]
        # Central LOS dispersion squared anchors the fit at r=0.
        sig_max = self.galaxy.sig_los_mi.bezier(0.0)**2
        points = map(lambda p: [abs(p[0]), p[1]], self.galaxy.sig_los_ma.data_points)
        # points = filter(lambda p: p[0] > r_eff, points)
        points.sort()
        radii = [p[0] for p in points]
        if normalize:
            ydata = numpy.concatenate(([sig_max],[(p[1]**2)/(self.norm_sig_los_mi(p[0])**2) for p in points]))
            xdata = numpy.transpose(numpy.array([numpy.concatenate(([1.0],[self.galaxy.sve_handler.sigPhi2_to_sigR2(x) for x in radii])),
                                                 numpy.concatenate(([1.0],[1.0 for x in radii]))]))
        else:
            ydata = numpy.concatenate(([sig_max],[p[1]**2 for p in points]))
            xdata = numpy.transpose(numpy.array([numpy.concatenate(([1.0],[(self.norm_sig_los_mi(x)**2)*self.galaxy.sve_handler.sigPhi2_to_sigR2(x) for x in radii])),
                                                 numpy.concatenate(([1.0],[(self.norm_sig_los_mi(x)**2) for x in radii]))]))
        solution = scipy.optimize.leastsq(self.residuals, x0, args=(xdata, ydata))[0]
        print 'Solution: <',solution[0],' : ',solution[1],'>'
        # Only accept physically meaningful (positive) fit parameters.
        if solution[0] > 0 and solution[1] > 0:
            tan = math.tan(self.galaxy.incl*math.pi/180.0)
            sin = math.sin(self.galaxy.incl*math.pi/180.0)
            self.sig_R_0 = math.sqrt(solution[0])/sin
            self.sigZ_to_sigR = math.sqrt(solution[1]/solution[0])*tan
            print 'sig_R_0: ', self.sig_R_0
            print 'sigZ/sigR: ', self.sigZ_to_sigR
        # self.set_sigZ_to_sigR(0.19)
    def experimental_alpha_evaluation2(self, normalize=False):
        """Variant of the fit using the smoothed (bezier) major-axis profile
        sampled on a regular radius grid, restricted to r > r_eff.
        """
        r_eff = self.galaxy.r_eff
        x0 = [0.3, 0.3]
        sig_max = self.galaxy.sig_los_mi.bezier(0.0)**2
        radii_range = [abs(x[0]) for x in self.galaxy.sig_los_ma.data_points]
        # points = map(lambda p: [abs(p[0]), self.galaxy.sig_los_ma.bezier(abs(p[0]))], self.galaxy.sig_los_ma.data_points)
        points = map(lambda p: [abs(p), self.galaxy.sig_los_ma.bezier(abs(p))], numpy.arange(min(radii_range), max(radii_range), 0.1).tolist())
        points = filter(lambda p: p[0] > r_eff, points)
        points.sort()
        radii = [p[0] for p in points]
        if normalize:
            ydata = numpy.concatenate(([sig_max],[(p[1]**2)/(self.norm_sig_los_mi(p[0])**2) for p in points]))
            xdata = numpy.transpose(numpy.array([numpy.concatenate(([1.0],[self.galaxy.sve_handler.sigPhi2_to_sigR2(x) for x in radii])),
                                                 numpy.concatenate(([1.0],[1.0 for x in radii]))]))
        else:
            ydata = numpy.concatenate(([sig_max],[p[1]**2 for p in points]))
            xdata = numpy.transpose(numpy.array([numpy.concatenate(([1.0],[(self.norm_sig_los_mi(x)**2)*self.galaxy.sve_handler.sigPhi2_to_sigR2(x) for x in radii])),
                                                 numpy.concatenate(([1.0],[(self.norm_sig_los_mi(x)**2) for x in radii]))]))
        solution = scipy.optimize.leastsq(self.residuals, x0, args=(xdata, ydata))[0]
        print 'Solution: <',solution[0],' : ',solution[1],'>'
        if solution[0] > 0 and solution[1] > 0:
            tan = math.tan(self.galaxy.incl*math.pi/180.0)
            sin = math.sin(self.galaxy.incl*math.pi/180.0)
            self.sig_R_0 = math.sqrt(solution[0])/sin
            self.sigZ_to_sigR = math.sqrt(solution[1]/solution[0])*tan
            print 'sig_R_0: ', self.sig_R_0
            print 'sigZ/sigR: ', self.sigZ_to_sigR
        # self.set_sigZ_to_sigR(0.19)
    def experimental_alpha_evaluation3(self):
        """Variant fitting major- and minor-axis profiles simultaneously."""
        r_eff = self.galaxy.r_eff
        x0 = [0.3, 0.3]
        sig_max = self.galaxy.sig_los_mi.bezier(0.0)**2
        points_ma = map(lambda p: [abs(p[0]), self.galaxy.sig_los_ma.bezier(abs(p[0]))], self.galaxy.sig_los_ma.data_points)
        # points_ma = filter(lambda p: p[0] > r_eff, points_ma)
        points_ma.sort()
        radii_ma = [p[0] for p in points_ma]
        points_mi = map(lambda p: [abs(p[0]), p[1]], self.galaxy.sig_los_mi.data_points)
        # points_mi = filter(lambda p: p[0] > r_eff, points_mi)
        points_mi.sort()
        radii_mi = [p[0] for p in points_mi]
        ydata = numpy.concatenate(([sig_max],[p[1]**2 for p in points_ma], [p[1]**2 for p in points_mi]))
        xdata = numpy.transpose(numpy.array([numpy.concatenate(([1.0],
                                                                [(self.norm_sig_los_mi(x)**2)*self.galaxy.sve_handler.sigPhi2_to_sigR2(x) for x in radii_ma],
                                                                [(self.galaxy.sig_los_mi.bezier(x)**2)/sig_max for x in radii_mi])),
                                             numpy.concatenate(([1.0],
                                                                [(self.norm_sig_los_mi(x)**2) for x in radii_ma],
                                                                [(self.galaxy.sig_los_mi.bezier(x)**2)/sig_max for x in radii_mi]))]))
        solution = scipy.optimize.leastsq(self.residuals, x0, args=(xdata, ydata))[0]
        print 'Solution: <',solution[0],' : ',solution[1],'>'
        if solution[0] > 0 and solution[1] > 0:
            tan = math.tan(self.galaxy.incl*math.pi/180.0)
            sin = math.sin(self.galaxy.incl*math.pi/180.0)
            self.sig_R_0 = math.sqrt(solution[0])/sin
            self.sigZ_to_sigR = math.sqrt(solution[1]/solution[0])*tan
            print 'sig_R_0: ', self.sig_R_0
            print 'sigZ/sigR: ', self.sigZ_to_sigR
    def set_sigZ_to_sigR(self, alpha):
        # NOTE(review): the ``alpha`` argument is ignored -- the ratio is
        # hardcoded to 0.2. Confirm whether ``self.sigZ_to_sigR = alpha``
        # was intended before relying on this setter.
        self.sigZ_to_sigR = 0.2
        self.sig_R_0 = self.galaxy.sig_los_mi.bezier(0.0)/math.sqrt(math.sin(self.galaxy.incl*math.pi/180.0)**2 +
                                                                    (self.sigZ_to_sigR*math.cos(self.galaxy.incl*math.pi/180.0))**2)
    def norm_sig_los_mi(self, x):
        """Minor-axis LOS dispersion profile normalized to its value at r=0."""
        sig_max = self.galaxy.sig_los_mi.bezier(0.0)
        return self.galaxy.sig_los_mi.bezier(x)/sig_max
    def plot_residuals(self):
        # plt.plot([0.0] + radii, map(abs, self.residuals((solution[0], solution[1]), xdata, ydata)), 'x-')
        pass
    def plot_sig_R(self):
        """Plot the reconstructed radial dispersion over the observed radii."""
        points = map(lambda p : [abs(p[0]), p[1]], self.galaxy.sig_los_ma.data_points)
        points.sort()
        radii = [p[0] for p in points]
        plt.plot(radii, [self.sig_R(x) for x in radii], 'x-', label=(r'$\sigma_{R}^{\alpha=%s}$' % self.sigZ_to_sigR))
    def sig_R(self, x):
        # Radial dispersion: central value scaled by the normalized profile.
        return self.sig_R_0*self.norm_sig_los_mi(x)
    def sig_Z(self, x):
        # Vertical dispersion via the fitted sigma_Z/sigma_R ratio.
        return self.sigZ_to_sigR*self.sig_R(x)
    def sig_Phi(self, x):
        # Azimuthal dispersion via the epicyclic sigma_Phi/sigma_R ratio.
        return self.sig_R(x)*self.galaxy.sve_handler.sigPhi_to_sigR(x)
    def plot_sig_Z(self):
        """Plot the reconstructed vertical dispersion over the observed radii."""
        points = map(lambda p : [abs(p[0]), p[1]], self.galaxy.sig_los_ma.data_points)
        points.sort()
        radii = [p[0] for p in points]
        plt.plot(radii, [self.sig_Z(x) for x in radii], 'x-', label=(r'$\sigma_{Z}^{\alpha=%s}$' % self.sigZ_to_sigR))
    def plot_reconstructed_sig_los_mi(self):
        """Plot the minor-axis LOS dispersion implied by the fitted SVE."""
        points = map(lambda p : [abs(p[0]), p[1]], self.galaxy.sig_los_ma.data_points)
        points.sort()
        radii = [p[0] for p in points]
        def zero_or_positive(x):
            # Clamp negative fit artifacts before taking the square root.
            return 0 if x < 0 else x
        def new_sig_mi_2(x):
            return self.sig_R(x)**2*(math.sin(self.galaxy.incl*math.pi/180.0)**2 +
                                     (self.sigZ_to_sigR*math.cos(self.galaxy.incl*math.pi/180.0))**2)
        new_sig_los_mi = [math.sqrt(zero_or_positive(new_sig_mi_2(x))) for x in radii]
        plt.plot(radii, new_sig_los_mi, 'v-', label=(r'$\sigma_{mi}^{\alpha=%s}$' % self.sigZ_to_sigR))
    def plot_reconstructed_sig_los_ma(self):
        """Plot the major-axis LOS dispersion implied by the fitted SVE."""
        points = map(lambda p : [abs(p[0]), p[1]], self.galaxy.sig_los_ma.data_points)
        points.sort()
        radii = [p[0] for p in points]
        def zero_or_positive(x):
            # Clamp negative fit artifacts before taking the square root.
            return 0 if x < 0 else x
        def new_sig_ma_2(x):
            return (self.sig_Phi(x)*math.sin(self.galaxy.incl*math.pi/180.0))**2 + \
                   (self.sig_Z(x)*math.cos(self.galaxy.incl*math.pi/180.0))**2
        new_sig_los_ma = [math.sqrt(zero_or_positive(new_sig_ma_2(x))) for x in radii]
        plt.plot(radii, new_sig_los_ma, 'v-', label=(r'$\sigma_{ma}^{\alpha=%s}$' % self.sigZ_to_sigR))
| Amarchuk/2FInstability | core/RadialToVerticalRatioHandler.py | Python | gpl-3.0 | 8,998 |
#from bin2kernel import Bin2Kernel
#from bin2kernel import makeBin2KernelAsEstimator
#from bin2kernel import Bin2KernelLaplaceLinearN
#from bin2kernel import getFastestBin2Kernel
#from bin2kernel import Bin2KernelEPLinearN
from laplace import LaplaceGLMM_N3K1, LaplaceGLMM_N1K3
from ep import EPGLMM_N3K1, EPGLMM_N1K3
from lmm import LMM
'''
Return the fastest implementation according to the data provided.
It basically determines if the number of individuals is bigger than
the number of snps.
'''
def getGLMM(approx, link, y, G0, G1, penalty=None, penalizeBias=False, debug=False):
    """Return the fastest GLMM implementation for the data provided.

    Chooses between the N3K1 and N1K3 variants depending on whether the
    number of individuals (``y.size``) exceeds the total number of SNP
    columns in ``G0`` and ``G1``.

    Args:
        approx: approximation method, either 'laplace' or 'ep'.
        link: link function, forwarded to the GLMM implementation.
        y: phenotype array; only its ``size`` is inspected here.
        G0, G1: SNP matrices (either may be None).
        penalty, penalizeBias, debug: forwarded to the implementation.

    Raises:
        AssertionError: if `approx` is neither 'laplace' nor 'ep'.
    """
    # Total number of SNP columns over both matrices.
    k = 0
    if G0 is not None:
        k += G0.shape[1]
    if G1 is not None:
        k += G1.shape[1]
    N = y.size
    if N <= k:
        if approx == 'laplace':
            return LaplaceGLMM_N3K1(link, penalty=penalty, penalizeBias=penalizeBias, debug=debug)
        if approx == 'ep':
            return EPGLMM_N3K1(link, penalty=penalty, penalizeBias=penalizeBias, debug=debug)
        assert False, 'Unknown approximation.'
    if approx == 'laplace':
        return LaplaceGLMM_N1K3(link, penalty=penalty, penalizeBias=penalizeBias, debug=debug)
    if approx == 'ep':
        return EPGLMM_N1K3(link, penalty=penalty, penalizeBias=penalizeBias, debug=debug)
    assert False, 'Unknown approximation.'
def getLMM(forcefullrank=False):
    """Return a linear mixed model instance."""
    lmm = LMM(forcefullrank=forcefullrank)
    return lmm
| zhonghualiu/FaST-LMM | fastlmm/inference/__init__.py | Python | apache-2.0 | 1,419 |
# Copyright (C) 2010 Todd Kennedy <todd.kennedy@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import wx
class ListCtrlWithTree(wx.ListCtrl):
    """Used for creating a list ctrl with a tree"""
    # Indentation prefix prepended to child rows to fake a tree hierarchy.
    childIndent = "    "
    def __init__(self, ctrlColumn=0, *args, **kw):
        """Initialise the control.

        ctrlColumn: index of the column carrying the tree indentation.
        Remaining arguments are forwarded to wx.ListCtrl.
        """
        wx.ListCtrl.__init__(self, *args, **kw)
        self.ctrlColumn = ctrlColumn
    def AddChild(self, child, parent):
        # TODO: not implemented yet -- stub kept for the intended API.
        pass
    def RemoveChild(self, child):
        # TODO: not implemented yet -- stub kept for the intended API.
        pass
"""Unit tests for servicediscovery-supported APIs."""
import boto3
import sure # noqa # pylint: disable=unused-import
from moto import mock_servicediscovery
# See our Development Tips on writing tests for hints on how to write good tests:
# http://docs.getmoto.org/en/latest/docs/contributing/development_tips/tests.html
@mock_servicediscovery
def test_create_http_namespace_with_tags():
    """Tags given to create_http_namespace must be retrievable afterwards."""
    conn = boto3.client("servicediscovery", region_name="eu-west-1")
    tags = [{"Key": "key1", "Value": "val1"}]
    conn.create_http_namespace(Name="mynamespace", Tags=tags)
    arn = conn.list_namespaces()["Namespaces"][0]["Arn"]
    result = conn.list_tags_for_resource(ResourceARN=arn)
    result.should.have.key("Tags")
    result["Tags"].should.equal(tags)
@mock_servicediscovery
def test_create_public_dns_namespace_with_tags():
    """Tags given to create_public_dns_namespace must be retrievable."""
    conn = boto3.client("servicediscovery", region_name="eu-west-1")
    tags = [{"Key": "key1", "Value": "val1"}]
    conn.create_public_dns_namespace(Name="mynamespace", Tags=tags)
    arn = conn.list_namespaces()["Namespaces"][0]["Arn"]
    result = conn.list_tags_for_resource(ResourceARN=arn)
    result.should.have.key("Tags")
    result["Tags"].should.equal(tags)
@mock_servicediscovery
def test_create_private_dns_namespace_with_tags():
    """Tags given to create_private_dns_namespace must be retrievable."""
    conn = boto3.client("servicediscovery", region_name="eu-west-1")
    tags = [{"Key": "key1", "Value": "val1"}]
    conn.create_private_dns_namespace(Name="mynamespace", Vpc="vpc", Tags=tags)
    arn = conn.list_namespaces()["Namespaces"][0]["Arn"]
    result = conn.list_tags_for_resource(ResourceARN=arn)
    result.should.have.key("Tags")
    result["Tags"].should.equal(tags)
@mock_servicediscovery
def test_create_service_with_tags():
    """Tags given to create_service must be retrievable afterwards."""
    conn = boto3.client("servicediscovery", region_name="eu-west-1")
    tags = [{"Key": "key1", "Value": "val1"}]
    conn.create_service(Name="myservice", Tags=tags)
    arn = conn.list_services()["Services"][0]["Arn"]
    result = conn.list_tags_for_resource(ResourceARN=arn)
    result.should.have.key("Tags")
    result["Tags"].should.equal(tags)
@mock_servicediscovery
def test_tag_resource():
    """tag_resource must append to the tags set at creation time."""
    conn = boto3.client("servicediscovery", region_name="ap-southeast-1")
    conn.create_http_namespace(Name="mynamespace",
                               Tags=[{"Key": "key1", "Value": "val1"}])
    arn = conn.list_namespaces()["Namespaces"][0]["Arn"]
    conn.tag_resource(ResourceARN=arn, Tags=[{"Key": "key2", "Value": "val2"}])
    result = conn.list_tags_for_resource(ResourceARN=arn)
    result.should.have.key("Tags")
    expected = [{"Key": "key1", "Value": "val1"},
                {"Key": "key2", "Value": "val2"}]
    result["Tags"].should.equal(expected)
@mock_servicediscovery
def test_untag_resource():
    """untag_resource must remove only the named tag keys."""
    conn = boto3.client("servicediscovery", region_name="us-east-2")
    conn.create_http_namespace(Name="mynamespace")
    arn = conn.list_namespaces()["Namespaces"][0]["Arn"]
    conn.tag_resource(ResourceARN=arn,
                      Tags=[{"Key": "key1", "Value": "val1"},
                            {"Key": "key2", "Value": "val2"}])
    conn.untag_resource(ResourceARN=arn, TagKeys=["key1"])
    result = conn.list_tags_for_resource(ResourceARN=arn)
    result.should.have.key("Tags")
    result["Tags"].should.equal([{"Key": "key2", "Value": "val2"}])
| spulec/moto | tests/test_servicediscovery/test_servicediscovery_tags.py | Python | apache-2.0 | 3,419 |
from datetime import datetime
from verleihtool.test import ClientTestCase
from depot.models import Depot, Organization
from rental.models import Rental
class RentalDetailTestCase(ClientTestCase):
    """Tests for the rental detail view and its state-transition buttons.

    Guests and depot managers see different sets of action buttons
    depending on the rental's current state.
    """

    # Every state-transition action the detail page can render a button for.
    ACTIONS = ('Pending', 'Revoked', 'Approved', 'Declined', 'Returned')

    def create_rental(self, state):
        """Create a rental in the given state attached to the test depot."""
        return Rental.objects.create(
            depot=self.depot,
            start_date=datetime(2017, 3, 25),
            return_date=datetime(2017, 3, 27),
            state=state
        )

    def assertButtonCount(self, response, action, count):
        """Assert the button for `action` is rendered exactly `count` times."""
        # Bootstrap class and label for each action's button.
        data = {
            'Pending': ('warning', 'Reset'),
            'Revoked': ('danger', 'Revoke'),
            'Approved': ('success', 'Approve'),
            'Declined': ('danger', 'Decline'),
            'Returned': ('info', 'Finish'),
        }[action]
        button = ('<button type="submit" class="btn btn-%s pull-left">%s</button>'
                  % data)
        self.assertInHTML(button, response.content.decode(), count)

    def assertButton(self, response, action):
        """Assert the button for `action` is rendered exactly once."""
        self.assertButtonCount(response, action, count=1)

    def assertNotButton(self, response, action):
        """Assert the button for `action` is not rendered at all."""
        self.assertButtonCount(response, action, count=0)

    def assertOnlyButtons(self, response, *expected):
        """Assert that exactly the buttons in `expected` are rendered.

        Every action in ACTIONS that is not listed must be absent.
        """
        for action in self.ACTIONS:
            self.assertButtonCount(
                response, action, count=1 if action in expected else 0)

    def setUp(self):
        super().setUp()
        organization = Organization.objects.create(
            name='My organization'
        )
        self.depot = Depot.objects.create(
            name='My depot',
            organization=organization
        )

    def test_rental_detail(self):
        """The detail page renders and shows the rental dates."""
        rental = self.create_rental(Rental.STATE_PENDING)
        response = self.as_guest.get('/rentals/%s/' % rental.uuid)
        self.assertSuccess(response, 'rental/detail.html')
        self.assertContains(response, 'March 25, 2017')
        self.assertContains(response, 'March 27, 2017')

    def test_rental_not_found_as_guest(self):
        response = self.as_guest.get('/rentals/I-am-not-a-uuid/')
        self.assertEqual(response.status_code, 404)

    def test_rental_not_found_as_superuser(self):
        response = self.as_superuser.get('/rentals/I-am-not-a-uuid/')
        self.assertEqual(response.status_code, 404)

    def test_buttons_as_guest_pending(self):
        rental = self.create_rental(Rental.STATE_PENDING)
        response = self.as_guest.get('/rentals/%s/' % rental.uuid)
        self.assertOnlyButtons(response, 'Revoked')

    def test_buttons_as_guest_revoked(self):
        rental = self.create_rental(Rental.STATE_REVOKED)
        response = self.as_guest.get('/rentals/%s/' % rental.uuid)
        self.assertOnlyButtons(response, 'Pending')

    def test_buttons_as_guest_approved(self):
        rental = self.create_rental(Rental.STATE_APPROVED)
        response = self.as_guest.get('/rentals/%s/' % rental.uuid)
        self.assertOnlyButtons(response, 'Revoked')

    def test_buttons_as_guest_declined(self):
        rental = self.create_rental(Rental.STATE_DECLINED)
        response = self.as_guest.get('/rentals/%s/' % rental.uuid)
        self.assertOnlyButtons(response)

    def test_buttons_as_guest_returned(self):
        rental = self.create_rental(Rental.STATE_RETURNED)
        response = self.as_guest.get('/rentals/%s/' % rental.uuid)
        self.assertOnlyButtons(response)

    def test_buttons_as_depot_manager_pending(self):
        self.depot.manager_users.add(self.user)
        rental = self.create_rental(Rental.STATE_PENDING)
        response = self.as_user.get('/rentals/%s/' % rental.uuid)
        self.assertOnlyButtons(response, 'Approved', 'Declined')

    def test_buttons_as_depot_manager_revoked(self):
        self.depot.manager_users.add(self.user)
        rental = self.create_rental(Rental.STATE_REVOKED)
        response = self.as_user.get('/rentals/%s/' % rental.uuid)
        self.assertOnlyButtons(response, 'Pending')

    def test_buttons_as_depot_manager_approved(self):
        self.depot.manager_users.add(self.user)
        rental = self.create_rental(Rental.STATE_APPROVED)
        response = self.as_user.get('/rentals/%s/' % rental.uuid)
        self.assertOnlyButtons(response, 'Pending', 'Declined', 'Returned')

    def test_buttons_as_depot_manager_declined(self):
        self.depot.manager_users.add(self.user)
        rental = self.create_rental(Rental.STATE_DECLINED)
        response = self.as_user.get('/rentals/%s/' % rental.uuid)
        self.assertOnlyButtons(response, 'Pending', 'Approved')

    def test_buttons_as_depot_manager_returned(self):
        self.depot.manager_users.add(self.user)
        rental = self.create_rental(Rental.STATE_RETURNED)
        response = self.as_user.get('/rentals/%s/' % rental.uuid)
        self.assertOnlyButtons(response, 'Approved')
| verleihtool/verleihtool | rental/tests/test_rental_detail.py | Python | agpl-3.0 | 6,570 |
"""Foyer: Atomtyping and forcefield applying. """
from __future__ import print_function
from setuptools import find_packages, setup
#####################################
VERSION = "0.11.0"
ISRELEASED = True
# Released builds use the plain version; development builds get a suffix.
__version__ = VERSION if ISRELEASED else VERSION + ".dev0"
#####################################
setup(
    name="foyer",
    version=__version__,
    # The first line of the module docstring doubles as the short description.
    description=__doc__.split("\n")[0],
    long_description=__doc__,
    author="Janos Sallai, Christoph Klein",
    author_email="janos.sallai@vanderbilt.edu, christoph.klein@vanderbilt.edu",
    url="https://github.com/mosdef-hub/foyer",
    download_url="https://github.com/mosdef-hub/foyer/tarball/{}".format(
        __version__
    ),
    packages=find_packages(),
    # NOTE(review): package_data globs are resolved relative to the 'foyer'
    # package directory, so the entries prefixed with 'foyer/' look as if
    # they would match 'foyer/foyer/...' -- verify against the built dist.
    package_data={
        "foyer": [
            "foyer/tests/*.txt",
            "foyer/tests/files/*.mol2",
            "foyer/tests/files/*.pdb",
            "foyer/tests/files/*.xml",
            "foyer/forcefields/*.xml",
            "opls_validation/*/*.top",
            "opls_validation/*/*.gro",
            "opls_validation/*/*.mol2",
            "opls_validation/oplsaa.ff/*",
            "examples/files/*",
        ]
    },
    # Plugin hooks used by foyer to discover the bundled force fields.
    entry_points={
        "foyer.forcefields": [
            "load_OPLSAA = foyer.forcefields.forcefields:load_OPLSAA",
            "load_TRAPPE_UA = foyer.forcefields.forcefields:load_TRAPPE_UA",
        ]
    },
    package_dir={"foyer": "foyer"},
    include_package_data=True,
    license="MIT",
    zip_safe=False,
    keywords="foyer",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Science/Research",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering :: Chemistry",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Operating System :: Unix",
        "Operating System :: MacOS",
    ],
)
| mosdef-hub/foyer | setup.py | Python | mit | 2,142 |
def multiply(*args):
    """Return the product of all positional arguments (1 when none given)."""
    product = 1
    for factor in args:
        product = product * factor
    return product
| OliverTED/pyshapes | pyshapes/dummy.py | Python | gpl-3.0 | 84 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import time
from unittestzero import Assert
from tests import TestCase
from pages.dashboard import DashboardPage
from pages.fxos_feedback_form import FxOSFeedbackFormPage
class TestFeedback(TestCase):
    """End-to-end smoke tests for the Firefox OS feedback form."""
    def test_submit_happy_feedback(self, mozwebqa):
        """Submit 'happy' feedback and verify it appears on the dashboard."""
        # Unique description so the dashboard search finds exactly this entry.
        timestamp = str(time.time())
        desc = 'input-tests testing happy fxos feedback ' + timestamp
        # 1. go to the feedback form
        feedback_pg = FxOSFeedbackFormPage(mozwebqa)
        feedback_pg.go_to_feedback_page()
        # Verify there is a privacy link
        # NOTE(review): this attribute is only accessed, never asserted --
        # confirm whether an Assert.true() was intended here.
        feedback_pg.has_privacy_link
        # 2. click on happy
        feedback_pg.click_happy_feedback()
        # 3. pick default country
        feedback_pg.click_country_next()
        # 4. pick default device
        feedback_pg.click_device_next()
        # 5. fill in description; submit stays disabled until text is entered
        feedback_pg.has_privacy_link
        Assert.false(feedback_pg.is_submit_enabled)
        feedback_pg.set_description(desc)
        Assert.true(feedback_pg.is_submit_enabled)
        # 6. fill in url
        feedback_pg.set_url('http://example.com/foo')
        # 7. fill in email address
        # FIXME: check email input disabled
        feedback_pg.check_email_checkbox()
        # FIXME: check email input enabled
        feedback_pg.set_email('foo@example.com')
        # 8. submit
        feedback_pg.submit(expect_success=True)
        self.take_a_breather()
        Assert.equal(feedback_pg.current_card, 'thanks')
        # 9. verify the entry shows up on the dashboard with correct metadata
        dashboard_pg = DashboardPage(mozwebqa)
        dashboard_pg.go_to_dashboard_page()
        dashboard_pg.search_for(desc)
        resp = dashboard_pg.messages[0]
        Assert.equal(resp.type.strip(), 'Happy')
        Assert.equal(resp.body.strip(), desc.strip())
        Assert.equal(resp.locale.strip(), 'English (US)')
        # FIXME: test email (can't because it's hidden when not authenticated)
        # FIXME: test url (can't because it's hidden when not authenticated)
    def test_submit_sad_feedback(self, mozwebqa):
        """Submit 'sad' feedback and verify it appears on the dashboard."""
        timestamp = str(time.time())
        desc = 'input-tests testing sad fxos feedback ' + timestamp
        # 1. go to the feedback form
        feedback_pg = FxOSFeedbackFormPage(mozwebqa)
        feedback_pg.go_to_feedback_page()
        # 2. click on sad
        feedback_pg.click_sad_feedback()
        # 3. pick default country
        feedback_pg.click_country_next()
        # 4. pick default device
        feedback_pg.click_device_next()
        # 5. fill in description
        feedback_pg.set_description(desc)
        # 6. submit
        feedback_pg.submit(expect_success=True)
        self.take_a_breather()
        Assert.equal(feedback_pg.current_card, 'thanks')
        # 7. verify the entry shows up on the dashboard with correct metadata
        dashboard_pg = DashboardPage(mozwebqa)
        dashboard_pg.go_to_dashboard_page()
        dashboard_pg.search_for(desc)
        resp = dashboard_pg.messages[0]
        Assert.equal(resp.type.strip(), 'Sad')
        Assert.equal(resp.body.strip(), desc.strip())
        Assert.equal(resp.locale.strip(), 'English (US)')
# FIXME: Test back and forth
# FIXME: Test happy submit with unicode
# FIXME: Test character counter
# FIXME: Test email verification
| staranjeet/fjord | smoketests/tests/fxos/test_fxos_feedback.py | Python | bsd-3-clause | 3,419 |
"""
Tests for DOT Adapter
"""
import unittest
from datetime import timedelta
from django.conf import settings
from django.test import TestCase
from django.utils.timezone import now
import ddt
from oauth2_provider import models
from student.tests.factories import UserFactory
# oauth_dispatch is not in CMS' INSTALLED_APPS so these imports will error during test collection
if settings.FEATURES.get("ENABLE_OAUTH2_PROVIDER"):
from ..adapters import DOTAdapter
from .constants import DUMMY_REDIRECT_URL, DUMMY_REDIRECT_URL2
from ..models import RestrictedApplication
@ddt.ddt
@unittest.skipUnless(settings.FEATURES.get("ENABLE_OAUTH2_PROVIDER"), "OAuth2 not enabled")
class DOTAdapterTestCase(TestCase):
    """
    Test class for DOTAdapter.
    """
    def setUp(self):
        """Create one public, one confidential and one restricted client."""
        super(DOTAdapterTestCase, self).setUp()
        self.adapter = DOTAdapter()
        self.user = UserFactory()
        self.public_client = self.adapter.create_public_client(
            name='public app',
            user=self.user,
            redirect_uri=DUMMY_REDIRECT_URL,
            client_id='public-client-id',
        )
        self.confidential_client = self.adapter.create_confidential_client(
            name='confidential app',
            user=self.user,
            redirect_uri=DUMMY_REDIRECT_URL,
            client_id='confidential-client-id',
        )
        self.restricted_client = self.adapter.create_confidential_client(
            name='restricted app',
            user=self.user,
            redirect_uri=DUMMY_REDIRECT_URL2,
            client_id='restricted-client-id',
        )
        self.restricted_app = RestrictedApplication.objects.create(application=self.restricted_client)
    def test_restricted_app_unicode(self):
        """
        Make sure unicode representation of RestrictedApplication is correct
        """
        self.assertEqual(unicode(self.restricted_app), u"<RestrictedApplication '{name}'>".format(
            name=self.restricted_client.name
        ))
    @ddt.data(
        ('confidential', models.Application.CLIENT_CONFIDENTIAL),
        ('public', models.Application.CLIENT_PUBLIC),
    )
    @ddt.unpack
    def test_create_client(self, client_name, client_type):
        """The adapter must create clients of the requested type and id."""
        client = getattr(self, '{}_client'.format(client_name))
        self.assertIsInstance(client, models.Application)
        self.assertEqual(client.client_id, '{}-client-id'.format(client_name))
        self.assertEqual(client.client_type, client_type)
    def test_get_client(self):
        """
        Read back one of the confidential clients (there are two)
        and verify that we get back what we expected
        """
        client = self.adapter.get_client(
            redirect_uris=DUMMY_REDIRECT_URL,
            client_type=models.Application.CLIENT_CONFIDENTIAL
        )
        self.assertIsInstance(client, models.Application)
        self.assertEqual(client.client_type, models.Application.CLIENT_CONFIDENTIAL)
    def test_get_client_not_found(self):
        """An unknown client id must raise Application.DoesNotExist."""
        with self.assertRaises(models.Application.DoesNotExist):
            self.adapter.get_client(client_id='not-found')
    def test_get_client_for_token(self):
        """The adapter must resolve an access token to its application."""
        token = models.AccessToken(
            user=self.user,
            application=self.public_client,
        )
        self.assertEqual(self.adapter.get_client_for_token(token), self.public_client)
    def test_get_access_token(self):
        """A stored token must be retrievable by its token string."""
        token = self.adapter.create_access_token_for_test(
            'token-id',
            client=self.public_client,
            user=self.user,
            expires=now() + timedelta(days=30),
        )
        self.assertEqual(self.adapter.get_access_token(token_string='token-id'), token)
    def test_get_restricted_access_token(self):
        """
        Make sure when generating an access_token for a restricted client
        that the token is immediately expired
        """
        self.adapter.create_access_token_for_test(
            'expired-token-id',
            client=self.restricted_client,
            user=self.user,
            expires=now() + timedelta(days=30),
        )
        readback_token = self.adapter.get_access_token(token_string='expired-token-id')
        self.assertTrue(RestrictedApplication.verify_access_token_as_expired(readback_token))
| a-parhom/edx-platform | openedx/core/djangoapps/oauth_dispatch/tests/test_dot_adapter.py | Python | agpl-3.0 | 4,303 |
import json
import os
import sys
import datetime
from unittest import TestCase
# Prevent relative import errors
file_ = os.path.abspath(__file__)
tests_ = os.path.dirname(file_)
products_ = os.path.dirname(tests_)
shopify_ = os.path.dirname(products_)
root = os.path.dirname(shopify_)
sys.path.append(root)
from shopify.products import Product
class TestProduct(TestCase):
def setUp(self):
with open(os.path.join(tests_, 'product.json'), 'rb') as f:
self.data = json.loads(f.read())
self.product = Product(self.data)
def test_id(self):
self.failUnlessEqual(self.data['id'], self.product.id)
def test_title(self):
self.failUnlessEqual(self.data['title'], self.product.title)
def test_body_html(self):
self.failUnlessEqual(self.data['body_html'], self.product.body_html)
def test_vendor(self):
self.failUnlessEqual(self.data['vendor'], self.product.vendor)
def test_product_type(self):
self.failUnlessEqual(self.data['product_type'], self.product.product_type)
def _compare_dt(self, og_dt_key, dt2):
og = self.data[og_dt_key].rpartition('-')[0] # Remove the utc offset from the end of the timestamp
og_dt = datetime.datetime.strptime(og, '%Y-%m-%dT%H:%M:%S')
psr_dt = dt2.replace(tzinfo=None)
self.failUnlessEqual(og_dt, psr_dt)
def test_created_at(self):
self._compare_dt('created_at', self.product.created_at)
def test_handle(self):
self.failUnlessEqual(self.data['handle'], self.product.handle)
def test_updated_at(self):
self._compare_dt('updated_at', self.product.updated_at)
def test_published_at(self):
self._compare_dt('published_at', self.product.published_at)
def test_template_suffix(self):
self.failUnlessEqual(self.data['template_suffix'], self.product.template_suffix)
def test_published_scope(self):
self.failUnlessEqual(self.data['published_scope'], self.product.published_scope)
def test_tags(self):
self.failUnlessEqual(self.data['tags'], self.product.tags)
if __name__ == '__main__':
    # Allow running this module directly instead of via a test runner.
    import unittest
    unittest.main()
| ziplokk1/python-shopify-api | shopify/products/tests/test_product.py | Python | unlicense | 2,175 |
import hsm
from hsm import actor
from hsm import runtime
class Machine(actor.TopState):
    """Root state of the test state machine; remembers the last error."""
    def __init__(self):
        self._error = None
    def on_error(self, error):
        # Remember the error payload so ErrorState can report it on entry.
        self._error = error
        self.transition(ErrorState)
@actor.initial_state
class Off(Machine):
    """Initial state: the machine is powered off."""
    def on_start(self):
        self.transition(On)
class On(Machine):
    """The machine is powered on; 'stop' powers it off again."""
    def on_stop(self):
        self.transition(Off)
@actor.initial_state
class WaitCommand(On):
    """Initial substate of On: idle, waiting for a command."""
    def on_start_server(self):
        self.transition(WaitConnection)
class WaitConnection(On):
    """Substate of On: the server is running, waiting for connections."""
    def on_stop_server(self):
        self.transition(WaitCommand)
class ErrorState(Machine):
    """Terminal state entered whenever an error event is received."""
    def _enter(self):
        # Report the error code stored by Machine.on_error.
        print "enter %s State, error code = %s" % (self.__class__.__name__, self._error)
if __name__ == '__main__':
    print "test simple hsm"
    mac = Machine()
    st = mac.get_state()
    assert(Off == st)
    # Queue a start event and pump the runtime so the transition happens.
    mac.send_start()
    runtime.dispatch_all_msg()
    st = mac.get_state()
    assert(WaitCommand == st)
    # An error event from any state must land the machine in ErrorState.
    mac.send_error("ERROR 404")
    runtime.dispatch_all_msg()
    st = mac.get_state()
    assert(ErrorState == st)
from decimal import Decimal as D
from django.test import TestCase
from oscar.apps.offer import models
from oscar.test.basket import add_product, add_products
from oscar.test import factories
class TestACountConditionWithPercentageDiscount(TestCase):
    """Tests for a site offer: buy any 3 products, get 20% off one item."""
    def setUp(self):
        """Build the range/condition/benefit triple for the offer."""
        # NOTE: 'range' shadows the builtin; kept to leave the code unchanged.
        range = models.Range(
            name="All products", includes_all_products=True)
        condition = models.CountCondition(
            range=range,
            type=models.Condition.COUNT,
            value=3)
        benefit = models.PercentageDiscountBenefit(
            range=range,
            type=models.Benefit.PERCENTAGE,
            value=20,
            max_affected_items=1)
        self.offer = models.ConditionalOffer(
            name="Test",
            offer_type=models.ConditionalOffer.SITE,
            condition=condition,
            benefit=benefit)
    def test_consumes_correct_number_of_products_for_3_product_basket(self):
        """All 3 items are consumed; the offer cannot apply a second time."""
        basket = factories.create_basket(empty=True)
        add_product(basket, D('1'), 3)
        self.assertTrue(self.offer.is_condition_satisfied(basket))
        discount = self.offer.apply_benefit(basket)
        self.assertTrue(discount.discount > 0)
        self.assertEqual(3, basket.num_items_with_discount)
        self.assertEqual(0, basket.num_items_without_discount)
        self.assertFalse(self.offer.is_condition_satisfied(basket))
    def test_consumes_correct_number_of_products_for_4_product_basket(self):
        """Only 3 of 4 items are consumed; one remains undiscounted."""
        basket = factories.create_basket(empty=True)
        add_products(basket, [(D('1'), 2), (D('1'), 2)])
        self.assertTrue(self.offer.is_condition_satisfied(basket))
        discount = self.offer.apply_benefit(basket)
        self.assertTrue(discount.discount > 0)
        self.assertEqual(3, basket.num_items_with_discount)
        self.assertEqual(1, basket.num_items_without_discount)
        self.assertFalse(self.offer.is_condition_satisfied(basket))
    def test_consumes_correct_number_of_products_for_6_product_basket(self):
        """With 6 items the offer can be applied exactly twice."""
        basket = factories.create_basket(empty=True)
        add_products(basket, [(D('1'), 3), (D('1'), 3)])
        # First application
        discount = self.offer.apply_benefit(basket)
        self.assertTrue(discount.discount > 0)
        self.assertEqual(3, basket.num_items_with_discount)
        self.assertEqual(3, basket.num_items_without_discount)
        # Second application
        discount = self.offer.apply_benefit(basket)
        self.assertTrue(discount.discount > 0)
        self.assertEqual(6, basket.num_items_with_discount)
| ahmetdaglarbas/e-commerce | tests/integration/offer/combination_tests.py | Python | bsd-3-clause | 2,562 |
#!/usr/bin/env python
import os
import re
import sys
# SourceForge project group identifier used in the logo URL.
group_id = '142394'


def stamp(html):
    """Stamp a Python HTML documentation page with the SourceForge logo.

    Rewrites ``<span class="release-info">...</span>`` elements so they
    append a 'Hosted on SourceForge' logo link for `group_id`.  Returns
    the (possibly unchanged) HTML text.
    """
    def replace(m):
        # Keep the original release text and append the hosted-on banner.
        # Bug fix: a space was missing between height="31" and border="0",
        # which produced the invalid attribute run 'height="31"border="0"'.
        return ('<span class="release-info">%s '
                'Hosted on <a href="http://sourceforge.net">'
                '<img src="http://sourceforge.net/'
                'sflogo.php?group_id=%s&type=1" width="88" height="31" '
                'border="0" alt="SourceForge Logo"></a></span>'
                % (m.group(1), group_id))

    mailRe = re.compile(r'<span class="release-info">(.*)</span>')
    return re.sub(mailRe, replace, html)
if __name__ == '__main__':
    # Stamp each page named on the command line, rewriting it in place
    # only when the stamping actually changed something.
    for name in sys.argv[1:]:
        with open(name, 'r') as fh:   # 'with' fixes the leaked file handles
            html = fh.read()
        text = stamp(html)
        if text != html:
            # 'w' mode truncates, so the old os.remove() call was redundant.
            with open(name, 'w') as fh:
                fh.write(text)
# (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Experimental code for fast loading of structured UM data."""
from __future__ import (absolute_import, division, print_function)
import itertools
from netCDF4 import netcdftime
import numpy as np
from iris.fileformats.um._optimal_array_structuring import \
optimal_array_structure
from biggus import ArrayStack
from iris.fileformats.pp import PPField3
class FieldCollation(object):
"""
An object representing a group of UM fields with array structure that can
be vectorized into a single cube.
For example:
Suppose we have a set of 28 fields repeating over 7 vertical levels for
each of 4 different data times. If a FieldCollation is created to contain
these, it can identify that this is a 4*7 regular array structure.
This FieldCollation will then have the following properties:
* within 'element_arrays_and_dims' :
Element 'blev' have the array shape (7,) and dims of (1,).
Elements 't1' and 't2' have shape (4,) and dims (0,).
The other elements (lbft, lbrsvd4 and lbuser5) all have scalar array
values and dims=None.
.. note::
If no array structure is found, the element values are all
either scalar or full-length 1-D vectors.
"""
    def __init__(self, fields):
        """
        Args:
        * fields (iterable of :class:`iris.fileformats.pp.PPField`):
            The fields in the collation.
        """
        self._fields = tuple(fields)
        # Lazily-built stacked data array (see the `data` property).
        self._data_cache = None
        assert len(self.fields) > 0
        # Structure analysis is deferred until a property first needs it.
        self._structure_calculated = False
        self._vector_dims_shape = None
        self._primary_dimension_elements = None
        self._element_arrays_and_dims = None
    @property
    def fields(self):
        """The collated fields, as a tuple of PPField objects."""
        return self._fields
    @property
    def data(self):
        """The fields' data payloads, stacked into one lazy (biggus) array."""
        if not self._structure_calculated:
            self._calculate_structure()
        if self._data_cache is None:
            # Stack the per-field arrays over the detected vector
            # dimensions; built once and cached.
            data_arrays = [f._data for f in self.fields]
            self._data_cache = \
                ArrayStack.multidim_array_stack(data_arrays,
                                                self.vector_dims_shape)
        return self._data_cache
    @property
    def vector_dims_shape(self):
        """The shape of the array structure."""
        # Computed lazily on first access.
        if not self._structure_calculated:
            self._calculate_structure()
        return self._vector_dims_shape
    @property
    def _UNUSED_primary_dimension_elements(self):
        """A set of names of the elements which are array dimensions."""
        # NOTE(review): the '_UNUSED_' prefix suggests this is dead code --
        # confirm there are no callers before removing it.
        if not self._structure_calculated:
            self._calculate_structure()
        return self._primary_dimension_elements
    @property
    def element_arrays_and_dims(self):
        """
        Value arrays for vector metadata elements.
        A dictionary mapping element_name: (value_array, dims).
        The arrays are reduced to their minimum dimensions. A scalar array
        has an associated 'dims' of None (instead of an empty tuple).
        """
        # Computed lazily on first access.
        if not self._structure_calculated:
            self._calculate_structure()
        return self._element_arrays_and_dims
    def _field_vector_element_arrays(self):
        """Define the field components used in the structure analysis.

        Returns a list of (name, array) pairs, one array element per field.
        """
        # Define functions to make t1 and t2 values as date-time tuples.
        # These depend on header version (PPField2 has no seconds values).
        t1_fn = lambda fld: (fld.lbyr, fld.lbmon, fld.lbdat,
                             fld.lbhr, fld.lbmin, getattr(fld, 'lbsec', 0))
        t2_fn = lambda fld: (fld.lbyrd, fld.lbmond, fld.lbdatd,
                             fld.lbhrd, fld.lbmind, getattr(fld, 'lbsecd', 0))
        # Return a list of (name, array) for the vectorizable elements:
        # t1/t2 are the two header times; the rest are level-related headers.
        component_arrays = [
            ('t1', np.array([t1_fn(fld) for fld in self.fields])),
            ('t2', np.array([t2_fn(fld) for fld in self.fields])),
            ('lbft', np.array([fld.lbft for fld in self.fields])),
            ('blev', np.array([fld.blev for fld in self.fields])),
            ('lblev', np.array([fld.lblev for fld in self.fields])),
            ('bhlev', np.array([fld.bhlev for fld in self.fields])),
            ('bhrlev', np.array([fld.bhrlev for fld in self.fields])),
            ('brsvd1', np.array([fld.brsvd[0] for fld in self.fields])),
            ('brsvd2', np.array([fld.brsvd[1] for fld in self.fields])),
            ('brlev', np.array([fld.brlev for fld in self.fields]))
        ]
        return component_arrays
# Static factors for the _time_comparable_int routine (seconds per period).
_TIME_ELEMENT_MULTIPLIERS = np.cumprod([1, 60, 60, 24, 31, 12])[::-1]
def _time_comparable_int(self, yr, mon, dat, hr, min, sec):
"""
Return a single unique number representing a date-time tuple.
This calculation takes no account of the time field's real calendar,
instead giving every month 31 days, which preserves the required
time ordering.
"""
elements = np.array((yr, mon, dat, hr, min, sec))
return np.sum(elements * self._TIME_ELEMENT_MULTIPLIERS)
    def _calculate_structure(self):
        """
        Analyse the fields to find the optimal array structure, caching the
        results on the instance (``_vector_dims_shape``,
        ``_primary_dimension_elements`` and ``_element_arrays_and_dims``).
        """
        # Make value arrays for the vectorisable field elements.
        element_definitions = self._field_vector_element_arrays()
        # Identify the vertical elements and payload.
        blev_array = dict(element_definitions).get('blev')
        vertical_elements = ('lblev', 'bhlev', 'bhrlev',
                             'brsvd1', 'brsvd2', 'brlev')
        # Make an ordering copy (a shallow copy: shared value arrays).
        ordering_definitions = element_definitions[:]
        # Replace time value tuples with integers and bind the vertical
        # elements to the (expected) primary vertical element "blev".
        for index, (name, array) in enumerate(ordering_definitions):
            if name in ('t1', 't2'):
                # Integers compare faster and order identically to tuples.
                array = np.array(
                    [self._time_comparable_int(*tuple(val)) for val in array])
                ordering_definitions[index] = (name, array)
            if name in vertical_elements and blev_array is not None:
                ordering_definitions[index] = (name, blev_array)
        # Perform the main analysis: get vector dimensions, elements, arrays.
        dims_shape, primary_elements, vector_element_arrays_and_dims = \
            optimal_array_structure(ordering_definitions,
                                    element_definitions)
        # Replace time tuples in the result with real datetime-like values.
        # N.B. so we *don't* do this on the whole (expanded) input arrays.
        for name in ('t1', 't2'):
            if name in vector_element_arrays_and_dims:
                arr, dims = vector_element_arrays_and_dims[name]
                arr_shape = arr.shape[:-1]
                extra_length = arr.shape[-1]
                # Flatten out the array apart from the last dimension,
                # convert to netcdftime objects, then reshape back.
                arr = np.array([netcdftime.datetime(*args)
                                for args in arr.reshape(-1, extra_length)]
                               ).reshape(arr_shape)
                vector_element_arrays_and_dims[name] = (arr, dims)
        # Write the private cache values, exposed as public properties.
        self._vector_dims_shape = dims_shape
        self._primary_dimension_elements = primary_elements
        self._element_arrays_and_dims = vector_element_arrays_and_dims
        # Do all this only once.
        self._structure_calculated = True
def _um_collation_key_function(field):
"""
Standard collation key definition for fast structured field loading.
The elements used here are the minimum sufficient to define the
'phenomenon', as described for :meth:`group_structured_fields`.
"""
return (field.lbuser[3], field.lbproc, field.lbuser[6])
def group_structured_fields(field_iterator):
    """
    Collect structured fields into identified groups whose fields can be
    combined to form a single cube.

    Args:

    * field_iterator (iterator of :class:`iris.fileformats.pp.PPField`):
        A source of PP or FF fields.  N.B. order is significant.

    Sorting and collation use phenomenon-relevant metadata only: the field
    components 'lbuser[3]', 'lbuser[6]' and 'lbproc'.  Each distinct
    combination of these defines a specific phenomenon (or statistical
    aggregation of one), and yields a single iteration result.

    Implicitly, within each result group, *all* other metadata components
    should be either:

    *  the same for all fields,
    *  completely irrelevant, or
    *  used by a vectorised rule function (such as
       :func:`iris.fileformats.pp_rules._convert_vector_time_coords`).

    Returns:
        A generator of
        :class:`~iris.experimental.fileformats.um.FieldCollation` objects,
        each of which contains a single collated group from the input fields.

    """
    phenomenon_key = _um_collation_key_function
    ordered_fields = sorted(field_iterator, key=phenomenon_key)
    for _, group in itertools.groupby(ordered_fields, phenomenon_key):
        yield FieldCollation(tuple(group))
| Jozhogg/iris | lib/iris/fileformats/um/_fast_load_structured_fields.py | Python | lgpl-3.0 | 9,902 |
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from rpcore.render_stage import RenderStage
class ApplyLightsStage(RenderStage):
    """ This stage applies the lights to the scene using the gbuffer """

    # Global pipeline inputs read by this stage's shader.
    required_inputs = ["AllLightsData", "IESDatasetTex", "ShadowSourceData"]
    # Pipes (textures produced by earlier stages) this stage consumes.
    required_pipes = ["GBuffer", "CellIndices", "PerCellLights", "ShadowAtlas",
                      "ShadowAtlasPCF", "CombinedVelocity"]

    @property
    def produced_pipes(self):
        # Expose the lit scene colour buffer to downstream stages.
        return {"ShadedScene": self.target.color_tex}

    def create(self):
        # Single colour attachment with 16 bits per channel for the
        # (high dynamic range) lighting result.
        self.target = self.create_target("ApplyLights")
        self.target.add_color_attachment(bits=16)
        self.target.prepare_buffer()

    def reload_shaders(self):
        # Fragment shader performing the actual per-pixel lighting.
        self.target.shader = self.load_shader("apply_lights.frag.glsl")
| eswartz/RenderPipeline | rpcore/stages/apply_lights_stage.py | Python | mit | 1,854 |
from social.backends.bitbucket import BitbucketOAuth as BitbucketBackend
| vxvinh1511/django-social-auth | social_auth/backends/contrib/bitbucket.py | Python | bsd-3-clause | 73 |
from __future__ import absolute_import, unicode_literals
import re
# Mapping of oEmbed endpoint URL templates (with a '{format}' placeholder,
# later substituted by compile_endpoints()) to lists of regex pattern strings
# matching the content URLs each endpoint can resolve.
OEMBED_ENDPOINTS = {
    "https://speakerdeck.com/oembed.{format}": [
        "^http(?:s)?://speakerdeck\\.com/.+$"
    ],
    "https://alpha-api.app.net/oembed": [
        "^http(?:s)?://alpha\\.app\\.net/[^#?/]+/post/.+$",
        "^http(?:s)?://photos\\.app\\.net/[^#?/]+/.+$"
    ],
    "http://www.youtube.com/oembed": [
        "^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/watch.+$",
        "^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/v/.+$",
        "^http(?:s)?://youtu\\.be/.+$",
        "^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/user/.+$",
        "^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/[^#?/]+#[^#?/]+/.+$",
        "^http(?:s)?://m\\.youtube\\.com/index.+$",
        "^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/profile.+$",
        "^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/view_play_list.+$",
        "^http(?:s)?://(?:[-\\w]+\\.)?youtube\\.com/playlist.+$"
    ],
    "http://backend.deviantart.com/oembed": [
        "^http://(?:[-\\w]+\\.)?deviantart\\.com/art/.+$",
        "^http://fav\\.me/.+$",
        "^http://sta\\.sh/.+$",
        "^http://(?:[-\\w]+\\.)?deviantart\\.com/[^#?/]+#/d.+$"
    ],
    "http://blip.tv/oembed/": [
        "^http://[-\\w]+\\.blip\\.tv/.+$"
    ],
    "http://www.dailymotion.com/api/oembed/": [
        "^http://[-\\w]+\\.dailymotion\\.com/.+$"
    ],
    "http://www.flickr.com/services/oembed/": [
        "^http(?:s)?://[-\\w]+\\.flickr\\.com/photos/.+$",
        "^http(?:s)?://flic\\.kr\\.com/.+$"
    ],
    "http://www.hulu.com/api/oembed.{format}": [
        "^http://www\\.hulu\\.com/watch/.+$"
    ],
    "http://www.nfb.ca/remote/services/oembed/": [
        "^http://(?:[-\\w]+\\.)?nfb\\.ca/film/.+$"
    ],
    "http://qik.com/api/oembed.{format}": [
        "^http://qik\\.com/.+$",
        "^http://qik\\.ly/.+$"
    ],
    "http://revision3.com/api/oembed/": [
        "^http://[-\\w]+\\.revision3\\.com/.+$"
    ],
    "http://www.scribd.com/services/oembed": [
        "^http://[-\\w]+\\.scribd\\.com/.+$"
    ],
    "http://www.viddler.com/oembed/": [
        "^http://[-\\w]+\\.viddler\\.com/v/.+$",
        "^http://[-\\w]+\\.viddler\\.com/explore/.+$"
    ],
    "http://www.vimeo.com/api/oembed.{format}": [
        "^http(?:s)?://(?:www\\.)?vimeo\\.com/.+$",
        "^http(?:s)?://player\\.vimeo\\.com/.+$"
    ],
    "http://dotsub.com/services/oembed": [
        "^http://dotsub\\.com/view/.+$"
    ],
    "http://www.yfrog.com/api/oembed": [
        "^http(?:s)?://(?:www\\.)?yfrog\\.com/.+$",
        "^http(?:s)?://(?:www\\.)?yfrog\\.us/.+$"
    ],
    "http://clikthrough.com/services/oembed": [
        "^http(?:s)?://(?:[-\\w]+\\.)?clikthrough\\.com/.+$"
    ],
    "http://www.kinomap.com/oembed": [
        "^http://[-\\w]+\\.kinomap\\.com/.+$"
    ],
    "https://photobucket.com/oembed": [
        "^http://(?:[-\\w]+\\.)?photobucket\\.com/albums/.+$",
        "^http://(?:[-\\w]+\\.)?photobucket\\.com/groups/.+$"
    ],
    "http://api.instagram.com/oembed": [
        "^http://instagr\\.am/p/.+$",
        "^http[s]?://(?:www\\.)?instagram\\.com/p/.+$"
    ],
    "https://www.slideshare.net/api/oembed/2": [
        "^http://www\\.slideshare\\.net/.+$"
    ],
    "http://tv.majorleaguegaming.com/oembed": [
        "^http://mlg\\.tv/.+$",
        "^http://tv\\.majorleaguegaming\\.com/.+$"
    ],
    "http://my.opera.com/service/oembed": [
        "^http://my\\.opera\\.com/.+$"
    ],
    "http://skitch.com/oembed": [
        "^http(?:s)?://(?:www\\.)?skitch\\.com/.+$",
        "^http://skit\\.ch/.+$"
    ],
    "https://api.twitter.com/1/statuses/oembed.{format}": [
        "^http(?:s)?://twitter\\.com/(?:#!)?[^#?/]+/status/.+$"
    ],
    "https://soundcloud.com/oembed": [
        "^https://soundcloud\\.com/[^#?/]+/.+$"
    ],
    "http://www.collegehumor.com/oembed.{format}": [
        "^http://(?:www\\.)?collegehumor\\.com/video/.+$",
        "^http://(?:www\\.)?collegehumor\\.com/video:.+$"
    ],
    "http://www.polleverywhere.com/services/oembed/": [
        "^http://www\\.polleverywhere\\.com/polls/.+$",
        "^http://www\\.polleverywhere\\.com/multiple_choice_polls/.+$",
        "^http://www\\.polleverywhere\\.com/free_text_polls/.+$"
    ],
    "http://www.ifixit.com/Embed": [
        "^http://www\\.ifixit\\.com/[^#?/]+/[^#?/]+/.+$"
    ],
    "http://api.smugmug.com/services/oembed/": [
        "^http(?:s)?://(?:www\\.)?smugmug\\.com/[^#?/]+/.+$"
    ],
    "https://github.com/api/oembed": [
        "^http(?:s)?://gist\\.github\\.com/.+$"
    ],
    "http://animoto.com/services/oembed": [
        "^http://animoto\\.com/play/.+$"
    ],
    "http://www.rdio.com/api/oembed": [
        # Bug fix: was "wwww" (four w's), which failed to match
        # "http://www.rdio.com/people/..." URLs.
        "^http://(?:www\\.)?rdio\\.com/people/[^#?/]+/playlists/.+$",
        "^http://[-\\w]+\\.rdio\\.com/artist/[^#?/]+/album/.+$"
    ],
    "http://api.5min.com/oembed.{format}": [
        "^http://www\\.5min\\.com/video/.+$"
    ],
    "http://500px.com/photo/{1}/oembed.{format}": [
        "^http://500px\\.com/photo/([^#?/]+)(?:.+)?$"
    ],
    "http://api.dipdive.com/oembed.{format}": [
        "^http://[-\\w]+\\.dipdive\\.com/media/.+$"
    ],
    "http://video.yandex.ru/oembed.{format}": [
        "^http://video\\.yandex\\.ru/users/[^#?/]+/view/.+$"
    ],
    "http://www.mixcloud.com/oembed/": [
        "^http://www\\.mixcloud\\.com/oembed/[^#?/]+/.+$"
    ],
    "http://www.kickstarter.com/services/oembed": [
        "^http(?:s)://[-\\w]+\\.kickstarter\\.com/projects/.+$"
    ],
    "http://coub.com/api/oembed.{format}": [
        "^http(?:s)?://coub\\.com/view/.+$",
        "^http(?:s)?://coub\\.com/embed/.+$"
    ],
    "http://www.screenr.com/api/oembed.{format}": [
        "^http://www\\.screenr\\.com/.+$"
    ],
    "http://www.funnyordie.com/oembed.{format}": [
        "^http://www\\.funnyordie\\.com/videos/.+$"
    ],
    "http://fast.wistia.com/oembed.{format}": [
        # Bug fix: the domain dots were unescaped (matching any character),
        # inconsistent with every other pattern in this table.
        "^https?://([^/]+\\.)?(wistia\\.com|wi\\.st)/(medias|embed)/.+$"
    ],
    "http://www.ustream.tv/oembed": [
        "^http(?:s)?://(?:www\\.)?ustream\\.tv/.+$",
        "^http(?:s)?://(?:www\\.)?ustream\\.com/.+$",
        "^http://ustre\\.am/.+$"
    ],
    "http://wordpress.tv/oembed/": [
        "^http://wordpress\\.tv/.+$"
    ],
    "http://polldaddy.com/oembed/": [
        "^http(?:s)?://(?:[-\\w]+\\.)?polldaddy\\.com/.+$"
    ],
    "http://api.bambuser.com/oembed.{format}": [
        "^http://bambuser\\.com/channel/[^#?/]+/broadcast/.+$",
        "^http://bambuser\\.com/channel/.+$",
        "^http://bambuser\\.com/v/.+$"
    ],
    "http://www.ted.com/talks/oembed.{format}": [
        "^http(?:s)?://(?:www\\.)?ted\\.com/talks/.+$",
        "^http(?:s)?://(?:www\\.)?ted\\.com/talks/lang/[^#?/]+/.+$",
        "^http(?:s)?://(?:www\\.)?ted\\.com/index\\.php/talks/.+$",
        "^http(?:s)?://(?:www\\.)?ted\\.com/index\\.php/talks/lang/[^#?/]+/.+$"
    ],
    "http://chirb.it/oembed.{format}": [
        "^http://chirb\\.it/.+$"
    ],
    "https://www.circuitlab.com/circuit/oembed/": [
        "^http(?:s)?://(?:www\\.)?circuitlab\\.com/circuit/.+$"
    ],
    "http://api.geograph.org.uk/api/oembed": [
        "^http://(?:[-\\w]+\\.)?geograph\\.org\\.uk/.+$",
        "^http://(?:[-\\w]+\\.)?geograph\\.co\\.uk/.+$",
        "^http://(?:[-\\w]+\\.)?geograph\\.ie/.+$"
    ],
    "http://geo.hlipp.de/restapi.php/api/oembed": [
        "^http://geo-en\\.hlipp\\.de/.+$",
        "^http://geo\\.hlipp\\.de/.+$",
        "^http://germany\\.geograph\\.org/.+$"
    ],
    "http://www.geograph.org.gg/api/oembed": [
        "^http://(?:[-\\w]+\\.)?geograph\\.org\\.gg/.+$",
        "^http://(?:[-\\w]+\\.)?geograph\\.org\\.je/.+$",
        "^http://channel-islands\\.geograph\\.org/.+$",
        "^http://channel-islands\\.geographs\\.org/.+$",
        "^http://(?:[-\\w]+\\.)?channel\\.geographs\\.org/.+$"
    ],
    "http://vzaar.com/api/videos/{1}.{format}": [
        "^http://(?:www\\.)?vzaar\\.com/videos/([^#?/]+)(?:.+)?$",
        "^http://www\\.vzaar\\.tv/([^#?/]+)(?:.+)?$",
        "^http://vzaar\\.tv/([^#?/]+)(?:.+)?$",
        "^http://vzaar\\.me/([^#?/]+)(?:.+)?$",
        "^http://[-\\w]+\\.vzaar\\.me/([^#?/]+)(?:.+)?$"
    ],
    "http://api.minoto-video.com/services/oembed.{format}": [
        "^http://api\\.minoto-video\\.com/publishers/[^#?/]+/videos/.+$",
        "^http://dashboard\\.minoto-video\\.com/main/video/details/.+$",
        "^http://embed\\.minoto-video\\.com/.+$"
    ],
    "http://www.videojug.com/oembed.{format}": [
        "^http(?:s)?://(?:[-\\w]+\\.)?videojug\\.com/film/.+$",
        "^http(?:s)?://(?:[-\\w]+\\.)?videojug\\.com/payer/.+$",
        "^http(?:s)?://(?:[-\\w]+\\.)?videojug\\.com/interview/.+$"
    ],
    "http://videos.sapo.pt/oembed": [
        "^http(?:s)?://videos\\.sapo\\.pt/.+$"
    ],
    "http://vhx.tv/services/oembed.{format}": [
        "^http(?:s)?://(?:www\\.)?vhx\\.tv/.+$"
    ],
    "http://api.justin.tv/api/embed/from_url.{format}": [
        "^http(?:s)?://(?:www\\.)?justin\\.tv/.+$"
    ],
    "http://official.fm/services/oembed.{format}": [
        "^http(?:s)?://official\\.fm/.+$"
    ],
    "http://huffduffer.com/oembed": [
        "^http(?:s)?://(?:www\\.)?huffduffer\\.com/[^#?/]+/.+$"
    ],
    "https://embed.spotify.com/oembed/": [
        "^http(?:s)?://open\\.spotify\\.com/.+$",
        "^http(?:s)?://spoti\\.fi/.+$"
    ],
    "http://shoudio.com/api/oembed": [
        "^http://shoudio\\.com/.+$",
        "^http://shoud\\.io/.+$"
    ],
    "http://api.mobypicture.com/oEmbed": [
        "^http(?:s)?://(?:www\\.)?mobypicture\\.com/user/[^#?/]+/view/.+$",
        "^http(?:s)?://(?:www\\.)?moby\\.to/.+$"
    ],
    "http://www.23hq.com/23/oembed": [
        "^http(?:s)?://(?:www\\.)?23hq\\.com/[^#?/]+/photo/.+$"
    ],
    "http://gmep.org/oembed.{format}": [
        "^http(?:s)?://(?:www\\.)?gmep\\.org/.+$",
        "^http(?:s)?://gmep\\.imeducate\\.com/.+$"
    ],
    "http://oembed.urtak.com/1/oembed": [
        "^http(?:s)?://(?:[-\\w]+\\.)?urtak\\.com/.+$"
    ],
    "http://cacoo.com/oembed.{format}": [
        "^http(?:s)?://cacoo\\.com/.+$"
    ],
    "http://api.dailymile.com/oembed": [
        "^http(?:s)?://(?:www\\.)?dailymile\\.com/people/[^#?/]+/entries/.+$"
    ],
    "http://www.dipity.com/oembed/timeline/": [
        "^http(?:s)?://(?:www\\.)?dipity\\.com/timeline/.+$",
        "^http(?:s)?://(?:www\\.)?dipity\\.com/voaweb/.+$"
    ],
    "https://sketchfab.com/oembed": [
        "^http(?:s)?://sketchfab\\.com/show/.+$"
    ],
    "https://api.meetup.com/oembed": [
        "^http(?:s)?://(?:www\\.)?meetup\\.com/.+$",
        "^http(?:s)?://(?:www\\.)?meetup\\.ps/.+$"
    ],
    "https://roomshare.jp/oembed.{format}": [
        "^http(?:s)?://(?:www\\.)?roomshare\\.jp/(?:en/)?post/.+$"
    ],
    "http://crowdranking.com/api/oembed.{format}": [
        "^http(?:s)?://crowdranking\\.com/crowdrankings/.+$",
        "^http(?:s)?://crowdranking\\.com/rankings/.+$",
        "^http(?:s)?://crowdranking\\.com/topics/.+$",
        "^http(?:s)?://crowdranking\\.com/widgets/.+$",
        "^http(?:s)?://crowdranking\\.com/r/.+$"
    ],
    "http://openapi.etsy.com/svc/oembed/": [
        "^http(?:s)?://(?:www\\.)?etsy\\.com/listing/.+$"
    ],
    "https://audioboom.com/publishing/oembed.{format}": [
        "^http(?:s)?://audioboom\\.com/boos/.+$"
    ],
    "http://demo.clikthrough.com/services/oembed/": [
        "^http(?:s)?://demo\\.clikthrough\\.com/theater/video/.+$"
    ],
    "http://www.ifttt.com/oembed/": [
        "^http(?:s)?://ifttt\\.com/recipes/.+$"
    ],

    # Added 11th December 2014 - http://developers.issuu.com/api/oembed.html
    "http://issuu.com/oembed": [
        "^http(?:s)?://(?:www\\.)?issuu\\.com/[^#?/]+/docs/.+$"
    ],
}
# Compile endpoints into regular expression objects
def compile_endpoints():
    """Return OEMBED_ENDPOINTS with compiled patterns and '{format}' set to
    'json' in the endpoint URLs."""
    compiled = {}
    for endpoint, patterns in OEMBED_ENDPOINTS.items():
        key = endpoint.replace('{format}', 'json')
        compiled[key] = [re.compile(pattern) for pattern in patterns]
    return compiled


OEMBED_ENDPOINTS_COMPILED = compile_endpoints()
def get_oembed_provider(url):
    """Return the oEmbed endpoint URL whose patterns match *url*, else None."""
    for endpoint, patterns in OEMBED_ENDPOINTS_COMPILED.items():
        if any(pattern.match(url) for pattern in patterns):
            return endpoint
    return None
| Toshakins/wagtail | wagtail/wagtailembeds/oembed_providers.py | Python | bsd-3-clause | 12,377 |
from model.contact import Contact
# Two fully-populated contacts; every field value carries the contact's
# index suffix so tests can tell which record a value came from.
testdata = [
    Contact(firstname="firstname%d" % i, middlename="middlename%d" % i,
            lastname="lastname%d" % i, nickname="nickname%d" % i,
            address="addr%d" % i, company="company%d" % i,
            home="home%d" % i, homephone="homephone%d" % i,
            mobilephone="mobilephone%d" % i, workphone="workphone%d" % i,
            secondaryphone="secphone%d" % i, email="email%d" % i,
            email2="email%d2" % i, email3="email%d3" % i,
            homepage="homepage%d" % i, address2="address%d" % i,
            notes="notes%d" % i)
    for i in (1, 2)
]
# -*- coding: UTF-8 -*-
# Copyright 2017 Luc Saffre
# License: BSD (see file COPYING for details)
""".. management_command:: qtclient
Runs a Qt client for this application.
This requires::
pip install pyqt5
which is only available under Python 3.
"""
from __future__ import print_function
from django.core.management.base import BaseCommand
from django.conf import settings
import sys
from os.path import join
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton,
QMessageBox, QDesktopWidget, QMainWindow,
QAction, qApp, QTextEdit, QHBoxLayout,
QVBoxLayout)
# from PyQt5.QtCore import QCoreApplication
from PyQt5.QtGui import QIcon
from lino.api import rt
from lino.core.menus import Menu # , MenuItem
from unipath import Path
images_path = Path(settings.STATIC_ROOT, Path('static/images/mjames'))
class ItemCaller(object):
    """Callable bound to one Lino menu item.

    An instance is connected to a QAction's ``triggered`` signal; when the
    action fires, it opens a :class:`DetailForm` for the menu item.
    """

    def __init__(self, win, mi):
        self.mi = mi    # the Lino menu item to open
        self.win = win  # parent window for the detail form

    def __call__(self, event):
        # Removed an unreachable ``if False:`` branch that showed a
        # QMessageBox instead of the form.
        # The form is stored on the instance so it is not garbage-collected
        # (and thereby closed) immediately after show().
        self.frm = DetailForm(self.win, self.mi)
        self.frm.show()
class DetailForm(QWidget):
    """Minimal detail window for a menu item: an OK/Cancel button row."""

    def __init__(self, win, mi):
        self.mi = mi
        super().__init__(win)
        self.setWindowTitle(str(self.mi.label))
        self.initUI()

    def initUI(self):
        ok_button = QPushButton("OK")
        cancel_button = QPushButton("Cancel")

        # Buttons pushed to the right by a leading stretch.
        button_row = QHBoxLayout()
        button_row.addStretch(1)
        button_row.addWidget(ok_button)
        button_row.addWidget(cancel_button)

        # Button row pushed to the bottom by a leading stretch.
        outer = QVBoxLayout()
        outer.addStretch(1)
        outer.addLayout(button_row)

        self.setLayout(outer)
        self.setGeometry(300, 300, 300, 150)
class LinoClient(QMainWindow):
    """Main window of the Qt client: a text area plus the site menu."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        # Placeholder central widget.
        textEdit = QTextEdit()
        self.setCentralWidget(textEdit)

        self.setGeometry(300, 300, 300, 220)
        self.center()
        self.setWindowTitle('qtclient.py')
        self.setWindowIcon(QIcon('../../.static/logo.png'))
        self.setToolTip('This is a <b>QWidget</b> widget')

        # Build the menu bar from the Lino site menu of user type '900'
        # (presumably the admin profile -- TODO confirm).
        self.menubar = self.menuBar()
        user_type = rt.models.users.UserTypes.get_by_value('900')
        menu = settings.SITE.get_site_menu(
            settings.SITE.kernel, user_type)
        self.load_menu(menu, self.menubar)

        self.show()
        self.statusBar().showMessage('Ready')

    def load_menu(self, menu, menubar):
        # Recursively mirror the Lino menu tree as Qt menus/actions.
        for mi in menu.items:
            if isinstance(mi, Menu):
                submenu = menubar.addMenu(str(mi.label))
                self.load_menu(mi, submenu)
            else:
                a = QAction(QIcon(images_path.child('cancel.png')),
                            str(mi.label), self)
                if mi.hotkey:
                    a.setShortcut(mi.hotkey)
                a.setStatusTip(str(mi.help_text))
                # ItemCaller opens the detail form for this menu item.
                a.triggered.connect(ItemCaller(self, mi))
                menubar.addAction(a)

        # fileMenu = menubar.addMenu('&File')
        exitAction = QAction(QIcon('exit.png'), '&Exit', self)
        # exitAction.setShortcut('Ctrl+Q')
        # exitAction.setStatusTip('Exit application')
        # exitAction.triggered.connect(qApp.quit)
        # fileMenu.addAction(exitAction)

        # a = QAction(QIcon('detail.png'), '&Detail', self)
        # a.triggered.connect(self.show_detail)
        # fileMenu.addAction(a)

        # self.toolbar = self.addToolBar('Exit')
        # self.toolbar.addAction(exitAction)

        # btn = QPushButton('Quit', self)
        # btn.clicked.connect(QCoreApplication.instance().quit)
        # btn.setToolTip('This is a <b>QPushButton</b> widget')
        # btn.resize(btn.sizeHint())
        # btn.move(50, 50)

    # def show_detail(self, event):
    #     self.detail_form = DetailForm()
    #     self.detail_form.show()

    def closeEvent(self, event):
        # Confirmation dialog currently disabled: always accept the close.
        if True:
            event.accept()
            return
        reply = QMessageBox.question(self, 'MessageBox',
                                     "This will close the window! Are you sure?",
                                     QMessageBox.Yes |
                                     QMessageBox.No, QMessageBox.Yes)

        if reply == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()

    def center(self):
        # Move the window to the centre of the available desktop area.
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
class Command(BaseCommand):
    help = __doc__

    def handle(self, *args, **options):
        """Create the Qt application, open the main window, run its loop."""
        app = QApplication(sys.argv)
        # Keep a reference on the command so the window is not collected.
        self.ex = LinoClient()
        return app.exec_()
| khchine5/lino | lino/management/commands/qtclient.py | Python | bsd-2-clause | 5,132 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
this admin script allows you to :
- la base minimale du programme : ./administration 1
- base de test : ./administration 2
- add an user to the db : ./administration 0 email password (firstname timezone)
"""
from sys import argv, exc_info
from app import models
from app.models import User, Spending
from flask import Flask
from babel.dates import datetime
# Stand-alone Flask app (configured from config.py) so this admin script can
# use the SQLAlchemy models outside the running web application.
coreApp = Flask(__name__)
coreApp.config.from_object('config')
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy(coreApp)
def addUser(email, passwd, firstname=None, timezone=None):
    """Add a user to the database: (email, passwd, firstname, timezone).

    Bug fix: the previous version declared ``*args`` but silently read the
    values from module-level globals; the parameters are now explicit and
    match how the function is called from ``__main__``.
    """
    user = models.User(email=email, password=passwd, firstname=firstname,
                       timezone=timezone)
    db.session.add(user)
    # Store the hashed password (the constructor keyword alone is not enough).
    user.set_password(passwd)
    db.session.commit()
    # NOTE(review): this echoes the clear-text password to stdout -- consider
    # dropping it from the message.
    print("User added: {}, {}, {}, {}".format(email, passwd, firstname, timezone))
def addBill(s_type, s_label, s_total, s_payer_id, s_user_ids):
    """
    Create a Spending in the database.

    1) create the Spending model and fill its attributes except parts
    2) estimate parts and add them to our Spending
    3) adjust balance for each User with these parts
    4) commit everything and return 1; on any error, roll back and return 0

    Bug fix: the body used to read the module-level globals ``label``,
    ``total`` and ``payer_id`` instead of the ``s_label``, ``s_total`` and
    ``s_payer_id`` parameters, so the function only worked when called from
    this script's ``__main__`` block.
    """
    try:
        bill = Spending()
        bill.timestamp = datetime.utcnow()
        bill.s_type = s_type
        bill.label = s_label
        bill.total = s_total
        bill.payer_id = s_payer_id
        db.session.add(bill)

        # The payer advanced the whole amount.
        db.session.query(User).get(s_payer_id).given_money += bill.total

        # Split the total into one part per participating user.
        tmp_parts = bill.computeParts(db.session, len(s_user_ids))
        user_parts = []
        for idx, part_total in enumerate(tmp_parts):
            db.session.add(
                Spending.Part(
                    spending=bill,
                    total=part_total,
                    user_id=s_user_ids[idx]
                )
            )
            user_parts.append((s_user_ids[idx], part_total))

        # Each participant owes their share.
        for user_id, user_bill in user_parts:
            db.session.query(User).get(user_id).borrowed_money += user_bill

        db.session.commit()
        return 1
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        db.session.rollback()
        print(exc_info())
        return 0
def addTypes():
    """Insert the default spending types into the database (in this order)."""
    type_names = (
        u"Alimentation (🍆)",
        u"Alimentation (🍖)",
        u"Alcool",
        u"Divers",
        u"Charges",
        u"Bien-être",
        u"Sorties",
        u"Maison",
        u"Virement",
    )
    for name in type_names:
        db.session.add(models.Spending.Type(name=name))
    db.session.commit()
def delete_spendings_before_date():
    """Delete every Spending dated before 2021-09-01, parts included.

    WARNING: user balances are *not* adjusted, so running this breaks them!
    """
    from datetime import date
    cutoff = date(year=2021, month=9, day=1)
    # If ``db.session()`` does not work in this context, fall back to
    # ``db.object_session(<some existing Spending instance>)``.
    db_session = db.session()
    doomed = models.Spending.query.filter(models.Spending.s_date < cutoff)
    for spending in doomed:
        for part in spending.parts:
            db_session.delete(part)
        db_session.delete(spending)
    db_session.commit()
if __name__ == '__main__':
    # Mode 0: add a user -- ./administration 0 email password (firstname timezone)
    if argv[1] == str(0):
        email = argv[2]
        passwd = argv[3]
        # Optional positional arguments; best-effort parsing on purpose.
        try:
            firstname = argv[4]
        except:
            firstname = None
        try:
            timezone = argv[5]
        except:
            timezone = None
        addUser(email, passwd, firstname, timezone)

    # Mode 1/"init": minimal database initialization (types + one test user).
    if (argv[1] == str(1)) or (argv[1] == "init"):
        addTypes()
        a = models.User(email='b@t', firstname='Batoo')
        a.set_password('coucou')
        db.session.add(a)
        db.session.commit()

    # Mode 2: test of adding a bill with hard-coded sample data.
    if argv[1] == str(2):
        s_type = 'Alimentation'
        label = 'Carottes'
        total = 56.12
        payer_id = 1
        user_ids = [1, 2]
        addBill(s_type, label, total, payer_id, user_ids) #spend=models.Spending.query.filter_by(id=1).first())
        print('données de test initiales ajoutées: OK.')
| littleDad/mesLucioles | administration.py | Python | gpl-2.0 | 4,438 |
import time
from threading import Event, Thread
class TimerError(Exception):
    """Raised when a Timer is started or stopped out of order."""
    pass
class Timer:
    """A super simple wall clock timer.

    After creation, an instance must be started with :meth:`start` and runs
    until :meth:`stop` is called.  Calling :meth:`stop` then :meth:`start`
    resets/reuses the timer.

    The string value of a timer is its current elapsed time.

    Usable as a context manager, in which case start/stop happen
    automatically::

        with Timer() as t:  # t is automatically start()ed
            do_something()
        # t is automatically stop()ped
        print(t)

    """

    def __init__(self, autoprint=False):
        self.started = False
        self.start_time = None
        self.stopped = False
        self.total_time = None
        self.autoprint = autoprint  # print the timer when it is stopped

    @property
    def elapsed_time(self):
        # Total while stopped; live reading while running.
        if not self.stopped:
            return time.monotonic() - self.start_time
        return self.total_time

    def start(self):
        if self.started:
            raise TimerError('Already started')
        self.started, self.stopped = True, False
        self.start_time = time.monotonic()
        self.total_time = None

    def stop(self):
        if not self.started:
            raise TimerError('Not started')
        if self.stopped:
            raise TimerError('Already stopped')
        self.total_time = time.monotonic() - self.start_time
        self.started, self.stopped = False, True
        if self.autoprint:
            print(self)

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_traceback):
        self.stop()

    def __str__(self):
        # Pick the most readable unit for the elapsed time.
        minutes, seconds = divmod(self.elapsed_time, 60)
        if minutes:
            return '{m}m {s:.1f}s'.format(m=int(minutes), s=seconds)
        if seconds > 0.01:
            return '{s:.2f}s'.format(s=seconds)
        if seconds > 0.001:
            return '{ms:.0f}ms'.format(ms=seconds * 1_000)
        if seconds > 0.000001:
            return '{us:.0f}us'.format(us=seconds * 1_000_000)
        return '{ns:.0f}ns'.format(ns=seconds * 1_000_000_000)
class PeriodicRunner(Thread):
    """Run a function periodically until stopped.

    Like a normal thread, :meth:`start` begins running the function and
    :meth:`stop` ends it.  The default sleep between calls is 1 second;
    pass ``interval`` (int or float) to change it.

    """

    def __init__(self, target, interval=1, **kwargs):
        super().__init__(target=target, daemon=True, **kwargs)
        self._interval = interval
        self._stopped = Event()

    def run(self):
        # Call the target once immediately, then once per interval.
        # Event.wait doubles as both the sleep and the stop signal.
        while True:
            self._target(*self._args, **self._kwargs)
            if self._stopped.wait(self._interval):
                return

    def stop(self):
        self._stopped.set()
| bycycle-org/bycycle.core | bycycle/core/util.py | Python | gpl-3.0 | 2,920 |
import concurrent.futures
from functools import update_wrapper, partial
class SingleProcess(object):
    """
    A fall-back parallel context that executes jobs sequentially.
    """

    def __repr__(self):
        return "{name}({extra})".format(name=self.__class__.__name__,
                                        extra=self._extra_repr())

    def _extra_repr(self):
        # No parameters to show for the sequential executor.
        return ""

    @staticmethod
    def _update_handler(handler, **kwargs):
        # Bind the keyword arguments, keeping the handler's metadata.
        wrapped = partial(handler, **kwargs)
        update_wrapper(wrapped, handler)
        return wrapped

    def execute(self, handler, args_iterate, **kwargs):
        """
        Executes the queue of
        `[handler(arg, **kwargs) for arg in args_iterate]` in a single
        process (no speedup).

        Parameters
        ----------
        handler : callable
            A function to be executed for each argument in `args_iterate`.
        args_iterate : list
            A list of (different) values of the first argument of the
            `handler` function.
        kwargs
            Additional key arguments to `handler`.

        Returns
        -------
        results : list
            The result of applying the `handler` for each `arg` in
            `args_iterate`.  The `i`-th item corresponds to
            `args_iterate[i]` (the order is preserved).
        """
        wrapped = self._update_handler(handler, **kwargs)
        return [wrapped(arg) for arg in args_iterate]
class ProcessPoolExecutor(SingleProcess):
    """
    Wrapper of the Python built-in
    `concurrent.futures.ProcessPoolExecutor` class.

    `ProcessPoolExecutor` is the recommended choice on a single physical
    machine (laptop or PC).

    Parameters
    ----------
    max_workers : int or None
        The maximum number of processes used to execute the given calls.
        If None (default), as many worker processes are created as the
        machine has processors.
    """

    def __init__(self, max_workers=None):
        self.max_workers = max_workers

    def _extra_repr(self):
        return "max_workers={0}".format(self.max_workers)

    def _create_executor(self):
        return concurrent.futures.ProcessPoolExecutor(self.max_workers)

    def execute(self, handler, args_iterate, **kwargs):
        """
        Executes the queue of
        `[handler(arg, **kwargs) for arg in args_iterate]` in multiple
        processes within one machine.

        Parameters
        ----------
        handler : callable
            A function to be executed for each argument in `args_iterate`.
        args_iterate : list
            A list of (different) values of the first argument of the
            `handler` function.
        kwargs
            Additional key arguments to `handler`.

        Returns
        -------
        results : list
            The result of applying the `handler` for each `arg` in
            `args_iterate`.  The `i`-th item corresponds to
            `args_iterate[i]` (the order is preserved).
        """
        wrapped = self._update_handler(handler, **kwargs)
        results = []
        with self._create_executor() as executor:
            results = executor.map(wrapped, args_iterate)
        # Materialize after the executor has shut down (waiting for all
        # workers), mirroring the sequential fall-back's list result.
        return list(results)
| INM-6/elephant | elephant/parallel/parallel.py | Python | bsd-3-clause | 3,668 |
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from tardis.tardis_portal.models import Dataset
@python_2_unicode_compatible
class Equipment(models.Model):
    """A piece of equipment that datasets can be associated with."""
    # Short unique identifier; also used as the display name (see __str__).
    key = models.CharField(unique=True, max_length=30)
    # Datasets produced by / linked to this equipment.
    dataset = models.ManyToManyField(Dataset, blank=True)
    description = models.TextField(blank=True)
    make = models.CharField(max_length=60, blank=True)
    model = models.CharField(max_length=60, blank=True)
    type = models.CharField(max_length=60, blank=True)
    serial = models.CharField(max_length=60, blank=True)
    # Commissioning / decommissioning dates.
    comm = models.DateField(null=True, blank=True)
    decomm = models.DateField(null=True, blank=True)
    url = models.URLField(null=True, blank=True, max_length=255)

    def __str__(self):
        return self.key
| wettenhj/mytardis | tardis/apps/equipment/models.py | Python | gpl-3.0 | 826 |
# -*- coding: utf-8 -*-
from pydwd.crawler.basecrawler import BaseCrawler
from pydwd.utils import ftphelper, translator
from pydwd.parser import weatherparser
class HourlyCrawler(BaseCrawler):
    """Crawler for the DWD FTP server's hourly climate observations."""

    def __init__(self):
        self._host = 'ftp-cdc.dwd.de'
        self._station_file = 'TU_Stundenwerte_Beschreibung_Stationen.txt'
        self._data_path = '/pub/CDC/observations_germany/climate/hourly'
        BaseCrawler.__init__(self)

    def __get_weather__(self, id):
        url = 'ftp://' + self._host + self._data_path
        # Station ids are zero-padded to five digits in the archive names.
        _id = '%05d' % int(id)

        # category -> (archive path fragment, product file prefix inside zip)
        archives = {
            'temperature': ('/air_temperature/recent/stundenwerte_TU_',
                            'produkt_tu_stunde_'),
            'wind': ('/wind/recent/stundenwerte_FF_',
                     'produkt_ff_stunde_'),
            'cloudiness': ('/cloudiness/recent/stundenwerte_N_',
                           'produkt_n_stunde_'),
            'precipitation': ('/precipitation/recent/stundenwerte_RR_',
                              'produkt_rr_stunde_'),
            'pressure': ('/pressure/recent/stundenwerte_P0_',
                         'produkt_p0_stunde_'),
        }

        result = {}
        for key, (archive_path, product_prefix) in archives.iteritems():
            content = ftphelper.get_file_txt_zip(
                url + archive_path + _id + '_akt.zip', product_prefix)
            # Missing/empty archives yield an empty dict for that category.
            result[key] = (weatherparser.parse(content,
                                               translation='en-hourly')
                           if content else {})
        return result
| ckaus/pydwd | pydwd/crawler/hourlycrawler.py | Python | mit | 1,940 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2016 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le type clef."""
from .base import BaseType
class Clef(BaseType):
    """Object type: key ("clef").

    Keys may be stacked on top of clothing items.
    """
    nom_type = "clef"  # internal type name ("clef" = key); runtime identifier, do not translate
    empilable_sur = ["vêtement"]  # object types this can be stacked on ("vêtement" = clothing)
| vlegoff/tsunami | src/primaires/objet/types/clef.py | Python | bsd-3-clause | 1,750 |
# Copyright 2019 The meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstractions to simplify compilers that implement an MSVC compatible
interface.
"""
import abc
import os
import typing as T
from ... import mesonlib
from ... import mlog
if T.TYPE_CHECKING:
from ...environment import Environment
# Instruction-set name -> /arch flag for the 32 bit MSVC compiler.
# A value of None means the instruction set is not supported.
vs32_instruction_set_args = {
    'mmx': ['/arch:SSE'], # There does not seem to be a flag just for MMX
    'sse': ['/arch:SSE'],
    'sse2': ['/arch:SSE2'],
    'sse3': ['/arch:AVX'], # VS leaped from SSE2 directly to AVX.
    'ssse3': ['/arch:AVX'],
    'sse41': ['/arch:AVX'],
    'sse42': ['/arch:AVX'],
    'avx': ['/arch:AVX'],
    'avx2': ['/arch:AVX2'],
    'neon': None,
} # T.Dict[str, T.Optional[T.List[str]]]
# The 64 bit compiler defaults to /arch:avx.
vs64_instruction_set_args = {
    'mmx': ['/arch:AVX'],
    'sse': ['/arch:AVX'],
    'sse2': ['/arch:AVX'],
    'sse3': ['/arch:AVX'],
    'ssse3': ['/arch:AVX'],
    'sse41': ['/arch:AVX'],
    'sse42': ['/arch:AVX'],
    'avx': ['/arch:AVX'],
    'avx2': ['/arch:AVX2'],
    'neon': None,
} # T.Dict[str, T.Optional[T.List[str]]]
# Meson buildtype -> MSVC compile flags.
msvc_buildtype_args = {
    'plain': [],
    'debug': ["/ZI", "/Ob0", "/Od", "/RTC1"],
    'debugoptimized': ["/Zi", "/Ob1"],
    'release': ["/Ob2", "/Gw"],
    'minsize': ["/Zi", "/Gw"],
    'custom': [],
}  # type: T.Dict[str, T.List[str]]
# Clang-cl doesn't have /ZI, and /Zi and /Z7 do the same thing
# quoting the docs (https://clang.llvm.org/docs/MSVCCompatibility.html):
#
# Clang emits relatively complete CodeView debug information if /Z7 or /Zi is
# passed. Microsoft’s link.exe will transform the CodeView debug information
# into a PDB
clangcl_buildtype_args = msvc_buildtype_args.copy()
clangcl_buildtype_args['debug'] = ['/Zi', '/Ob0', '/Od', '/RTC1']
# Meson optimization level -> MSVC flags.
msvc_optimization_args = {
    '0': [],
    'g': ['/O0'],
    '1': ['/O1'],
    '2': ['/O2'],
    '3': ['/O2'],
    's': ['/O1'], # Implies /Os.
}  # type: T.Dict[str, T.List[str]]
msvc_debug_args = {
    False: [],
    True: [] # Fixme!
}  # type: T.Dict[bool, T.List[str]]
class VisualStudioLikeCompiler(metaclass=abc.ABCMeta):

    """A common interface for all compilers implementing an MSVC-style
    interface.

    A number of compilers attempt to mimic MSVC, with varying levels of
    success, such as Clang-CL and ICL (the Intel C/C++ Compiler for Windows).
    This class implements as much common logic as possible.
    """

    std_warn_args = ['/W3']
    std_opt_args = ['/O2']
    # XXX: this is copied in this patch only to avoid circular dependencies
    #ignore_libs = unixy_compiler_internal_libs
    ignore_libs = ('m', 'c', 'pthread', 'dl', 'rt', 'execinfo')
    internal_libs = ()

    # b_vscrt option value -> CRT selection flag.
    crt_args = {
        'none': [],
        'md': ['/MD'],
        'mdd': ['/MDd'],
        'mt': ['/MT'],
        'mtd': ['/MTd'],
    } # type: T.Dict[str, T.List[str]]

    # /showIncludes is needed for build dependency tracking in Ninja
    # See: https://ninja-build.org/manual.html#_deps
    always_args = ['/nologo', '/showIncludes']
    # Meson warning level -> /W flag.
    warn_args = {
        '0': ['/W1'],
        '1': ['/W2'],
        '2': ['/W3'],
        '3': ['/W4'],
    } # type: T.Dict[str, T.List[str]]

    INVOKES_LINKER = False

    def __init__(self, target: str):
        self.base_options = ['b_pch', 'b_ndebug', 'b_vscrt'] # FIXME add lto, pgo and the like
        self.target = target
        self.is_64 = ('x64' in target) or ('x86_64' in target)
        # do some canonicalization of target machine
        if 'x86_64' in target:
            self.machine = 'x64'
        elif '86' in target:
            self.machine = 'x86'
        else:
            self.machine = target
        # NOTE(review): self.linker is expected to be provided by a
        # cooperating Compiler base class before this mixin's __init__
        # runs — confirm in concrete subclasses.
        self.linker.machine = self.machine

    # Override CCompiler.get_always_args
    def get_always_args(self) -> T.List[str]:
        return self.always_args

    def get_pch_suffix(self) -> str:
        return 'pch'

    def get_pch_name(self, header: str) -> str:
        """Derive the precompiled-header file name from the header name."""
        chopped = os.path.basename(header).split('.')[:-1]
        chopped.append(self.get_pch_suffix())
        pchname = '.'.join(chopped)
        return pchname

    def get_pch_base_name(self, header: str) -> str:
        # This needs to be implemented by inherting classes
        raise NotImplementedError

    def get_pch_use_args(self, pch_dir: str, header: str) -> T.List[str]:
        """Flags to consume a previously generated PCH (/FI + /Yu + /Fp)."""
        base = self.get_pch_base_name(header)
        pchname = self.get_pch_name(header)
        return ['/FI' + base, '/Yu' + base, '/Fp' + os.path.join(pch_dir, pchname)]

    def get_preprocess_only_args(self) -> T.List[str]:
        return ['/EP']

    def get_compile_only_args(self) -> T.List[str]:
        return ['/c']

    def get_no_optimization_args(self) -> T.List[str]:
        return ['/Od']

    def get_output_args(self, target: str) -> T.List[str]:
        # /Fe names an executable output, /Fo an object file.
        if target.endswith('.exe'):
            return ['/Fe' + target]
        return ['/Fo' + target]

    def get_optimization_args(self, optimization_level: str) -> T.List[str]:
        return msvc_optimization_args[optimization_level]

    def get_debug_args(self, is_debug: bool) -> T.List[str]:
        return msvc_debug_args[is_debug]

    def get_dependency_gen_args(self, outtarget: str, outfile: str) -> T.List[str]:
        # Dependencies are tracked via /showIncludes (see always_args).
        return []

    def linker_to_compiler_args(self, args: T.List[str]) -> T.List[str]:
        return ['/link'] + args

    def get_gui_app_args(self, value: bool) -> T.List[str]:
        # the default is for the linker to guess the subsystem based on presence
        # of main or WinMain symbols, so always be explicit
        if value:
            return ['/SUBSYSTEM:WINDOWS']
        else:
            return ['/SUBSYSTEM:CONSOLE']

    def get_pic_args(self) -> T.List[str]:
        return [] # PIC is handled by the loader on Windows

    def gen_vs_module_defs_args(self, defsfile: str) -> T.List[str]:
        if not isinstance(defsfile, str):
            raise RuntimeError('Module definitions file should be str')
        # With MSVC, DLLs only export symbols that are explicitly exported,
        # so if a module defs file is specified, we use that to export symbols
        return ['/DEF:' + defsfile]

    def gen_pch_args(self, header: str, source: str, pchname: str) -> T.Tuple[str, T.List[str]]:
        """Return (object name, flags) for generating a PCH from *header*."""
        objname = os.path.splitext(pchname)[0] + '.obj'
        return objname, ['/Yc' + header, '/Fp' + pchname, '/Fo' + objname]

    def openmp_flags(self) -> T.List[str]:
        return ['/openmp']

    def openmp_link_flags(self) -> T.List[str]:
        return []

    # FIXME, no idea what these should be.
    def thread_flags(self, env: 'Environment') -> T.List[str]:
        return []

    @classmethod
    def unix_args_to_native(cls, args: T.List[str]) -> T.List[str]:
        """Translate GCC/Unix-style flags to their MSVC equivalents.

        Drops flags that have no MSVC meaning (-pthread, -mms-bitfields),
        maps -L/-l/-isystem/-idirafter to /LIBPATH:, .lib names and /I.
        """
        result = []
        for i in args:
            # -mms-bitfields is specific to MinGW-GCC
            # -pthread is only valid for GCC
            if i in ('-mms-bitfields', '-pthread'):
                continue
            if i.startswith('-LIBPATH:'):
                i = '/LIBPATH:' + i[9:]
            elif i.startswith('-L'):
                i = '/LIBPATH:' + i[2:]
            # Translate GNU-style -lfoo library name to the import library
            elif i.startswith('-l'):
                name = i[2:]
                if name in cls.ignore_libs:
                    # With MSVC, these are provided by the C runtime which is
                    # linked in by default
                    continue
                else:
                    i = name + '.lib'
            elif i.startswith('-isystem'):
                # just use /I for -isystem system include path s
                if i.startswith('-isystem='):
                    i = '/I' + i[9:]
                else:
                    i = '/I' + i[8:]
            elif i.startswith('-idirafter'):
                # same as -isystem, but appends the path instead
                if i.startswith('-idirafter='):
                    i = '/I' + i[11:]
                else:
                    i = '/I' + i[10:]
            # -pthread in link flags is only used on Linux
            elif i == '-pthread':
                continue
            result.append(i)
        return result

    @classmethod
    def native_args_to_unix(cls, args: T.List[str]) -> T.List[str]:
        """Translate MSVC-style flags back to GCC/Unix-style ones."""
        result = []
        for arg in args:
            if arg.startswith(('/LIBPATH:', '-LIBPATH:')):
                result.append('-L' + arg[9:])
            elif arg.endswith(('.a', '.lib')) and not os.path.isabs(arg):
                result.append('-l' + arg)
            else:
                result.append(arg)
        return result

    def get_werror_args(self) -> T.List[str]:
        return ['/WX']

    def get_include_args(self, path: str, is_system: bool) -> T.List[str]:
        if path == '':
            path = '.'
        # msvc does not have a concept of system header dirs.
        return ['-I' + path]

    def compute_parameters_with_absolute_paths(self, parameter_list: T.List[str], build_dir: str) -> T.List[str]:
        """Rewrite relative -I//I//LIBPATH: paths as absolute (in place)."""
        for idx, i in enumerate(parameter_list):
            if i[:2] == '-I' or i[:2] == '/I':
                parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:]))
            elif i[:9] == '/LIBPATH:':
                parameter_list[idx] = i[:9] + os.path.normpath(os.path.join(build_dir, i[9:]))
        return parameter_list

    # Visual Studio is special. It ignores some arguments it does not
    # understand and you can't tell it to error out on those.
    # http://stackoverflow.com/questions/15259720/how-can-i-make-the-microsoft-c-compiler-treat-unknown-flags-as-errors-rather-t
    def has_arguments(self, args: T.List[str], env: 'Environment', code, mode: str) -> T.Tuple[bool, bool]:
        # D9002 ("ignoring unknown option") is warning 9002; LNK4044 for the
        # linker — detect these in the output since the exit code won't tell.
        warning_text = '4044' if mode == 'link' else '9002'
        with self._build_wrapper(code, env, extra_args=args, mode=mode) as p:
            if p.returncode != 0:
                return False, p.cached
            return not(warning_text in p.stde or warning_text in p.stdo), p.cached

    def get_compile_debugfile_args(self, rel_obj: str, pch: bool = False) -> T.List[str]:
        """Return /Fd args naming the .pdb file next to the object file."""
        pdbarr = rel_obj.split('.')[:-1]
        pdbarr += ['pdb']
        args = ['/Fd' + '.'.join(pdbarr)]
        return args

    def get_instruction_set_args(self, instruction_set: str) -> T.Optional[T.List[str]]:
        # None means "not supported" for the current target bitness.
        if self.is_64:
            return vs64_instruction_set_args.get(instruction_set, None)
        return vs32_instruction_set_args.get(instruction_set, None)

    def _calculate_toolset_version(self, version: int) -> T.Optional[str]:
        """Map the numeric _MSC_VER-style version to a toolset version."""
        if version < 1310:
            return '7.0'
        elif version < 1400:
            return '7.1' # (Visual Studio 2003)
        elif version < 1500:
            return '8.0' # (Visual Studio 2005)
        elif version < 1600:
            return '9.0' # (Visual Studio 2008)
        elif version < 1700:
            return '10.0' # (Visual Studio 2010)
        elif version < 1800:
            return '11.0' # (Visual Studio 2012)
        elif version < 1900:
            return '12.0' # (Visual Studio 2013)
        elif version < 1910:
            return '14.0' # (Visual Studio 2015)
        elif version < 1920:
            return '14.1' # (Visual Studio 2017)
        elif version < 1930:
            return '14.2' # (Visual Studio 2019)
        mlog.warning('Could not find toolset for version {!r}'.format(self.version))
        return None

    def get_toolset_version(self) -> T.Optional[str]:
        # See boost/config/compiler/visualc.cpp for up to date mapping
        try:
            version = int(''.join(self.version.split('.')[0:2]))
        except ValueError:
            return None
        return self._calculate_toolset_version(version)

    def get_default_include_dirs(self) -> T.List[str]:
        # MSVC publishes its default include dirs via the INCLUDE env var.
        if 'INCLUDE' not in os.environ:
            return []
        return os.environ['INCLUDE'].split(os.pathsep)

    def get_crt_compile_args(self, crt_val: str, buildtype: str) -> T.List[str]:
        """Return the CRT flags for crt_val, resolving 'from_buildtype'."""
        if crt_val in self.crt_args:
            return self.crt_args[crt_val]
        assert(crt_val == 'from_buildtype')
        # Match what build type flags used to do.
        if buildtype == 'plain':
            return []
        elif buildtype == 'debug':
            return self.crt_args['mdd']
        elif buildtype == 'debugoptimized':
            return self.crt_args['md']
        elif buildtype == 'release':
            return self.crt_args['md']
        elif buildtype == 'minsize':
            return self.crt_args['md']
        else:
            assert(buildtype == 'custom')
            raise mesonlib.EnvironmentException('Requested C runtime based on buildtype, but buildtype is "custom".')

    def has_func_attribute(self, name: str, env: 'Environment') -> T.Tuple[bool, bool]:
        # MSVC doesn't have __attribute__ like Clang and GCC do, so just return
        # false without compiling anything
        return name in ['dllimport', 'dllexport'], False

    def get_argument_syntax(self) -> str:
        return 'msvc'
class MSVCCompiler(VisualStudioLikeCompiler):

    """Specific to the Microsoft compilers (cl.exe)."""

    def __init__(self, target: str):
        super().__init__(target)
        self.id = 'msvc'

    def get_compile_debugfile_args(self, rel_obj: str, pch: bool = False) -> T.List[str]:
        args = super().get_compile_debugfile_args(rel_obj, pch)
        # When generating a PDB file with PCH, all compile commands write
        # to the same PDB file. Hence, we need to serialize the PDB
        # writes using /FS since we do parallel builds. This slows down the
        # build obviously, which is why we only do this when PCH is on.
        # This was added in Visual Studio 2013 (MSVC 18.0). Before that it was
        # always on: https://msdn.microsoft.com/en-us/library/dn502518.aspx
        if pch and mesonlib.version_compare(self.version, '>=18.0'):
            args = ['/FS'] + args
        return args

    def get_instruction_set_args(self, instruction_set: str) -> T.Optional[T.List[str]]:
        if self.version.split('.')[0] == '16' and instruction_set == 'avx':
            # VS documentation says that this exists and should work, but
            # it does not. The headers do not contain AVX intrinsics
            # and they can not be called.
            return None
        return super().get_instruction_set_args(instruction_set)

    def get_buildtype_args(self, buildtype: str) -> T.List[str]:
        args = msvc_buildtype_args[buildtype]
        # /Gw (whole-program global data optimization) is only available
        # from MSVC 18.0 onwards; strip it for older compilers.
        if mesonlib.version_compare(self.version, '<18.0'):
            args = [arg for arg in args if arg != '/Gw']
        return args

    def get_pch_base_name(self, header: str) -> str:
        return os.path.basename(header)
class ClangClCompiler(VisualStudioLikeCompiler):

    """Specific to Clang-CL (clang-cl.exe)."""

    def __init__(self, target: str):
        super().__init__(target)
        self.id = 'clang-cl'

    def has_arguments(self, args: T.List[str], env: 'Environment', code, mode: str) -> T.Tuple[bool, bool]:
        # Unlike cl.exe, clang-cl can be told to error on unknown compile
        # arguments; linking still relies on the parent's warning scan.
        if mode != 'link':
            args = args + ['-Werror=unknown-argument']
        return super().has_arguments(args, env, code, mode)

    def get_toolset_version(self) -> T.Optional[str]:
        # XXX: what is the right thing to do here?
        return '14.1'

    def get_pch_base_name(self, header: str) -> str:
        return header

    def get_buildtype_args(self, buildtype: str) -> T.List[str]:
        # clang-cl uses /Zi instead of MSVC's /ZI for debug info.
        return clangcl_buildtype_args[buildtype]
| becm/meson | mesonbuild/compilers/mixins/visualstudio.py | Python | apache-2.0 | 15,968 |
import json
import math
from etherealpost.db import helpers
from etherealpost.db.cache import Cache
from etherealpost.db.helpers import gold_string, time_left_string
from etherealpost.db.item import Item
import time
from etherealpost.db.realm import Realm
from etherealpost.db.realm_name import RealmName
class Db(object):
    """Data-access layer over the MongoDB database plus a Redis-style cache.

    Most read methods take a ``cache=True`` flag: when set, they first try
    the cache (keyed per realm / item / seller, usually invalidated via the
    realm's ``lastModified`` timestamp) before hitting MongoDB.
    """

    def __init__(self, db):
        # db: a pymongo database handle; collections are accessed as
        # attributes (Auction, Item, Realm, RealmName, PriceHistory, ...).
        self.db = db
        self.cache = Cache()
    def total_auctions(self):
        """Total number of auction documents across all realms."""
        return self.db.Auction.count()
    def db_size(self, cache=True):
        """Return the MongoDB data size in bytes (cached for 5 minutes)."""
        if cache:
            c = self.cache.get('mdbsize')
            if c is not None:
                return float(c)
        db_size = self.db.command({'dbStats': 1})['dataSize']
        self.cache.set('mdbsize', db_size, ex=(5*60)) # Cache for 5 mins
        return db_size
    def total_documents(self, cache=True):
        """Return the total document count in the DB (cached for 5 minutes)."""
        if cache:
            c = self.cache.get('mdbobjs')
            if c is not None:
                return int(c)
        total_objs = self.db.command({'dbStats': 1})['objects']
        self.cache.set('mdbobjs', total_objs,
                       ex=(5*60)) # Cache for 5 mins
        return total_objs
    def data_last_updated(self, cache=True):
        """Timestamp of the most recently updated realm (cached 10 seconds)."""
        if cache:
            c = self.cache.get('lup')
            if c is not None:
                return int(c)
        data = self.db.Realm.find({}).sort([('lastUpdated', -1)]).limit(1)[0]
        last_updated = data['lastUpdated']
        self.cache.set('lup', last_updated, ex=10) # Cache for 10 seconds
        return last_updated
    def get_auctions_for_item(self, item, realm, cache=True):
        """
        Return all auctions of an item on a realm (``_id`` stripped).

        :type item: int
        :type realm: Realm
        :type cache: bool
        :rtype: list[Auction]
        """
        key = 'i:{0}:{1}'.format(realm.id, item)
        lm = realm.lastModified
        if cache is True:
            c = self.cache.get_json(key=key, last_modified=lm)
            if c is not None:
                return c
        cursor = self.db.Auction.find({
            'realm': realm.id,
            'item': int(item)
        })
        auctions = []
        for auc in cursor:
            # ObjectId is not JSON serializable; drop it before caching.
            del(auc['_id'])
            auctions.append(auc)
        self.cache.set_json(key=key, data=auctions, last_modified=lm)
        return auctions
    def get_item(self, item_id):
        """Return the Item with the given id, or None when unknown."""
        item = self.db.Item.find_one({'_id': int(item_id)})
        return Item(**item) if item is not None else None
    @staticmethod
    def get_auction_mp(auctions):
        """Market price of a list of auctions (per-unit buyouts), 0 if empty."""
        if len(auctions):
            bp = [math.ceil(a['buyout']/a['quantity']) for a in auctions]
            return helpers.get_market_price(bp)
        else:
            return 0
    def get_slugs_by_owner_realm(self, cache=True):
        """Map each ownerRealm name to its URL slug (cached 12 hours)."""
        if cache:
            c = self.cache.get('slug:or')
            if c is not None:
                return json.loads(c.decode('utf-8'))
        cursor = self.db.RealmName.find({})
        slugs = {}
        for c in cursor:
            if 'ownerRealm' in c:
                ownr = c['ownerRealm']
                # ownerRealm may be a single name or a list (connected realms).
                if isinstance(ownr, list):
                    for i in ownr:
                        slugs[i] = c['slug']
                else:
                    slugs[ownr] = c['slug']
        self.cache.set('slug:or', json.dumps(slugs), ex=(12*60*60)) # 12 hours
        return slugs
    def names_by_slug(self, cache=True):
        """Map each realm slug to its display name (cached 12 hours)."""
        if cache:
            c = self.cache.get('names:slug')
            if c is not None:
                return json.loads(c.decode('utf-8'))
        cursor = self.db.RealmName.find()
        slugs = {}
        for c in cursor:
            if 'name' in c and 'slug' in c:
                slugs[c['slug']] = c['name']
        self.cache.set('names:slug', json.dumps(slugs),
                       ex=(12 * 60 * 60)) # 12 hours
        return slugs
    def get_unique_sellers(self, realm, limit=None):
        """Aggregate sellers on a realm with auction count and buyout total,
        sorted by auction count descending (optionally limited)."""
        query = [
            {'$match': {'realm': int(realm.id)}},
            {'$group': {
                '_id': {'owner': '$owner', 'ownerRealm': '$ownerRealm'},
                'count': {'$sum': 1},
                'buyout_total': {'$sum': '$buyout'}
            }},
            {'$sort': {
                'count': -1
            }}
        ]
        if limit is not None:
            query.append({'$limit': limit})
        return list(self.db.Auction.aggregate(query))
    def get_market_cap(self, realm):
        """Sum of all buyouts on the realm (the realm's "market cap")."""
        query = [
            {'$match': {'realm': int(realm.id)}},
            {'$group': {
                '_id': None,
                'market_cap': {'$sum': '$buyout'},
            }}
        ]
        return list(self.db.Auction.aggregate(query))[0]['market_cap']
    def get_unique_items(self, realm, limit=None):
        """Aggregate distinct items on a realm (count, buyout total and
        joined item details), sorted by auction count descending."""
        query = [
            {'$match': {'realm': int(realm.id)}},
            {'$group': {
                '_id': {'item': '$item'},
                'count': {'$sum': 1},
                'buyout_total': {'$sum': '$buyout'}
            }},
            {'$lookup': {
                'from': 'Item',
                'localField': '_id.item',
                'foreignField': '_id',
                'as': 'itemDetails'
            }},
            {'$sort': {
                'count': -1
            }}
        ]
        if limit is not None:
            query.append({'$limit': limit})
        return list(self.db.Auction.aggregate(query))
    def get_total_auctions(self, realm=None):
        """Auction count for one realm, or for all realms when realm is None."""
        if realm is None:
            return self. db.Auction.count()
        else:
            return self.db.Auction.count({'realm': int(realm.id)})
    def get_realm_statistics(self, realm, cache=True):
        """Aggregate dashboard statistics for a realm (cached until the
        realm's lastModified changes; not cached while an update runs)."""
        key = 'rstats:{0}'.format(realm.id)
        lm = realm.lastModified
        if cache is True:
            c = self.cache.get_json(key=key, last_modified=lm)
            if c is not None:
                # NOTE(review): debug print left in — consider logging instead.
                print('Got from cache! Key: {0}'.format(lm))
                return c
        unique_sellers = self.get_unique_sellers(realm=realm)
        market_cap = self.get_market_cap(realm=realm)
        unique_items = self.get_unique_items(realm=realm)
        total_auctions = self.get_total_auctions(realm=realm)
        return_data = {
            'total_auctions': total_auctions,
            'unique_sellers': unique_sellers,
            'top_sellers': unique_sellers[0:10],
            'market_cap': market_cap,
            'unique_items': len(unique_items),
            'popular_items': unique_items[0:10]
        }
        # Don't cache if the realm is currently updating
        if self.is_realm_running(realm=realm) is False:
            self.cache.set_json(key=key, data=return_data, last_modified=lm)
        return return_data
    def is_realm_running(self, realm):
        """True when the realm's auction data is being refreshed right now."""
        # Get the current state of the realm
        realm = self.get_realm_from_url(region=realm.region,
                                        slug=realm.realms[0])
        # Don't cache if the realm is currently updating
        return realm.runningNow
    def get_realm_stats_for_item(self, realm, item_id, cache=True):
        """Realm-level price history statistics and chart series for an item."""
        key = 'ph:i:{0}:{1}'.format(realm.id, item_id)
        lm = realm.lastModified
        if cache is True:
            c = self.cache.get_json(key=key, last_modified=lm)
            if c is not None:
                return c
        price_history = self.price_history(realm=realm, item_id=item_id,
                                           region=False)
        return_data = {
            'median_market': price_history['median_market'],
            'avg_market': price_history['avg_market'],
            'std_dev': price_history['std_dev'],
            'ph_mp_chart': price_history['ph_mp_chart'],
            'ph_qty_chart': price_history['ph_qty_chart']
        }
        # Don't cache if the realm is currently updating
        if self.is_realm_running(realm=realm) is False:
            self.cache.set_json(key=key, data=return_data, last_modified=lm)
        return return_data
    def get_owner_realm_by_slug(self, slug, cache=True):
        """Resolve a realm slug to its ownerRealm value (cached 12 hours)."""
        key = 'or:slug:{0}'.format(slug)
        if cache:
            c = self.cache.get(key)
            if c is not None:
                return c.decode('utf-8')
        realm_name = self.db.RealmName.find_one({'slug': slug})
        if realm_name is None:
            return None
        ownr = RealmName(**realm_name).ownerRealm
        self.cache.set(key, ownr, ex=(12*60*60)) # Cache for 12 hours
        return ownr
    def get_seller_statistics(self, realm, owner, owner_realm, cache=True):
        """Aggregate a single seller's auctions, totals and 14-day history."""
        key = 'sh:{0}:{1}{2}'.format(realm.id, owner, owner_realm)
        lm = realm.lastModified
        if cache is True:
            c = self.cache.get_json(key=key, last_modified=lm)
            if c is not None:
                return c
        # ownerRealm may be a list for connected realms.
        if isinstance(owner_realm, list):
            or_query = {
                '$in': owner_realm
            }
        else:
            or_query = owner_realm
        query = [
            {'$match': {
                'realm': int(realm.id),
                'owner': owner.title(),
                'ownerRealm': or_query
            }},
            {'$lookup': {
                'from': 'Item',
                'localField': 'item',
                'foreignField': '_id',
                'as': 'itemDetails'
            }}
        ]
        cursor = self.db.Auction.aggregate(query)
        # Last 14 days of per-snapshot auction counts for this seller.
        history_query = {'time': {
            '$gte': int(time.time()) - (60 * 60 * 24 * 14)
        },
            'realm': int(realm.id),
            'owner': owner.title(),
            'ownerRealm': or_query
        }
        history_cursor = self.db.SellerHistory.find(history_query)
        seller_history = [
            [h['time'], h['auctionCount']] for h in history_cursor]
        auctions = []
        item_count = {}
        max_buyout = 0
        max_buyout_id = 0
        buyout_value = 0
        for auction in cursor:
            del(auction['_id'])
            auction['buyoutPer'] = math.ceil(auction['buyout'] /
                                             auction['quantity'])
            auctions.append(auction)
            # Item 82800 is the Pet Cage: exclude it from the buyout total.
            if auction['item'] != 82800:
                buyout_value += auction['buyout']
            if auction['item'] not in item_count:
                item_count[auction['item']] = 0
            item_count[auction['item']] += 1
            if auction['buyout'] > max_buyout:
                max_buyout = auction['buyout']
                max_buyout_id = auction['item']
        # Local import; only needed for the max() key below.
        import operator
        most_common = max(item_count.items(), key=operator.itemgetter(1))
        auctions = sorted(auctions, key=lambda x: (x['timeLeft'],
                                                   -x['item'],
                                                   x['buyoutPer'],
                                                   x['quantity']))
        return_dict = {
            'auctions': auctions,
            'buyout_value': buyout_value,
            'most_expensive': {'item': max_buyout_id, 'amount': max_buyout},
            'most_common': {'item': most_common[0], 'count': most_common[1]},
            'seller_history': seller_history
        }
        # Don't cache if the realm is currently updating
        if self.is_realm_running(realm=realm) is False:
            self.cache.set_json(key=key, data=return_dict, last_modified=lm)
        return return_dict
    def get_region_stats_for_item(self, realm, item_id, cache=True):
        """Region-wide statistics for an item (cached for 30 seconds)."""
        key = 'rs:{0}:{1}'.format(realm.region, item_id)
        if cache is True:
            c = self.cache.get(key=key)
            if c is not None:
                return json.loads(c.decode('utf-8'))
        price_history = self.price_history(realm=realm, item_id=item_id,
                                           region=True)
        realm_ids = self.realm_ids_for_region(realm.region)
        # Latest market price per realm in the region.
        avg_cursor = self.db.PriceHistory.aggregate([
            {'$match': {
                'realm': {'$in': realm_ids},
                'item': int(item_id)
            }},
            {'$sort': {
                'time': -1
            }},
            {'$group': {
                '_id': {'realm': '$realm'},
                'mp': {'$last': '$market_price'},
            }}
        ])
        # Total quantity and auction count across the region
        # (buyout > 1 filters out bid-only listings).
        qty_cursor = self.db.Auction.aggregate([
            {'$match': {
                'realm': {'$in': realm_ids},
                'item': int(item_id),
                'buyout': {'$gt': 1}
            }},
            {'$group': {
                '_id': None,
                'qty': {'$sum': '$quantity'},
                'num_auc': {'$sum': 1}
            }}
        ])
        mps = []
        qty = []
        num_auc = []
        for c in avg_cursor:
            mps.append(c['mp'])
        for c in qty_cursor:
            num_auc.append(c['num_auc'])
            qty.append(c['qty'])
        if len(mps):
            data_dict = {
                'avg_market': price_history['avg_market'],
                'median_market': price_history['median_market'],
                'std_dev': price_history['std_dev'],
                'avg_mp': helpers.average(mps),
                'total_qty': sum(qty),
                'num_auc': sum(num_auc)
            }
        else:
            data_dict = {
                'avg_market': 0,
                'median_market': 0,
                'std_dev': 0,
                'avg_mp': 0,
                'total_qty': 0,
                'num_auc': 0
            }
        self.cache.set(key=key, value=json.dumps(data_dict), ex=30)
        return data_dict
    def price_history(self, realm, item_id, region=False):
        """Last 14 days of price history for an item on a realm, or across
        the whole region when ``region`` is True. Returns averages, chart
        series (millisecond timestamps) and the raw documents."""
        ph_date = int(time.time()) - (60*60*24*14)
        if region:
            realm_search = {
                '$in': self.realm_ids_for_region(region=realm.region)
            }
        else:
            realm_search = realm.id
        cursor = self.db.PriceHistory.find({
            'realm': realm_search,
            'item': int(item_id),
            'time': {'$gte': ph_date}
        })
        mps = []
        price_history = []
        ph_mp_chart = []
        ph_qty_chart = []
        for c in cursor:
            price_history.append(c)
            mps.append(c['market_price'])
            # *1000: chart libraries expect millisecond timestamps.
            ph_mp_chart.append([c['time']*1000, c['market_price']])
            ph_qty_chart.append([c['time']*1000, c['total_qty']])
        if len(mps):
            avg_market = helpers.average(mps)
            median_market = helpers.median(mps)
            st_dev = helpers.std_dev(data=mps, population=True)
            # NOTE(review): the cursor is already exhausted by the loop
            # above, so this always rebinds 'raw' to an empty list and
            # discards the collected documents — looks like a bug; confirm
            # whether 'raw' should keep the documents gathered above.
            price_history = [c for c in cursor]
            return {
                'avg_market': avg_market,
                'median_market': median_market,
                'std_dev': st_dev,
                'raw': price_history,
                'ph_mp_chart': ph_mp_chart,
                'ph_qty_chart': ph_qty_chart
            }
        else:
            return {
                'avg_market': 0,
                'median_market': 0,
                'std_dev': 0,
                'raw': [],
                'ph_mp_chart': [],
                'ph_qty_chart': []
            }
    def realm_ids_for_region(self, region, cache=True):
        """All realm ids belonging to a region (cached 12 hours)."""
        key = 'rid:region:{0}'.format(region)
        if cache:
            c = self.cache.get(key)
            if c is not None:
                return json.loads(c.decode('utf-8'))
        realms = self.db.Realm.find({'region': region})
        ids = [r['_id'] for r in realms]
        self.cache.set(key, json.dumps(ids), ex=(12*60*60)) # 12 hrs
        return ids
    def get_realm_from_url(self, region, slug):
        """Look up a Realm by region + slug, or None when not found."""
        result = self.db.Realm.find_one({'region': region, 'realms': slug})
        return Realm(**result) if result is not None else None
    def generate_auctions_for_display(self, auctions, region, slug):
        """Render the given auctions as HTML table rows, sorted by per-unit
        buyout, then owner, then quantity."""
        for i, auction in enumerate(auctions):
            # Compute the per-unit buyout price once if not already present.
            if not 'bp' in auctions[i]:
                bp = math.ceil(auction['buyout'] / auction['quantity'])
                auctions[i]['bp'] = bp
        slugs = self.get_slugs_by_owner_realm()
        auctions = sorted(auctions, key=lambda x: (x['bp'], x['owner'],
                                                   x['quantity']))
        dt = ''
        # NOTE(review): owner names and realm values are interpolated into
        # HTML without escaping — confirm upstream sanitization (XSS risk).
        for auction in auctions:
            or_slug = slugs[auction['ownerRealm']]
            dt += '<tr>'
            dt += '<td class="numAuctions col-md-1">1</td>'
            dt += '<td>{0}</td>'.format(auction['quantity'])
            dt += '<td><a href="/{region}/{realm_slug}/seller/' \
                  '{owner_l}-{owner_realm}">{owner}</a></td>' \
                .format(region=region, realm_slug=slug,
                        owner=auction['owner'],
                        owner_l=auction['owner'].lower(),
                        owner_realm=or_slug)
            dt += '<td>{0}</td>'.format(time_left_string(auction['timeLeft']))
            dt += '<td>{0}</td>'.format(gold_string(auction['buyout']))
            dt += '<td>{0}</td>'.format(gold_string(auction['bp']))
            dt += '</tr>'
        return dt
    @staticmethod
    def sortkeypicker(keynames):
        """Build a multi-field sort key function from a list of field names.

        A ``-`` prefix sorts that field descending (numeric fields only,
        since descending is implemented by negating the value).
        """
        negate = set()
        for i, k in enumerate(keynames):
            if k[:1] == '-':
                keynames[i] = k[1:]
                negate.add(k[1:])
        def getit(adict):
            composite = [adict[kn] for kn in keynames]
            for l, (m, v) in enumerate(zip(keynames, composite)):
                if m in negate:
                    composite[l] = -v
            return composite
        return getit
| etherealpost/etherealpost.com | etherealpost/db/db.py | Python | mit | 17,493 |
# Ansible lookup plugin for getting the first available file given a list of items and a template
# (c) 2015,2016 David Lundgren <dlundgren@syberisle.net>
#
# Given a list of items it will attempt to find a file in the regular list of paths that is similar to the name
#
# - name: Add SSH keys
# authorized_key:
# user: "{{ item.username }}"
# state: present
# key: "{{ lookup('file', item.pubkey) }} }}"
# manage_dir: yes
# path: '/home/{{ item.username }}/.ssh/authorized_keys'
# with_available_file_from_items:
# items: "{{ users }}"
# name: files/ssh/keys/{{ item.username }}.pubkeys
# key: pubkey
#
# This will look in the {role}/files/ssh/keys/, {playbook}/files/ssh/keys/ folders for the {username}.pubkeys file.
# If the file is not found then the user is not returned in the list of items to be used.
DOCUMENTATION = """
author: David Lundgren
lookup: available_file_from_items
options:
lookup_file_paths:
type: list
default: []
ini:
- key: lookup_file_paths
section: defaults
yaml:
key: defaults.lookup_file_paths
"""
import os
from ansible import utils
from ansible import constants as C
from ansible.plugins.lookup import LookupBase
# Ansible 2.4+: register this plugin's configuration definitions from the
# DOCUMENTATION block above. Older Ansible versions lack these APIs, so any
# failure here is deliberately swallowed — the plugin then falls back to the
# legacy C.get_config() lookups in LookupModule.get_paths().
try:
    from ansible.parsing.plugin_docs import read_docstring

    # load the option definitions from this file's DOCUMENTATION string
    dstring = read_docstring(__file__.replace('.pyc', '.py'), verbose=False, ignore_errors=False)
    if dstring.get('doc', False):
        if 'options' in dstring['doc'] and isinstance(dstring['doc']['options'], dict):
            C.config.initialize_plugin_configuration_definitions('lookup', 'available_file_from_items', dstring['doc']['options'])
except Exception:
    # Best-effort only: never prevent the plugin from loading. Catching
    # Exception (not a bare except) keeps KeyboardInterrupt/SystemExit alive.
    pass
class LookupModule(LookupBase):
    """Lookup that keeps only items for which a per-item file exists.

    For each item, the templated ``name`` is resolved against the
    configured/standard ``files`` search paths; items whose file is found
    get the path stored under ``key`` and are returned, others are dropped.
    """
    def run(self, terms, variables=None, **kwargs):
        # terms is a dict with 'items' (list), 'name' (templated path) and
        # 'key' (attribute to store the resolved path under).
        ret = []
        for item in terms['items']:
            # Expose the current item to the template engine so
            # '{{ item.* }}' can be used inside terms['name'].
            self._templar.set_available_variables({'item':item})
            content = self.resolve_available_file_path(self._templar.template(terms['name'], preserve_trailing_newlines=True), variables)
            if content:
                item[terms['key']] = content
                ret.append(item)
        return ret
    def get_paths(self, vars):
        """Return the ordered, de-duplicated list of base search paths."""
        paths = []
        basedir = self.get_basedir(vars)
        try:
            # Ansible 2.4
            lookupPaths = C.config.get_config_value('lookup_file_paths', None, 'lookup', 'available_file_from_items')
        except AttributeError:
            # Ansible 2.3
            lookupPaths = C.get_config(C.p, C.DEFAULTS, 'lookup_file_paths', None, [], value_type='list')
        except TypeError:
            # Ansible 2.2.x and below
            lookupPaths = C.get_config(C.p, C.DEFAULTS, 'lookup_file_paths', None, [], islist=True)
        for path in lookupPaths:
            path = utils.path.unfrackpath(path)
            if os.path.exists(path):
                paths.append(path)
        # Role file / playbook directories come after the configured paths.
        if '_original_file' in vars:
            paths.append(self._loader.path_dwim_relative(basedir, '', vars['_original_file']))
        if 'playbook_dir' in vars:
            paths.append(vars['playbook_dir'])
        paths.append(self._loader.path_dwim(basedir))
        # De-duplicate while preserving the first-seen order.
        unq = []
        [unq.append(i) for i in paths if not unq.count(i)]
        return unq
    def resolve_available_file_path(self, file, vars):
        """Return the first existing '<path>/files/<file>', else None."""
        ret = None
        for path in self.get_paths(vars):
            path = os.path.join(path, 'files', file)
            if os.path.exists(path):
                ret = path
                break
        return ret
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-10-26 17:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9.1) initial schema for the sweeper app:
    # Board, ChatText and Player tables plus the ChatText -> Player FK.
    # Do not edit applied migrations by hand.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Board',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cur_player', models.IntegerField()),
                ('is_over', models.IntegerField()),
                ('session_key', models.CharField(max_length=100, unique=True)),
                ('state', models.CharField(max_length=10000)),
                ('is_opened', models.CharField(max_length=10000)),
            ],
        ),
        migrations.CreateModel(
            name='ChatText',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=1000)),
                ('username', models.CharField(max_length=100)),
                ('tag', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=100)),
                ('board', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sweeper.Board')),
            ],
        ),
        # Added after Player exists because ChatText.user references it.
        migrations.AddField(
            model_name='chattext',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sweeper.Player'),
        ),
    ]
| Hasan-Jawaheri/games | sweeper/migrations/0001_initial.py | Python | gpl-3.0 | 1,834 |
from django import forms
import datetime
from gestion import models
class ObservacionesBecarioForm(forms.Form):
    """Free-text observations form for a becario.

    When a ``becario`` keyword argument is supplied, the textarea is
    pre-filled with that becario's stored observations.
    """

    observaciones = forms.CharField(
        label="Observaciones",
        widget=forms.Textarea(attrs={'cols': 50, 'rows': 5, 'class': 'form-control'}))

    def __init__(self, *args, **kwargs):
        # The view injects the target becario; pop it before Form.__init__
        # so Django does not see an unexpected keyword.
        self.becario = kwargs.pop('becario')
        super(ObservacionesBecarioForm, self).__init__(*args, **kwargs)
        if self.becario:
            self.fields['observaciones'].initial = self.becario.observaciones
class CambioBecarioForm(forms.ModelForm):
    """ModelForm registering a pending change for a becario.

    Supports assignment (A), resignation (R) and transfer (T), with
    cross-field validation in :meth:`clean` against the becario's current
    state and assigned plaza.
    """

    class Meta:
        model = models.CambiosPendientes
        exclude = ('becario', "requiere_accion_manual")

    def __init__(self, *args, **kwargs):
        # The view injects the target becario; it drives validation and the
        # initial value of the plaza field.
        self.becario = kwargs.pop('becario')
        super(CambioBecarioForm, self).__init__(*args, **kwargs)
        if self.becario and self.becario.plaza_asignada:
            self.fields['plaza'].initial = self.becario.plaza_asignada

    ESTADOS = (
        ('A', 'Asignado'),
        ('R', 'Renuncia'),
        ('T', 'Traslado'),
    )
    plaza = forms.ModelChoiceField(
        label="Plaza de cambio", queryset=models.Plaza.objects.all(), required=False, widget=forms.Select(attrs={'class': 'form-control'}))
    estado_cambio = forms.ChoiceField(label="Estado de cambio", choices=ESTADOS, widget=forms.Select(attrs={'class': 'form-control'}))
    fecha_cambio = forms.DateField(label="Fecha de cambio", widget=forms.SelectDateWidget(attrs={'class': 'form-control'}),
        initial=datetime.date.today, required=False)
    observaciones = forms.CharField(label="Observaciones del cambio", widget=forms.Textarea(attrs={'cols':50, 'rows':5, 'class': 'form-control'}),
        required=False)

    def clean(self):
        """Cross-field validation of the requested change."""
        cleaned_data = super(CambioBecarioForm, self).clean()
        estado = cleaned_data.get('estado_cambio')
        plaza = cleaned_data.get('plaza')
        fecha = cleaned_data.get('fecha_cambio')
        if estado == 'T' and not plaza:
            self.add_error('plaza', 'Debe seleccionar una plaza si el cambio es un traslado.')
        if estado == 'T' and plaza == self.becario.plaza_asignada:
            self.add_error('plaza', 'Un becario no puede ser trasladado a su misma plaza.')
        if estado == 'A' and self.becario.estado == 'A':
            self.add_error('estado_cambio', 'Si desea asignar al becario a otra plaza, seleccione Traslado como estado de cambio.')
        # Bug fix: fecha_cambio is optional (required=False), so `fecha` may
        # be None here — the unguarded comparison raised TypeError.
        if fecha is not None and fecha < datetime.date.today():
            self.add_error('fecha_cambio', 'Seleccione una fecha en el futuro.')
| jeplasenciap/gespai | cambios/forms.py | Python | agpl-3.0 | 2,554 |
from django.shortcuts import render
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect
from .forms import NameForm
paginaPrincipal='/paginaPrincipal'
def loginUsuario(request):
    """Authenticate a user from the login form.

    Already-authenticated users are redirected to the main page.  On POST,
    valid credentials log the user in and redirect; inactive accounts and
    bad credentials re-render the login form with an error message.
    """
    if request.user.is_authenticated():
        return HttpResponseRedirect(paginaPrincipal)
    else:
        if request.method == 'POST':
            form = NameForm(request.POST)
            if form.is_valid():
                user = authenticate(username=form.cleaned_data['username'],password=form.cleaned_data['password'])
                if user is not None:
                    if user.is_active:
                        login(request,user)
                        return HttpResponseRedirect(paginaPrincipal)
                    else:
                        error = "Esta cuenta ha sido desactivada. Lo sentimos mucho"
                        # Bug fix: this branch rendered 'login/' (a template
                        # directory) instead of the login template used by
                        # every other branch.
                        return render(request, 'login/login.html', {'form': form,'error':error})
                else:
                    error = "Usuario y/o contraseña incorrectos. Intente de nuevo, por favor."
                    return render(request, 'login/login.html', {'form': form,'error': error})
        else:
            form = NameForm()
        # Reached on GET and on invalid-form POST (form carries its errors).
        return render(request, 'login/login.html', {'form': form})
def logoutUsuario(request):
    """Log the current user out and redirect to the main page."""
    logout(request)
    return HttpResponseRedirect(paginaPrincipal)
| AliGhahraei/RateMe | login/views.py | Python | gpl-3.0 | 1,373 |
"""
Describe usage of our default container/image domain.
"""
from typing import Hashable
from benchbuild.environments.domain import model
def describe_layers():
    # pytest-describe suite: every layer value type must be hashable so
    # layers can be stored in sets and used as dict keys.  Nested function
    # names are the collected test names — do not rename.
    def from_is_hashable():
        layer = model.FromLayer('a')
        assert isinstance(layer, Hashable)
    def add_is_hashable():
        layer = model.AddLayer(('a', 'b', 'c'), 'd')
        assert isinstance(layer, Hashable)
    def copy_is_hashable():
        layer = model.CopyLayer(('a', 'b', 'c'), 'd')
        assert isinstance(layer, Hashable)
    def run_is_hashable():
        layer = model.RunLayer(
            'cmd', ('a', 'b', 'c'), dict(a='a', b='b', c='c')
        )
        assert isinstance(layer, Hashable)
    def context_is_hashable():
        layer = model.ContextLayer(lambda: None)
        assert isinstance(layer, Hashable)
    def env_is_hashable():
        layer = model.UpdateEnv(dict(a='a', b='b', c='c'))
        assert isinstance(layer, Hashable)
    def workdir_is_hashable():
        layer = model.WorkingDirectory('a')
        assert isinstance(layer, Hashable)
    def entrypoint_is_hashable():
        layer = model.EntryPoint(('a', 'b', 'c'))
        assert isinstance(layer, Hashable)
    def cmd_is_hashable():
        layer = model.SetCommand(('a', 'b', 'c'))
        assert isinstance(layer, Hashable)
def describe_image():
    # pytest-describe suite for the Image aggregate (name + base layer +
    # ordered list of layers).  Nested function names are the collected
    # test names — do not rename.
    def image_requires_name_and_base():
        img = model.Image('name', model.FromLayer('base'), [])
        assert img.name == 'name'
        assert img.from_ == model.FromLayer('base')
        assert len(img.layers) == 0
    def can_append_layers_to_image():
        img = model.Image('-', model.FromLayer('-'), [model.FromLayer('base')])
        img.append(model.WorkingDirectory('abc'))
        assert img.layers == [
            model.FromLayer('base'),
            model.WorkingDirectory('abc')
        ]
    def can_prepend_layers_to_image():
        img = model.Image(
            '-', model.FromLayer('-'), [model.WorkingDirectory('abc')]
        )
        img.prepend(model.FromLayer('base'))
        assert img.layers == [
            model.FromLayer('base'),
            model.WorkingDirectory('abc')
        ]
    def is_hashable():
        # An image built from one of every layer kind must itself be hashable.
        layers = [
            model.FromLayer('a'),
            model.AddLayer(('a', 'b', 'c'), 'd'),
            model.CopyLayer(('a', 'b', 'c'), 'd'),
            model.RunLayer('cmd', ('a', 'b', 'c'), dict(a='a', b='b', c='c')),
            model.ContextLayer(lambda: None),
            model.UpdateEnv(dict(a='a', b='b', c='c')),
            model.WorkingDirectory('a'),
            model.EntryPoint(('a', 'b', 'c')),
            model.SetCommand(('a', 'b', 'c'))
        ]
        img = model.Image('-', model.FromLayer('-'), layers)
        assert isinstance(img, Hashable)
| PolyJIT/benchbuild | tests/environments/domain/test_model.py | Python | mit | 2,780 |
"""Game of life, see https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life
rules:
cells living in a 2d- array
Any live cell with two or three live neighbours survives.
Any dead cell with three live neighbours becomes a live cell.
All other live cells die in the next generation. Similarly, all other dead cells stay dead.
"""
a = [
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
#b = [[False, False, False, False, False] for x in range(5)]
#board = [a,b]
def viewer(array, truechar="x", falsechar="."):
    """Print *array* as text, one row per line.

    Truthy cells are drawn with *truechar*, everything else with
    *falsechar*.
    """
    for row in array:
        rendered = "".join(truechar if cell else falsechar for cell in row)
        print(rendered)
def process(array, wrap_around=True, reborn_min=3, reborn_max=3,
            stay_alive_min=2, stay_alive_max=3):
    """Compute the next generation of a life-like cellular automaton.

    Returns a fresh grid of booleans with the same shape as *array*; the
    input grid is left untouched.  With ``wrap_around`` the board behaves
    like a torus, otherwise off-board neighbours are ignored.  The
    birth/survival thresholds default to Conway's classic B3/S23 rules.
    """
    height = len(array)
    width = len(array[0])
    neighbourhood = ((-1, -1), (-1, 0), (-1, 1), (0, -1),
                     (0, 1), (1, -1), (1, 0), (1, 1))
    successor = [[False for _ in row] for row in array]
    for y, row in enumerate(array):
        for x, cell in enumerate(row):
            alive_around = 0
            for dy, dx in neighbourhood:
                ny, nx = y + dy, x + dx
                if wrap_around:
                    # Torus topology: wrap indices at every border.
                    ny %= height
                    nx %= width
                elif not (0 <= ny < height and 0 <= nx < width):
                    continue
                if array[ny][nx]:
                    alive_around += 1
            if cell:
                # Survival: a live cell stays alive within the allowed range.
                successor[y][x] = stay_alive_min <= alive_around <= stay_alive_max
            else:
                # Birth: a dead cell comes alive with the right neighbour count.
                successor[y][x] = reborn_min <= alive_around <= reborn_max
    return successor
def game(board):
    """Interactive game loop: show the board, wait for input, advance one
    generation.  Entering "quit" stops; any other input continues."""
    while True:
        viewer(board)
        # Prompt text is German for "press enter" (runtime string, kept as-is).
        command = input("enter drücken")
        if command == "quit":
            break
        board = process(board)
if __name__ == "__main__":
game(a)
| horstjens/ThePythonGameBook | en/python/unsorted/game_of_life.py | Python | gpl-3.0 | 2,429 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, Netservers Ltd. <support@netservers.co.uk>
# (c) 2017, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_storage_pool
short_description: Manages Primary Storage Pools on Apache CloudStack based clouds.
description:
- Create, update, put into maintenance, disable, enable and remove storage pools.
version_added: "2.4"
author:
- "Netservers Ltd. (@netservers)"
- "René Moser (@resmo)"
options:
name:
description:
- Name of the storage pool.
required: true
zone:
description:
- Name of the zone in which the host should be deployed.
- If not set, default zone is used.
storage_url:
description:
- URL of the storage pool.
- Required if C(state=present).
pod:
description:
- Name of the pod.
cluster:
description:
- Name of the cluster.
scope:
description:
- The scope of the storage pool.
- Defaults to cluster when C(cluster) is provided, otherwise zone.
choices: [ cluster, zone ]
managed:
description:
- Whether the storage pool should be managed by CloudStack.
- Only considere on creation.
hypervisor:
description:
- Required when creating a zone scoped pool.
choices: [ KVM, VMware, BareMetal, XenServer, LXC, HyperV, UCS, OVM, Simulator ]
storage_tags:
description:
- Tags associated with this storage pool.
provider:
description:
- Name of the storage provider e.g. SolidFire, SolidFireShared, DefaultPrimary, CloudByte.
default: DefaultPrimary
capacity_bytes:
description:
- Bytes CloudStack can provision from this storage pool.
capacity_iops:
description:
- Bytes CloudStack can provision from this storage pool.
allocation_state:
description:
- Allocation state of the storage pool.
choices: [ enabled, disabled ]
state:
description:
- State of the storage pool.
default: present
choices: [ present, absent ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: ensure a zone scoped storage_pool is present
local_action:
module: cs_storage_pool
zone: zone01
storage_url: rbd://admin:SECRET@ceph-mons.domain/poolname
provider: DefaultPrimary
name: Ceph RBD
scope: zone
hypervisor: KVM
- name: ensure a cluster scoped storage_pool is disabled
local_action:
module: cs_storage_pool
name: Ceph RBD
zone: zone01
cluster: cluster01
pod: pod01
storage_url: rbd://admin:SECRET@ceph-the-mons.domain/poolname
provider: DefaultPrimary
scope: cluster
allocation_state: disabled
- name: ensure a cluster scoped storage_pool is in maintenance
local_action:
module: cs_storage_pool
name: Ceph RBD
zone: zone01
cluster: cluster01
pod: pod01
storage_url: rbd://admin:SECRET@ceph-the-mons.domain/poolname
provider: DefaultPrimary
scope: cluster
allocation_state: maintenance
- name: ensure a storage_pool is absent
local_action:
module: cs_storage_pool
name: Ceph RBD
state: absent
'''
RETURN = '''
---
id:
description: UUID of the pool.
returned: success
type: string
sample: a3fca65a-7db1-4891-b97c-48806a978a96
created:
description: Date of the pool was created.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
capacity_iops:
description: IOPS CloudStack can provision from this storage pool
returned: when available
type: int
sample: 60000
zone:
description: The name of the zone.
returned: success
type: string
sample: Zone01
cluster:
description: The name of the cluster.
returned: when scope is cluster
type: string
sample: Cluster01
pod:
description: The name of the pod.
returned: when scope is cluster
type: string
sample: Cluster01
disk_size_allocated:
description: The pool's currently allocated disk space.
returned: success
type: int
sample: 2443517624320
disk_size_total:
description: The total size of the pool.
returned: success
type: int
sample: 3915055693824
disk_size_used:
description: The pool's currently used disk size.
returned: success
type: int
sample: 1040862622180
scope:
description: The scope of the storage pool.
returned: success
type: string
sample: cluster
hypervisor:
description: Hypervisor related to this storage pool.
returned: when available
type: string
sample: KVM
state:
description: The state of the storage pool as returned by the API.
returned: success
type: string
sample: Up
allocation_state:
description: The state of the storage pool.
returned: success
type: string
sample: enabled
path:
description: The storage pool path used in the storage_url.
returned: success
type: string
sample: poolname
overprovision_factor:
description: The overprovision factor of the storage pool.
returned: success
type: string
sample: 2.0
suitable_for_migration:
description: Whether the storage pool is suitable to migrate a volume or not.
returned: success
type: bool
sample: false
storage_capabilities:
description: Capabilities of the torage pool.
returned: success
type: dict
sample: {"VOLUME_SNAPSHOT_QUIESCEVM": "false"}
storage_tags:
description: the tags for the storage pool.
returned: success
type: list
sample: ["perf", "ssd"]
'''
# import cloudstack common
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
CS_HYPERVISORS,
)
class AnsibleCloudStackStoragePool(AnsibleCloudStack):
    """Manage CloudStack primary storage pools: create, update,
    enable/disable, move into/out of maintenance, and delete."""
    def __init__(self, module):
        super(AnsibleCloudStackStoragePool, self).__init__(module)
        # Maps CloudStack API response fields to this module's return keys.
        self.returns = {
            'capacityiops': 'capacity_iops',
            'podname': 'pod',
            'clustername': 'cluster',
            'disksizeallocated': 'disk_size_allocated',
            'disksizetotal': 'disk_size_total',
            'disksizeused': 'disk_size_used',
            'scope': 'scope',
            'hypervisor': 'hypervisor',
            'type': 'type',
            # NOTE(review): this entry looks inverted relative to the others
            # (API field -> result key); the API field is 'ipaddress' — confirm.
            'ip_address': 'ipaddress',
            'path': 'path',
            'overprovisionfactor': 'overprovision_factor',
            'storagecapabilities': 'storage_capabilities',
            'suitableformigration': 'suitable_for_migration',
        }
        self.allocation_states = {
            # Host state: param state
            'Up': 'enabled',
            'Disabled': 'disabled',
            'Maintenance': 'maintenance',
        }
        # Lazily-populated cache used by get_storage_pool().
        self.storage_pool = None
    def _get_common_args(self):
        """Arguments shared by the create and update API calls."""
        return {
            'name': self.module.params.get('name'),
            'url': self.module.params.get('storage_url'),
            'zoneid': self.get_zone(key='id'),
            'provider': self.get_storage_provider(),
            'scope': self.module.params.get('scope'),
            'hypervisor': self.module.params.get('hypervisor'),
            'capacitybytes': self.module.params.get('capacity_bytes'),
            'capacityiops': self.module.params.get('capacity_iops'),
        }
    def _allocation_state_enabled_disabled_changed(self, pool, allocation_state):
        """Return True if *allocation_state* is enabled/disabled and differs
        from the pool's current API state."""
        if allocation_state in ['enabled', 'disabled']:
            for pool_state, param_state in self.allocation_states.items():
                if pool_state == pool['state'] and allocation_state != param_state:
                    return True
        return False
    def _handle_allocation_state(self, pool, state=None):
        """Drive the pool towards the requested allocation state.

        *state* overrides the module's allocation_state parameter (used by
        absent_storage_pool to force maintenance before deletion).
        """
        allocation_state = state or self.module.params.get('allocation_state')
        if not allocation_state:
            return pool
        if self.allocation_states.get(pool['state']) == allocation_state:
            return pool
        # Cancel maintenance if target state is enabled/disabled
        elif allocation_state in ['enabled', 'disabled']:
            pool = self._cancel_maintenance(pool)
            pool = self._update_storage_pool(pool=pool, allocation_state=allocation_state)
        # Only an enabled host can put in maintenance
        elif allocation_state == 'maintenance':
            pool = self._update_storage_pool(pool=pool, allocation_state='enabled')
            pool = self._enable_maintenance(pool=pool)
        return pool
    def _create_storage_pool(self):
        """Create the pool; returns the new pool, or None in check mode."""
        args = self._get_common_args()
        args.update({
            'clusterid': self.get_cluster(key='id'),
            'podid': self.get_pod(key='id'),
            'managed': self.module.params.get('managed'),
        })
        scope = self.module.params.get('scope')
        if scope is None:
            # Default scope: cluster when a cluster was resolved, else zone.
            args['scope'] = 'cluster' if args['clusterid'] else 'zone'
        self.result['changed'] = True
        if not self.module.check_mode:
            res = self.query_api('createStoragePool', **args)
            return res['storagepool']
    def _update_storage_pool(self, pool, allocation_state=None):
        """Update capacity/tags and optionally flip enabled/disabled."""
        args = {
            'id': pool['id'],
            'capacitybytes': self.module.params.get('capacity_bytes'),
            'capacityiops': self.module.params.get('capacity_iops'),
            'tags': self.get_storage_tags(),
        }
        if self.has_changed(args, pool) or self._allocation_state_enabled_disabled_changed(pool, allocation_state):
            self.result['changed'] = True
            args['enabled'] = allocation_state == 'enabled' if allocation_state in ['enabled', 'disabled'] else None
            if not self.module.check_mode:
                res = self.query_api('updateStoragePool', **args)
                pool = res['storagepool']
        return pool
    def _enable_maintenance(self, pool):
        """Put the pool into maintenance (async job) unless already there."""
        if pool['state'].lower() != "maintenance":
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('enableStorageMaintenance', id=pool['id'])
                pool = self.poll_job(res, 'storagepool')
        return pool
    def _cancel_maintenance(self, pool):
        """Take the pool out of maintenance (async job) if it is in it."""
        if pool['state'].lower() == "maintenance":
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('cancelStorageMaintenance', id=pool['id'])
                pool = self.poll_job(res, 'storagepool')
        return pool
    def get_storage_tags(self):
        """Return the storage tags as the comma-joined string the API expects,
        or None when the parameter was not given."""
        storage_tags = self.module.params.get('storage_tags')
        if storage_tags is None:
            return None
        return ','.join(storage_tags)
    def get_storage_pool(self, key=None):
        """Look up (and cache) the pool by name/zone/pod/cluster.

        Returns None when no pool matches; only the first match is used.
        """
        if self.storage_pool is None:
            zoneid = self.get_zone(key='id')
            clusterid = self.get_cluster(key='id')
            podid = self.get_pod(key='id')
            args = {
                'zoneid': zoneid,
                'podid': podid,
                'clusterid': clusterid,
                'name': self.module.params.get('name'),
            }
            res = self.query_api('listStoragePools', **args)
            if 'storagepool' not in res:
                return None
            self.storage_pool = res['storagepool'][0]
        return self.storage_pool
    def present_storage_pool(self):
        """Ensure the pool exists, is up to date and in the requested state."""
        pool = self.get_storage_pool()
        if pool:
            pool = self._update_storage_pool(pool=pool)
        else:
            pool = self._create_storage_pool()
        if pool:
            pool = self._handle_allocation_state(pool=pool)
        return pool
    def absent_storage_pool(self):
        """Delete the pool (after forcing maintenance) if it exists."""
        pool = self.get_storage_pool()
        if pool:
            self.result['changed'] = True
            args = {
                'id': pool['id'],
            }
            if not self.module.check_mode:
                # Only a pool in maintenance can be deleted
                self._handle_allocation_state(pool=pool, state='maintenance')
                self.query_api('deleteStoragePool', **args)
        return pool
    def get_storage_provider(self, type="primary"):
        """Validate the configured provider against the API's provider list
        and return its name; fails the module when it is unknown.
        Note: the *type* parameter shadows the builtin of the same name."""
        args = {
            'type': type,
        }
        provider = self.module.params.get('provider')
        storage_providers = self.query_api('listStorageProviders', **args)
        for sp in storage_providers.get('dataStoreProvider') or []:
            if sp['name'].lower() == provider.lower():
                return provider
        self.fail_json(msg="Storage provider %s not found" % provider)
    def get_pod(self, key=None):
        """Resolve the pod by name within the zone; None when not configured."""
        pod = self.module.params.get('pod')
        if not pod:
            return None
        args = {
            'name': pod,
            'zoneid': self.get_zone(key='id'),
        }
        pods = self.query_api('listPods', **args)
        if pods:
            return self._get_by_key(key, pods['pod'][0])
        self.fail_json(msg="Pod %s not found" % self.module.params.get('pod'))
    def get_cluster(self, key=None):
        """Resolve the cluster by name within the zone; None when not configured."""
        cluster = self.module.params.get('cluster')
        if not cluster:
            return None
        args = {
            'name': cluster,
            'zoneid': self.get_zone(key='id'),
        }
        clusters = self.query_api('listClusters', **args)
        if clusters:
            return self._get_by_key(key, clusters['cluster'][0])
        self.fail_json(msg="Cluster %s not found" % cluster)
    def get_result(self, pool):
        """Augment the base result with pool-specific derived fields."""
        super(AnsibleCloudStackStoragePool, self).get_result(pool)
        if pool:
            self.result['storage_url'] = "%s://%s/%s" % (pool['type'], pool['ipaddress'], pool['path'])
            self.result['scope'] = pool['scope'].lower()
            self.result['storage_tags'] = pool['tags'].split(',') if pool.get('tags') else []
            self.result['allocation_state'] = self.allocation_states.get(pool['state'])
        return self.result
def main():
    """Module entry point: build the argument spec and apply the state."""
    spec = cs_argument_spec()
    spec.update(
        name=dict(required=True),
        storage_url=dict(),
        zone=dict(),
        pod=dict(),
        cluster=dict(),
        scope=dict(choices=['zone', 'cluster']),
        hypervisor=dict(choices=CS_HYPERVISORS),
        provider=dict(default='DefaultPrimary'),
        capacity_bytes=dict(type='int'),
        capacity_iops=dict(type='int'),
        managed=dict(type='bool'),
        storage_tags=dict(type='list', aliases=['storage_tag']),
        allocation_state=dict(choices=['enabled', 'disabled', 'maintenance']),
        state=dict(choices=['present', 'absent'], default='present'),
    )

    # pod and cluster only make sense together; storage_url is mandatory
    # when creating/updating a pool.
    together = cs_required_together() + [['pod', 'cluster']]
    module = AnsibleModule(
        argument_spec=spec,
        required_together=together,
        required_if=[
            ('state', 'present', ['storage_url']),
        ],
        supports_check_mode=True
    )

    handler = AnsibleCloudStackStoragePool(module)
    if module.params.get('state') == 'absent':
        pool = handler.absent_storage_pool()
    else:
        pool = handler.present_storage_pool()
    module.exit_json(**handler.get_result(pool))
if __name__ == '__main__':
main()
| hryamzik/ansible | lib/ansible/modules/cloud/cloudstack/cs_storage_pool.py | Python | gpl-3.0 | 15,962 |
import numpy as np
import sys
sys.path.append("../Pipeline/Audio/Pipeline/")
from AudioPipe.features import mfcc # Feature Extraction Module, part of the shared preprocessing
import AudioPipe.speaker.recognition as SR # Speaker Recognition Module
import scipy.io.wavfile as wav
import commands, os
import json
import argparse
import warnings
from scipy import stats
def outlier_detect(audio_dir, spk_name):
    """Score every clip of *spk_name* against a GMM trained on the speaker's
    merged clips, annotate each clip with its log-likelihood and z-score in
    clip_list.txt, update the shared stats.json, and return the list of
    per-clip log-likelihoods.  (Python 2 code: uses the print statement.)"""
    spk_dir = os.path.join(audio_dir,spk_name)
    list_fn = os.path.join(spk_dir,"clip_list.txt")
    clip_ls = from_jsonfile(list_fn)
    audio_merge = merge_clips(spk_dir, clip_ls)
    # Training a model based on the merged audio
    Model = SR.GMMRec()
    Model.enroll_file(spk_name, audio_merge)
    Model.train()
    # Score each utterance in the training set
    llhd_ls = []
    new_ls = []
    stat_fn = os.path.join(audio_dir,"stats.json")
    # Load existing stats only when the file exists and is non-empty.
    if os.path.exists(stat_fn) and os.path.getsize(stat_fn) > 0:
        stat_dict = from_jsonfile(stat_fn)
    else:
        stat_dict = {}
    if spk_name not in stat_dict:
        stat_dict[spk_name]={}
    for clip in clip_ls:
        audio_test = os.path.join(spk_dir,clip["name"])
        #commands.getstatusoutput("ffmpeg -i "+audio_test+" -vn -f wav -ab 16k "+audio_test)
        try:
            llhd = Model.predict(Model.get_mfcc(audio_test))[1]
        except ValueError:
            # Unscorable clip: report it and leave it out of the results.
            print clip["name"]
            continue
        llhd_ls.append(llhd)
        clip["llhd"] = llhd
        new_ls.append(clip)
    # z-score each likelihood relative to this speaker's own clips.
    z_score = stats.zscore(llhd_ls)
    for i in xrange(len(llhd_ls)):
        new_ls[i]["zscore"] = z_score[i]
    with open(list_fn, "w") as fh:
        fh.write(to_json(new_ls, indent=2))
    stat_dict[spk_name]["clip_num"]=len(clip_ls)
    stat_dict[spk_name]["zpos_num"]=sum(z_score>0)
    stat_dict[spk_name]["total_duration"]=sum([get_sec(clp["duration"]) for clp in new_ls])
    # "Clean" clips: z-score at or above ~0 (small tolerance for float error).
    stat_dict[spk_name]["clean_duration"]=sum([get_sec(clp["duration"]) for clp in new_ls if clp["zscore"]>-0.00001])
    with open(stat_fn, "w") as fh:
        fh.write(to_json(stat_dict, indent=2))
    os.remove(audio_merge)
    return llhd_ls
def merge_clips(spk_dir, clip_ls):
    """Concatenate the speaker's clips (capped at ~100) into one WAV via
    ffmpeg's concat demuxer and return the merged file's path."""
    # Write the list of clips into a file for merging training data
    temp_fl = os.path.join(spk_dir,"temp.txt")
    count = 0
    with open(temp_fl, "w") as fh:
        for clip in clip_ls:
            # Cap the amount of training material.
            if count>100:
                break
            fh.write("file "+clip["name"]+"\n")
            count+=1
    # Merge all the data into one audio
    audio_merge = os.path.join(spk_dir,"merged_gross.wav")
    # NOTE(review): only spaces in the list-file path are escaped; other
    # shell metacharacters in paths would break this command — confirm inputs.
    commands.getstatusoutput("ffmpeg -f concat -i "+temp_fl.replace(" ", "\ ")+" -c copy -y "+audio_merge)
    os.remove(temp_fl)
    return audio_merge
def from_jsonfile(filename):
    """Load and return the JSON document stored in *filename*."""
    with open(filename) as handle:
        return json.load(handle)
def get_sec(time_str):
    """Convert an ``H:MM:SS[.fff]`` duration string to seconds (float)."""
    hours, minutes, seconds = time_str.split(':')
    return 3600 * int(hours) + 60 * int(minutes) + float(seconds)
def to_json(result, **kwargs):
    '''Serialize *result* to a JSON string.

    Defaults to sorted keys, 4-space indentation and tidy separators;
    any keyword argument overrides the corresponding default.
    '''
    options = dict(sort_keys=True, indent=4, separators=(',', ': '))
    options.update(kwargs)
    return json.dumps(result, **options)
# Command-line interface: detect outliers for one speaker's clip directory.
parser = argparse.ArgumentParser(
    description='Detect outliers in a training dataset of one speaker.')
parser.add_argument(
    '-i', '--input', dest='input_dir', type=str,
    help='directory of audio clips')
parser.add_argument(
    '-s', '--spk', dest='spk_name', type=str,
    help='the name of the speaker')
args = parser.parse_args()
audio_dir = args.input_dir
spk_name = args.spk_name
# GMM scoring can emit benign numeric warnings; silence them for batch runs.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    outlier_detect(audio_dir, spk_name)
| RedHenLab/Audio | CNN/detect_outlier.py | Python | gpl-2.0 | 3,781 |
from unittest.mock import patch, MagicMock
from django.core.urlresolvers import reverse
from django.test import RequestFactory
from test_plus.test import TestCase
from ..models import (
Minion,
MinionData
)
from ..views import(
MinionCreateView,
MinionDetailView
)
class TestMinionCreateView(TestCase):
    """Tests for MinionCreateView (creation form and submission)."""

    def setUp(self):
        # Authenticated user and a bare request factory shared by the tests.
        self.user = self.make_user()
        self.factory = RequestFactory()

    def test_get(self):
        """GET renders the creation form for an authenticated user."""
        request = self.factory.get(reverse('minions:minion-create'))
        request.user = self.user
        response = MinionCreateView.as_view()(request)
        self.assertEqual(response.status_code, 200)

    # Bug fix: patch('Minion.save', ...) is an invalid target (there is no
    # importable module named 'Minion'); patch the imported model class.
    @patch.object(Minion, 'save', MagicMock(name="save"))
    def test_post(self):
        """POST creates a minion (model save mocked) and redirects."""
        data = {
            'name': 'test_minion'
        }
        request = self.factory.post(reverse('minions:minion-create'), data)
        request.user = self.user
        response = MinionCreateView.as_view()(request)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(Minion.save.called)
        self.assertEqual(Minion.save.call_count, 1)
| Farforr/overlord | overlord/minions/tests/test_views.py | Python | bsd-3-clause | 1,246 |
from test_helper import plpy_mock_config
def setup():
    """Package-level test setup hook: install the plpy mock before the
    tests in this package run."""
    plpy_mock_config()
| CartoDB/dataservices-api | server/lib/python/cartodb_services/test/__init__.py | Python | bsd-3-clause | 79 |
import pytest
from mock.mock import Mock
from insights.core.dr import SkipComponent
from insights.core.spec_factory import DatasourceProvider
from insights.specs.datasources.lpstat import LocalSpecs, lpstat_protocol_printers_info
LPSTAT_V = """
device for test_printer1: ipp://cups.test.com/printers/test_printer1
device for test_printer2: ipp://cups.test.com/printers/test_printer2
device for test_printer3: socket://192.168.1.5:9100
device for test_printer4: usb://smth
device for test_printer5: ///dev/null
""".strip()
LPSTAT_V_NOT_GOOD = """
lpstat: Transport endpoint is not connected
""".strip()
LPSTAT_V_RESULT = """
device for test_printer1: ipp
device for test_printer2: ipp
device for test_printer3: socket
device for test_printer4: usb
device for test_printer5: ///dev/null
""".strip()
RELATIVE_PATH = 'insights_commands/lpstat_-v'
def test_lpstat_datasource():
    """The datasource strips each device line down to its protocol prefix."""
    raw = Mock()
    raw.content = LPSTAT_V.splitlines()
    result = lpstat_protocol_printers_info({LocalSpecs.lpstat_v: raw})
    assert result is not None
    assert isinstance(result, DatasourceProvider)
    expected = DatasourceProvider(content=LPSTAT_V_RESULT, relative_path=RELATIVE_PATH)
    assert result.content == expected.content
    assert result.relative_path == expected.relative_path
def test_lpstat_datasource_NG_output():
    """Unparseable lpstat output makes the datasource raise SkipComponent."""
    raw = Mock()
    raw.content = LPSTAT_V_NOT_GOOD.splitlines()
    with pytest.raises(SkipComponent) as excinfo:
        lpstat_protocol_printers_info({LocalSpecs.lpstat_v: raw})
    assert 'SkipComponent' in str(excinfo)
| RedHatInsights/insights-core | insights/tests/datasources/test_lpstat.py | Python | apache-2.0 | 1,623 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class DefaultValue(Document):
	"""Default-value record; all behaviour is inherited from frappe's Document."""
	pass
def on_doctype_update():
	"""Create indexes for `tabDefaultValue` on `(parent, defkey)` and
	`(parent, parenttype)` if they do not exist yet."""
	# MySQL has no CREATE INDEX IF NOT EXISTS, so probe with `show index`
	# first.  The two indexes share the same add-if-missing pattern.
	for index_name, columns in (
		("defaultvalue_parent_defkey_index", "parent, defkey"),
		("defaultvalue_parent_parenttype_index", "parent, parenttype"),
	):
		if not frappe.db.sql("""show index from `tabDefaultValue`
			where Key_name="{0}" """.format(index_name)):
			frappe.db.commit()
			frappe.db.sql("""alter table `tabDefaultValue`
				add index {0}({1})""".format(index_name, columns))
| gangadharkadam/v5_frappe | frappe/core/doctype/defaultvalue/defaultvalue.py | Python | mit | 850 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'Ipv6SrpEncapsulationEnum' : _MetaInfoEnum('Ipv6SrpEncapsulationEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_cfg',
{
'srpa':'srpa',
'srpb':'srpb',
}, 'Cisco-IOS-XR-ipv6-nd-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-cfg']),
'Ipv6NdMonthEnum' : _MetaInfoEnum('Ipv6NdMonthEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_cfg',
{
'january':'january',
'february':'february',
'march':'march',
'april':'april',
'may':'may',
'june':'june',
'july':'july',
'august':'august',
'september':'september',
'october':'october',
'november':'november',
'december':'december',
}, 'Cisco-IOS-XR-ipv6-nd-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-cfg']),
'Ipv6NdRouterPrefEnum' : _MetaInfoEnum('Ipv6NdRouterPrefEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_cfg',
{
'high':'high',
'medium':'medium',
'low':'low',
}, 'Cisco-IOS-XR-ipv6-nd-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-cfg']),
'Ipv6Neighbor.Neighbors.Neighbor' : {
'meta_info' : _MetaInfoClass('Ipv6Neighbor.Neighbors.Neighbor',
False,
[
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'neighbor_address',
'Cisco-IOS-XR-ipv6-nd-cfg', True),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-ipv6-nd-cfg', True),
_MetaInfoClassMember('encapsulation', REFERENCE_ENUM_CLASS, 'Ipv6SrpEncapsulationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_cfg', 'Ipv6SrpEncapsulationEnum',
[], [],
''' Encapsulation type only if interface type is
SRP
''',
'encapsulation',
'Cisco-IOS-XR-ipv6-nd-cfg', False),
_MetaInfoClassMember('mac-address', ATTRIBUTE, 'str' , None, None,
[], [b'[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' 48-bit hardware address H.H.H
''',
'mac_address',
'Cisco-IOS-XR-ipv6-nd-cfg', False),
_MetaInfoClassMember('zone', ATTRIBUTE, 'str' , None, None,
[], [],
''' IPv6 address zone
''',
'zone',
'Cisco-IOS-XR-ipv6-nd-cfg', False),
],
'Cisco-IOS-XR-ipv6-nd-cfg',
'neighbor',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_cfg'
),
},
'Ipv6Neighbor.Neighbors' : {
'meta_info' : _MetaInfoClass('Ipv6Neighbor.Neighbors',
False,
[
_MetaInfoClassMember('neighbor', REFERENCE_LIST, 'Neighbor' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_cfg', 'Ipv6Neighbor.Neighbors.Neighbor',
[], [],
''' IPv6 neighbor configuration
''',
'neighbor',
'Cisco-IOS-XR-ipv6-nd-cfg', False),
],
'Cisco-IOS-XR-ipv6-nd-cfg',
'neighbors',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_cfg'
),
},
'Ipv6Neighbor' : {
'meta_info' : _MetaInfoClass('Ipv6Neighbor',
False,
[
_MetaInfoClassMember('neighbors', REFERENCE_CLASS, 'Neighbors' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_cfg', 'Ipv6Neighbor.Neighbors',
[], [],
''' IPv6 neighbors
''',
'neighbors',
'Cisco-IOS-XR-ipv6-nd-cfg', False),
_MetaInfoClassMember('scavenge-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '43200')], [],
''' Set lifetime for stale neighbor
''',
'scavenge_timeout',
'Cisco-IOS-XR-ipv6-nd-cfg', False),
],
'Cisco-IOS-XR-ipv6-nd-cfg',
'ipv6-neighbor',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_cfg'
),
},
}
_meta_table['Ipv6Neighbor.Neighbors.Neighbor']['meta_info'].parent =_meta_table['Ipv6Neighbor.Neighbors']['meta_info']
_meta_table['Ipv6Neighbor.Neighbors']['meta_info'].parent =_meta_table['Ipv6Neighbor']['meta_info']
| 111pontes/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_ipv6_nd_cfg.py | Python | apache-2.0 | 5,876 |
# -*- coding: utf-8 -*-
"""
News for state space models
Author: Chad Fulton
License: BSD-3
"""
import numpy as np
import pandas as pd
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.summary import Summary
from statsmodels.iolib.tableformatting import fmt_params
class NewsResults(object):
"""
Impacts of data revisions and news on estimates of variables of interest
Parameters
----------
news_results : SimpleNamespace instance
Results from `KalmanSmoother.news`.
model : MLEResults
The results object associated with the model from which the NewsResults
was generated.
updated : MLEResults
The results object associated with the model containing the updated
dataset.
previous : MLEResults
The results object associated with the model containing the previous
dataset.
impacted_variable : str, list, array, or slice, optional
Observation variable label or slice of labels specifying particular
impacted variables to display in output. The impacted variable(s)
describe the variables that were *affected* by the news. If you do not
know the labels for the variables, check the `endog_names` attribute of
the model instance.
tolerance : float, optional
The numerical threshold for determining zero impact. Default is that
any impact less than 1e-10 is assumed to be zero.
row_labels : iterable
Row labels (often dates) for the impacts of the revisions and news.
Attributes
----------
total_impacts : pd.Series
Updates to forecasts of impacted variables from both news and data
revisions, E[y^i | post] - E[y^i | previous].
update_impacts : pd.Series
Updates to forecasts of impacted variables from the news,
E[y^i | post] - E[y^i | revisions] where y^i are the impacted variables
of interest.
revision_impacts : pd.Series
Updates to forecasts of impacted variables from data revisions,
E[y^i | revisions] - E[y^i | previous].
news : pd.Series
The unexpected component of the updated data,
E[y^u | post] - E[y^u | revisions] where y^u are the updated variables.
weights : pd.Series
Weights describing the effect of news on variables of interest.
update_forecasts : pd.Series
Forecasts based on the previous dataset of the variables that were
updated, E[y^u | previous].
update_realized : pd.Series
Actual observed data associated with the variables that were
updated, y^u
prev_impacted_forecasts : pd.Series
Previous forecast of the variables of interest, E[y^i | previous].
post_impacted_forecasts : pd.Series
Forecast of the variables of interest after taking into account both
revisions and updates, E[y^i | post].
revisions_iloc : pd.DataFrame
The integer locations of the data revisions in the dataset.
revisions_ix : pd.DataFrame
The label-based locations of the data revisions in the dataset.
updates_iloc : pd.DataFrame
The integer locations of the updated data points.
updates_ix : pd.DataFrame
The label-based locations of updated data points.
References
----------
.. [1] Bańbura, Marta, and Michele Modugno.
"Maximum likelihood estimation of factor models on datasets with
arbitrary pattern of missing data."
Journal of Applied Econometrics 29, no. 1 (2014): 133-160.
.. [2] Bańbura, Marta, Domenico Giannone, and Lucrezia Reichlin.
"Nowcasting."
The Oxford Handbook of Economic Forecasting. July 8, 2011.
.. [3] Bańbura, Marta, Domenico Giannone, Michele Modugno, and Lucrezia
Reichlin.
"Now-casting and the real-time data flow."
In Handbook of economic forecasting, vol. 2, pp. 195-237.
Elsevier, 2013.
"""
def __init__(self, news_results, model, updated, previous,
             impacted_variable=None, tolerance=1e-10, row_labels=None):
    """Build all impact/news tables; see the class docstring for parameters."""
    # Note: `model` will be the same as one of `revised` or `previous`, but
    # we need to save it as self.model so that the `predict_dates`, which
    # were generated by the `_get_prediction_index` call, will be available
    # for use by the base wrapping code.
    self.model = model
    self.updated = updated
    self.previous = previous
    self.news_results = news_results
    self._impacted_variable = impacted_variable
    self._tolerance = tolerance
    self.row_labels = row_labels
    self.params = []  # required for `summary` to work

    # Labels of the observed (endog) variables.
    columns = np.atleast_1d(self.updated.model.endog_names)

    # E[y^i | post]
    self.post_impacted_forecasts = pd.DataFrame(
        news_results.post_impacted_forecasts.T,
        index=self.row_labels, columns=columns)
    # E[y^i | previous]
    self.prev_impacted_forecasts = pd.DataFrame(
        news_results.prev_impacted_forecasts.T,
        index=self.row_labels, columns=columns)
    # E[y^i | post] - E[y^i | revisions]
    self.update_impacts = pd.DataFrame(
        news_results.update_impacts,
        index=self.row_labels, columns=columns)
    # E[y^i | revisions] - E[y^i | previous]
    self.revision_impacts = pd.DataFrame(
        news_results.revision_impacts,
        index=self.row_labels, columns=columns)
    # E[y^i | post] - E[y^i | previous]
    self.total_impacts = (self.post_impacted_forecasts -
                          self.prev_impacted_forecasts)

    # Indices of revisions and updates
    index = self.updated.model._index
    self.revisions_iloc = pd.DataFrame(
        list(zip(*news_results.revisions_ix)),
        index=['revision date', 'revised variable']).T
    iloc = self.revisions_iloc
    if len(iloc) > 0:
        # Translate integer positions into date / variable labels.
        self.revisions_ix = pd.DataFrame({
            'revision date': index[iloc['revision date']],
            'revised variable': columns[iloc['revised variable']]})
    else:
        self.revisions_ix = iloc.copy()

    self.updates_iloc = pd.DataFrame(
        list(zip(*news_results.updates_ix)),
        index=['update date', 'updated variable']).T
    iloc = self.updates_iloc
    if len(iloc) > 0:
        self.updates_ix = pd.DataFrame({
            'update date': index[iloc['update date']],
            'updated variable': columns[iloc['updated variable']]})
    else:
        self.updates_ix = iloc.copy()

    # Wrap forecasts and forecasts errors
    ix = pd.MultiIndex.from_arrays([self.updates_ix['update date'],
                                    self.updates_ix['updated variable']])

    # E[y^u | post] - E[y^u | previous]
    if news_results.news is None:
        # No updates at all -> empty series with the params' float dtype.
        self.news = pd.Series([], index=ix, name='news',
                              dtype=model.params.dtype)
    else:
        self.news = pd.Series(news_results.news, index=ix, name='news')
    # E[y^u | previous]
    if news_results.update_forecasts is None:
        self.update_forecasts = pd.Series([], index=ix,
                                          dtype=model.params.dtype)
    else:
        self.update_forecasts = pd.Series(
            news_results.update_forecasts, index=ix)
    # y^u
    if news_results.update_realized is None:
        self.update_realized = pd.Series([], index=ix,
                                         dtype=model.params.dtype)
    else:
        self.update_realized = pd.Series(
            news_results.update_realized, index=ix)

    cols = pd.MultiIndex.from_product([self.row_labels, columns])
    # reshaped version of gain matrix E[y A'] E[A A']^{-1}
    # NOTE(review): transpose(0, 1, 2) is the identity permutation (a
    # no-op before the reshape) - confirm whether a different axis order
    # (e.g. (0, 2, 1)) was intended here.
    if len(self.updates_iloc):
        weights = news_results.gain.transpose(0, 1, 2).reshape(
            len(cols), len(ix))
    else:
        weights = np.zeros((len(cols), len(ix)))
    self.weights = pd.DataFrame(weights, index=cols, columns=ix).T
    self.weights.columns.names = ['impact date', 'impacted variable']
@property
def impacted_variable(self):
    """Label(s) selecting which impacted variables appear in output tables."""
    return self._impacted_variable
@impacted_variable.setter
def impacted_variable(self, value):
    # Stored as-is; the filtering is applied lazily by the table-building
    # properties (`impacts`, `details_by_impact`, ...).
    self._impacted_variable = value
@property
def tolerance(self):
    """Numerical threshold below which a weight/impact is treated as zero."""
    return self._tolerance
@tolerance.setter
def tolerance(self, value):
    # Stored as-is; applied when masking near-zero weights/impacts.
    self._tolerance = value
@property
def data_revisions(self):
    """
    Revisions to data points that existed in the previous dataset

    Returns
    -------
    data_revisions : pd.DataFrame
        Index is as MultiIndex consisting of `revision date` and
        `revised variable`. The columns are:

        - `observed (prev)`: the value of the data as it was observed
          in the previous dataset.
        - `revised`: the revised value of the data, as it is observed
          in the new dataset

    See also
    --------
    data_updates
    """
    # Save revisions data
    data = self.revisions_ix.copy()
    # Look up the raw endog value at each (date, variable) integer position
    # in both datasets.
    data['observed (prev)'] = [
        self.previous.model.endog[row[0], row[1]]
        for _, row in self.revisions_iloc.iterrows()]
    data['revised'] = [
        self.updated.model.endog[row[0], row[1]]
        for _, row in self.revisions_iloc.iterrows()]
    data.index = pd.MultiIndex.from_arrays([data['revision date'],
                                            data['revised variable']])
    # The (date, variable) pair is now the index, so drop the columns.
    data = data.sort_index().drop(['revision date',
                                   'revised variable'], axis=1)
    return data
@property
def data_updates(self):
    """
    Updated data; new entries that did not exist in the previous dataset

    Returns
    -------
    data_updates : pd.DataFrame
        Index is as MultiIndex consisting of `update date` and
        `updated variable`. The columns are:

        - `forecast (prev)`: the previous forecast of the new entry,
          based on the information available in the previous dataset
          (recall that for these updated data points, the previous dataset
          had no observed value for them at all)
        - `observed`: the value of the new entry, as it is observed in the
          new dataset

    See also
    --------
    data_revisions
    """
    # Align realized values with their previous forecasts on the
    # (update date, updated variable) MultiIndex.
    data = pd.concat([self.update_realized, self.update_forecasts],
                     axis=1).sort_index().reset_index()
    data.columns = (data.columns[:2].tolist() +
                    ['observed', 'forecast (prev)'])
    data.index = pd.MultiIndex.from_arrays([data['update date'],
                                            data['updated variable']])
    data = data.sort_index().drop(['update date',
                                   'updated variable'], axis=1)
    return data
@property
def details_by_impact(self):
    """
    Details of forecast revisions from news, organized by impacts first

    Returns
    -------
    details : pd.DataFrame
        Index is as MultiIndex consisting of:

        - `impact date`: the date of the impact on the variable of interest
        - `impacted variable`: the variable that is being impacted
        - `update date`: the date of the data update, that results in
          `news` that impacts the forecast of variables of interest
        - `updated variable`: the variable being updated, that results in
          `news` that impacts the forecast of variables of interest

        The columns are:

        - `forecast (prev)`: the previous forecast of the new entry,
          based on the information available in the previous dataset
        - `observed`: the value of the new entry, as it is observed in the
          new dataset
        - `news`: the news associated with the update (this is just the
          forecast error: `observed` - `forecast (prev)`)
        - `weight`: the weight describing how the `news` effects the
          forecast of the variable of interest
        - `impact`: the impact of the `news` on the forecast of the
          variable of interest

    Notes
    -----
    This table decomposes updated forecasts of variables of interest from
    the `news` associated with each updated datapoint from the new data
    release.

    This table does not summarize the impacts or show the effect of
    revisions. That information can be found in the `impacts` table.

    This form of the details table is organized so that the impacted
    dates / variables are first in the index. This is convenient for
    slicing by impacted variables / dates to view the details of data
    updates for a particular variable or date.

    However, since the `forecast (prev)` and `observed` columns have a lot
    of duplication, printing the entire table gives a result that is less
    easy to parse than that produced by the `details_by_update` property.
    `details_by_update` contains the same information but is organized to
    be more convenient for displaying the entire table of detailed updates.
    At the same time, `details_by_update` is less convenient for
    subsetting.

    See Also
    --------
    details_by_update
    impacts
    """
    # Start from the weight of each update on each impact; one row per
    # (impact date, impacted variable, update date, updated variable).
    df = self.weights.stack(level=[0, 1]).rename('weight').to_frame()
    if len(self.updates_iloc):
        df['forecast (prev)'] = self.update_forecasts
        df['observed'] = self.update_realized
        df['news'] = self.news
        # impact = news x weight
        df['impact'] = df['news'] * df['weight']
    else:
        df['forecast (prev)'] = []
        df['observed'] = []
        df['news'] = []
        df['impact'] = []
    df = df[['observed', 'forecast (prev)', 'news', 'weight', 'impact']]
    # Put (impact date, impacted variable) first in the index.
    df = df.reorder_levels([2, 3, 0, 1]).sort_index()

    if self.impacted_variable is not None and len(df) > 0:
        df = df.loc[np.s_[:, self.impacted_variable], :]

    # Hide rows whose weight is numerically zero.
    mask = np.abs(df['weight']) > self.tolerance
    return df[mask]
@property
def details_by_update(self):
    """
    Details of forecast revisions from news, organized by updates first

    Returns
    -------
    details : pd.DataFrame
        Index is as MultiIndex consisting of:

        - `update date`: the date of the data update, that results in
          `news` that impacts the forecast of variables of interest
        - `updated variable`: the variable being updated, that results in
          `news` that impacts the forecast of variables of interest
        - `forecast (prev)`: the previous forecast of the new entry,
          based on the information available in the previous dataset
        - `observed`: the value of the new entry, as it is observed in the
          new dataset
        - `impact date`: the date of the impact on the variable of interest
        - `impacted variable`: the variable that is being impacted

        The columns are:

        - `news`: the news associated with the update (this is just the
          forecast error: `observed` - `forecast (prev)`)
        - `weight`: the weight describing how the `news` effects the
          forecast of the variable of interest
        - `impact`: the impact of the `news` on the forecast of the
          variable of interest

    Notes
    -----
    This table decomposes updated forecasts of variables of interest from
    the `news` associated with each updated datapoint from the new data
    release.

    This table does not summarize the impacts or show the effect of
    revisions. That information can be found in the `impacts` table.

    This form of the details table is organized so that the updated
    dates / variables are first in the index, and in this table the index
    also contains the forecasts and observed values of the updates. This is
    convenient for displaying the entire table of detailed updates because
    it allows sparsifying duplicate entries.

    However, since it includes forecasts and observed values in the index
    of the table, it is not convenient for subsetting by the variable of
    interest. Instead, the `details_by_impact` property is organized to
    make slicing by impacted variables / dates easy. This allows, for
    example, viewing the details of data updates on a particular variable
    or date of interest.

    See Also
    --------
    details_by_impact
    impacts
    """
    # Same base data as `details_by_impact`: one row per
    # (impact date, impacted variable, update date, updated variable).
    df = self.weights.stack(level=[0, 1]).rename('weight').to_frame()
    if len(self.updates_iloc):
        df['forecast (prev)'] = self.update_forecasts
        df['observed'] = self.update_realized
        df['news'] = self.news
        # impact = news x weight
        df['impact'] = df['news'] * df['weight']
    else:
        df['forecast (prev)'] = []
        df['observed'] = []
        df['news'] = []
        df['impact'] = []

    df = df[['forecast (prev)', 'observed', 'news',
             'weight', 'impact']]
    df = df.reset_index()
    # Move update info (including the forecast / observed values) into the
    # index, with the update keys first.
    keys = ['update date', 'updated variable', 'observed',
            'forecast (prev)', 'impact date', 'impacted variable']
    df.index = pd.MultiIndex.from_arrays([df[key] for key in keys])
    details = df.drop(keys, axis=1).sort_index()

    if self.impacted_variable is not None and len(df) > 0:
        details = details.loc[
            np.s_[:, :, :, :, :, self.impacted_variable], :]

    # Hide rows whose weight is numerically zero.
    mask = np.abs(details['weight']) > self.tolerance
    return details[mask]
@property
def impacts(self):
    """
    Impacts from news and revisions on all dates / variables of interest

    Returns
    -------
    impacts : pd.DataFrame
        Index is as MultiIndex consisting of:

        - `impact date`: the date of the impact on the variable of interest
        - `impacted variable`: the variable that is being impacted

        The columns are:

        - `estimate (prev)`: the previous estimate / forecast of the
          date / variable of interest.
        - `impact of revisions`: the impact of all data revisions on
          the estimate of the date / variable of interest.
        - `impact of news`: the impact of all news on the estimate of
          the date / variable of interest.
        - `total impact`: the total impact of both revisions and news on
          the estimate of the date / variable of interest.
        - `estimate (new)`: the new estimate / forecast of the
          date / variable of interest after taking into account the effects
          of the revisions and news.

    Notes
    -----
    This table decomposes updated forecasts of variables of interest into
    the overall effect from revisions and news.

    This table does not break down the detail by the updated
    dates / variables. That information can be found in the
    `details_by_impact` `details_by_update` tables.

    See Also
    --------
    details_by_impact
    details_by_update
    """
    # Summary of impacts
    impacts = pd.concat([
        self.prev_impacted_forecasts.unstack().rename('estimate (prev)'),
        self.revision_impacts.unstack().rename('impact of revisions'),
        self.update_impacts.unstack().rename('impact of news'),
        self.post_impacted_forecasts.unstack().rename('estimate (new)')],
        axis=1)
    # Missing impacts (no revisions / no news) count as zero.
    impacts['impact of revisions'] = (
        impacts['impact of revisions'].fillna(0))
    impacts['impact of news'] = (
        impacts['impact of news'].fillna(0))
    impacts['total impact'] = (impacts['impact of revisions'] +
                               impacts['impact of news'])
    # Put the date level first: (impact date, impacted variable).
    impacts = impacts.reorder_levels([1, 0]).sort_index()
    impacts.index.names = ['impact date', 'impacted variable']
    impacts = impacts[['estimate (prev)', 'impact of revisions',
                       'impact of news', 'total impact', 'estimate (new)']]

    if self.impacted_variable is not None:
        impacts = impacts.loc[np.s_[:, self.impacted_variable], :]

    # Hide rows where both impacts are numerically zero.
    tmp = np.abs(impacts[['impact of revisions', 'impact of news']])
    mask = (tmp > self.tolerance).any(axis=1)

    return impacts[mask]
def summary_impacts(self, impact_date=None, impacted_variable=None,
                    groupby='impact date', show_revisions_columns=None,
                    sparsify=True, float_format='%.2f'):
    """
    Create summary table with detailed impacts from news; by date, variable

    Parameters
    ----------
    impact_date : int, str, datetime, list, array, or slice, optional
        Observation index label or slice of labels specifying particular
        impact periods to display. The impact date(s) describe the periods
        in which impacted variables were *affected* by the news. If this
        argument is given, the output table will only show this impact date
        or dates. Note that this argument is passed to the Pandas `loc`
        accessor, and so it should correspond to the labels of the model's
        index. If the model was created with data in a list or numpy array,
        then these labels will be zero-indexes observation integers.
    impacted_variable : str, list, array, or slice, optional
        Observation variable label or slice of labels specifying particular
        impacted variables to display. The impacted variable(s) describe
        the variables that were *affected* by the news. If you do not know
        the labels for the variables, check the `endog_names` attribute of
        the model instance.
    groupby : {impact date, impacted variable}
        The primary variable for grouping results in the impacts table. The
        default is to group by impact date.
    show_revisions_columns : bool, optional
        If set to False, the impacts table will not show the impacts from
        data revisions or the total impacts. Default is to show the
        revisions and totals columns if any revisions were made and
        otherwise to hide them.
    sparsify : bool, optional, default True
        Set to False for the table to include every one of the multiindex
        keys at each row.
    float_format : str, optional
        Formatter format string syntax for converting numbers to strings.
        Default is '%.2f'.

    Returns
    -------
    impacts_table : SimpleTable
        Table describing total impacts from both revisions and news. See
        the documentation for the `impacts` attribute for more details
        about the index and columns.

    See Also
    --------
    impacts
    """
    # Squeeze for univariate models
    if impacted_variable is None and self.updated.model.k_endog == 1:
        impacted_variable = self.updated.model.endog_names

    # Default is to only show the revisions columns if there were any
    # revisions (otherwise it would just be a column of zeros)
    if show_revisions_columns is None:
        show_revisions_columns = len(self.revisions_iloc) > 0

    # Select only the variables / dates of interest
    s = list(np.s_[:, :])
    if impact_date is not None:
        s[0] = np.s_[impact_date]
    if impacted_variable is not None:
        s[1] = np.s_[impacted_variable]
    s = tuple(s)
    impacts = self.impacts.loc[s, :]

    # Make the first index level the groupby level
    groupby = groupby.lower()
    if groupby in ['impacted variable', 'impacted_variable']:
        impacts.index = impacts.index.swaplevel(1, 0)
    elif groupby not in ['impact date', 'impact_date']:
        # (fixed: added the missing space before "Got", matching the
        # analogous message in `summary_details`)
        raise ValueError('Invalid groupby for impacts table. Valid options'
                         ' are "impact date" or "impacted variable".'
                         f' Got "{groupby}".')
    impacts = impacts.sort_index()

    # Drop the non-groupby level if there's only one value
    tmp_index = impacts.index.remove_unused_levels()
    k_vars = len(tmp_index.levels[1])
    removed_level = None
    if sparsify and k_vars == 1:
        name = tmp_index.names[1]
        value = tmp_index.levels[1][0]
        removed_level = f'{name} = {value}'
        impacts.index = tmp_index.droplevel(1)
        impacts = impacts.applymap(
            lambda num: '' if pd.isnull(num) else float_format % num)
        impacts = impacts.reset_index()
        impacts.iloc[:, 0] = impacts.iloc[:, 0].map(str)
    else:
        impacts = impacts.reset_index()
        impacts.iloc[:, :2] = impacts.iloc[:, :2].applymap(str)
        impacts.iloc[:, 2:] = impacts.iloc[:, 2:].applymap(
            lambda num: '' if pd.isnull(num) else float_format % num)

    # Sparsify the groupby column
    if sparsify and groupby in impacts:
        mask = impacts[groupby] == impacts[groupby].shift(1)
        tmp = impacts.loc[mask, groupby]
        if len(tmp) > 0:
            impacts.loc[mask, groupby] = ''

    # Drop revisions and totals columns if applicable
    if not show_revisions_columns:
        impacts.drop(['impact of revisions', 'total impact'], axis=1,
                     inplace=True)

    params_data = impacts.values
    params_header = impacts.columns.tolist()
    params_stubs = None

    title = 'Impacts'
    if removed_level is not None:
        # (fixed: previously compared `groupby == 'date'`, which could
        # never match the normalized groupby values, so the title always
        # used 'for'; use 'on' when grouping by impact date.)
        join = 'on' if groupby in ('impact date', 'impact_date') else 'for'
        title += f' {join} [{removed_level}]'
    impacts_table = SimpleTable(
        params_data, params_header, params_stubs,
        txt_fmt=fmt_params, title=title)

    return impacts_table
def summary_details(self, impact_date=None, impacted_variable=None,
                    update_date=None, updated_variable=None,
                    groupby='update date', sparsify=True,
                    float_format='%.2f', multiple_tables=False):
    """
    Create summary table with detailed impacts from news; by date, variable

    Parameters
    ----------
    impact_date : int, str, datetime, list, array, or slice, optional
        Observation index label or slice of labels specifying particular
        impact periods to display. The impact date(s) describe the periods
        in which impacted variables were *affected* by the news. If this
        argument is given, the output table will only show this impact date
        or dates. Note that this argument is passed to the Pandas `loc`
        accessor, and so it should correspond to the labels of the model's
        index. If the model was created with data in a list or numpy array,
        then these labels will be zero-indexes observation integers.
    impacted_variable : str, list, array, or slice, optional
        Observation variable label or slice of labels specifying particular
        impacted variables to display. The impacted variable(s) describe
        the variables that were *affected* by the news. If you do not know
        the labels for the variables, check the `endog_names` attribute of
        the model instance.
    update_date : int, str, datetime, list, array, or slice, optional
        Observation index label or slice of labels specifying particular
        updated periods to display. The updated date(s) describe the
        periods in which the new data points were available that generated
        the news). See the note on `impact_date` for details about what
        these labels are.
    updated_variable : str, list, array, or slice, optional
        Observation variable label or slice of labels specifying particular
        updated variables to display. The updated variable(s) describe the
        variables that were *affected* by the news. If you do not know the
        labels for the variables, check the `endog_names` attribute of the
        model instance.
    groupby : {update date, updated variable, impact date, \
            impacted variable}
        The primary variable for grouping results in the details table. The
        default is to group by update date.
    sparsify : bool, optional, default True
        Set to False for the table to include every one of the multiindex
        keys at each row.
    float_format : str, optional
        Formatter format string syntax for converting numbers to strings.
        Default is '%.2f'.
    multiple_tables : bool, optional
        If set to True, this function will return a list of tables, one
        table for each of the unique `groupby` levels. Default is False,
        in which case this function returns a single table.

    Returns
    -------
    details_table : SimpleTable or list of SimpleTable
        Table or list of tables describing how the news from each update
        (i.e. news from a particular variable / date) translates into
        changes to the forecasts of each impacted variable / date.

        This table contains information about the updates and about the
        impacts. Updates are newly observed datapoints that were not
        available in the previous results set. Each update leads to news,
        and the news may cause changes in the forecasts of the impacted
        variables. The amount that a particular piece of news (from an
        update to some variable at some date) impacts a variable at some
        date depends on weights that can be computed from the model
        results.

        The data contained in this table that refer to updates are:

        - `update date` : The date at which a new datapoint was added.
        - `updated variable` : The variable for which a new datapoint was
          added.
        - `forecast (prev)` : The value that had been forecast by the
          previous model for the given updated variable and date.
        - `observed` : The observed value of the new datapoint.
        - `news` : The news is the difference between the observed value
          and the previously forecast value for a given updated variable
          and date.

        The data contained in this table that refer to impacts are:

        - `impact date` : A date associated with an impact.
        - `impacted variable` : A variable that was impacted by the news.
        - `weight` : The weight of news from a given `update date` and
          `update variable` on a given `impacted variable` at a given
          `impact date`.
        - `impact` : The revision to the smoothed estimate / forecast of
          the impacted variable at the impact date based specifically on
          the news generated by the `updated variable` at the
          `update date`.

    See Also
    --------
    details_by_impact
    details_by_update
    """
    # Squeeze for univariate models
    if self.updated.model.k_endog == 1:
        if impacted_variable is None:
            impacted_variable = self.updated.model.endog_names
        if updated_variable is None:
            updated_variable = self.updated.model.endog_names

    # Select only the variables / dates of interest
    s = list(np.s_[:, :, :, :, :, :])
    if impact_date is not None:
        s[0] = np.s_[impact_date]
    if impacted_variable is not None:
        s[1] = np.s_[impacted_variable]
    if update_date is not None:
        s[2] = np.s_[update_date]
    if updated_variable is not None:
        s[3] = np.s_[updated_variable]
    s = tuple(s)
    details = self.details_by_impact.loc[s, :]

    # Make the first index level the groupby level
    groupby = groupby.lower().replace('_', ' ')
    groupby_overall = 'impact'
    levels_order = [0, 1, 2, 3]
    if groupby == 'update date':
        levels_order = [2, 3, 0, 1]
        groupby_overall = 'update'
    elif groupby == 'updated variable':
        levels_order = [3, 2, 1, 0]
        groupby_overall = 'update'
    elif groupby == 'impacted variable':
        levels_order = [1, 0, 3, 2]
    elif groupby != 'impact date':
        # (fixed: added the missing space after the comma in the message)
        raise ValueError('Invalid groupby for details table. Valid options'
                         ' are "update date", "updated variable",'
                         ' "impact date", or "impacted variable".'
                         f' Got "{groupby}".')
    details.index = (details.index.reorder_levels(levels_order)
                            .remove_unused_levels())
    details = details.sort_index()

    # If our overall group-by is `update`, move forecast (prev) and
    # observed into the index
    base_levels = [0, 1, 2, 3]
    if groupby_overall == 'update':
        details.set_index(['observed', 'forecast (prev)'], append=True,
                          inplace=True)
        details.index = details.index.reorder_levels([0, 1, 4, 5, 2, 3])
        base_levels = [0, 1, 4, 5]

    # Drop the non-groupby levels if there's only one value
    tmp_index = details.index.remove_unused_levels()
    n_levels = len(tmp_index.levels)
    k_level_values = [len(tmp_index.levels[i]) for i in range(n_levels)]
    removed_levels = []
    if sparsify:
        # Iterate from the innermost level outwards, never dropping the
        # groupby level itself (the [:-1] below).
        for i in sorted(base_levels)[::-1][:-1]:
            if k_level_values[i] == 1:
                name = tmp_index.names[i]
                value = tmp_index.levels[i][0]
                can_drop = (
                    (name == 'update date' and update_date is not None) or
                    (name == 'updated variable' and
                     updated_variable is not None) or
                    (name == 'impact date' and impact_date is not None) or
                    (name == 'impacted variable' and
                     (impacted_variable is not None or
                      self.impacted_variable is not None)))
                if can_drop or not multiple_tables:
                    removed_levels.insert(0, f'{name} = {value}')
                    details.index = tmp_index = tmp_index.droplevel(i)

    # Move everything to columns
    details = details.reset_index()

    # Function for formatting numbers
    def str_format(num, mark_ones=False, mark_zeroes=False):
        if pd.isnull(num):
            out = ''
        elif mark_ones and np.abs(1 - num) < self.tolerance:
            out = '1.0'
        elif mark_zeroes and np.abs(num) < self.tolerance:
            out = '0'
        else:
            out = float_format % num
        return out

    # Function to create the table
    def create_table(details, removed_levels):
        # Convert everything to strings
        for key in ['observed', 'forecast (prev)', 'news', 'weight',
                    'impact']:
            if key in details:
                args = (
                    # mark_ones
                    True if key in ['weight'] else False,
                    # mark_zeroes
                    True if key in ['weight', 'impact'] else False)
                details[key] = details[key].apply(str_format, args=args)
        for key in ['update date', 'impact date']:
            if key in details:
                details[key] = details[key].apply(str)

        # Sparsify index columns
        if sparsify:
            sparsify_cols = ['update date', 'updated variable',
                             'impact date', 'impacted variable']

            if groupby_overall == 'update':
                sparsify_cols += ['observed', 'forecast (prev)']

            for key in sparsify_cols:
                if key in details:
                    mask = details[key] == details[key].shift(1)
                    details.loc[mask, key] = ''

        params_data = details.values
        params_header = details.columns.tolist()
        params_stubs = None

        title = 'Details'
        if len(removed_levels):
            title += ' for [' + ', '.join(removed_levels) + ']'
        return SimpleTable(params_data, params_header, params_stubs,
                           txt_fmt=fmt_params, title=title)

    if multiple_tables:
        details_table = []
        for item in details[groupby].unique():
            mask = details[groupby] == item
            item_details = details[mask].drop(groupby, axis=1)
            item_removed_levels = [f'{groupby} = {item}'] + removed_levels
            details_table.append(create_table(item_details,
                                              item_removed_levels))
    else:
        details_table = create_table(details, removed_levels)

    return details_table
def summary_revisions(self, sparsify=True):
"""
Create summary table showing revisions to the previous results' data
Parameters
----------
sparsify : bool, optional, default True
Set to False for the table to include every one of the multiindex
keys at each row.
Returns
-------
revisions_table : SimpleTable
Table showing revisions to the previous results' data. Columns are:
- `revision date` : date associated with a revised data point
- `revised variable` : variable that was revised at `revision date`
- `observed (prev)` : the observed value prior to the revision
- `revised` : the new value after the revision
"""
data = self.data_revisions.sort_index().reset_index()
data[['revision date', 'revised variable']] = (
data[['revision date', 'revised variable']].applymap(str))
data.iloc[:, 2:] = data.iloc[:, 2:].applymap(
lambda num: '' if pd.isnull(num) else '%.2f' % num)
# Sparsify the date column
if sparsify:
mask = data['revision date'] == data['revision date'].shift(1)
data.loc[mask, 'revision date'] = ''
params_data = data.values
params_header = data.columns.tolist()
params_stubs = None
title = 'Revisions to dataset:'
revisions_table = SimpleTable(
params_data, params_header, params_stubs,
txt_fmt=fmt_params, title=title)
return revisions_table
def summary_news(self, sparsify=True):
"""
Create summary table showing news from new data since previous results
Parameters
----------
sparsify : bool, optional, default True
Set to False for the table to include every one of the multiindex
keys at each row.
Returns
-------
updates_table : SimpleTable
Table showing new datapoints that were not in the previous results'
data. Columns are:
- `update date` : date associated with a new data point.
- `updated variable` : variable for which new data was added at
`update date`.
- `forecast (prev)` : the forecast value for the updated variable
at the update date in the previous results object (i.e. prior to
the data being available).
- `observed` : the observed value of the new datapoint.
See Also
--------
data_updates
"""
data = pd.merge(
self.data_updates, self.news, left_index=True,
right_index=True).sort_index().reset_index()
data[['update date', 'updated variable']] = (
data[['update date', 'updated variable']].applymap(str))
data.iloc[:, 2:] = data.iloc[:, 2:].applymap(
lambda num: '' if pd.isnull(num) else '%.2f' % num)
# Sparsify the date column
if sparsify:
mask = data['update date'] == data['update date'].shift(1)
data.loc[mask, 'update date'] = ''
params_data = data.values
params_header = data.columns.tolist()
params_stubs = None
title = 'News from updated observations:'
updates_table = SimpleTable(
params_data, params_header, params_stubs,
txt_fmt=fmt_params, title=title)
return updates_table
    def summary(self, impact_date=None, impacted_variable=None,
                update_date=None, updated_variable=None,
                impacts_groupby='impact date', details_groupby='update date',
                show_revisions_columns=None, sparsify=True,
                include_details_tables=None, include_revisions_tables=False,
                float_format='%.2f'):
        """
        Create summary tables describing news and impacts

        Parameters
        ----------
        impact_date : int, str, datetime, list, array, or slice, optional
            Observation index label or slice of labels specifying particular
            impact periods to display. The impact date(s) describe the periods
            in which impacted variables were *affected* by the news. If this
            argument is given, the impact and details tables will only show
            this impact date or dates. Note that this argument is passed to the
            Pandas `loc` accessor, and so it should correspond to the labels of
            the model's index. If the model was created with data in a list or
            numpy array, then these labels will be zero-indexes observation
            integers.
        impacted_variable : str, list, array, or slice, optional
            Observation variable label or slice of labels specifying particular
            impacted variables to display. The impacted variable(s) describe
            the variables that were *affected* by the news. If you do not know
            the labels for the variables, check the `endog_names` attribute of
            the model instance.
        update_date : int, str, datetime, list, array, or slice, optional
            Observation index label or slice of labels specifying particular
            updated periods to display. The updated date(s) describe the
            periods in which the new data points were available that generated
            the news). See the note on `impact_date` for details about what
            these labels are.
        updated_variable : str, list, array, or slice, optional
            Observation variable label or slice of labels specifying particular
            updated variables to display. The updated variable(s) describe the
            variables that were *affected* by the news. If you do not know the
            labels for the variables, check the `endog_names` attribute of the
            model instance.
        impacts_groupby : {impact date, impacted date}
            The primary variable for grouping results in the impacts table. The
            default is to group by impact date.
        details_groupby : str
            One of "update date", "updated variable", "impact date", or
            "impacted variable". The primary variable for grouping results in
            the details table. Only used if the details tables are included.
            The default is to group by update date.
        show_revisions_columns : bool, optional
            If set to False, the impacts table will not show the impacts from
            data revisions or the total impacts. Default is to show the
            revisions and totals columns if any revisions were made and
            otherwise to hide them.
        sparsify : bool, optional, default True
            Set to False for the table to include every one of the multiindex
            keys at each row.
        include_details_tables : bool, optional
            If set to True, the summary will show tables describing the details
            of how news from specific updates translate into specific impacts.
            These tables can be very long, particularly in cases where there
            were many updates and in multivariate models. The default is to
            show detailed tables only for univariate models.
        include_revisions_tables : bool, optional
            If set to True, the summary will show tables describing the
            revisions and updates that lead to impacts on variables of
            interest.
        float_format : str, optional
            Formatter format string syntax for converting numbers to strings.
            Default is '%.2f'.

        Returns
        -------
        summary_tables : Summary
            Summary tables describing news and impacts. Basic tables include:

            - A table with general information about the sample.
            - A table describing the impacts of revisions and news.
            - Tables describing revisions in the dataset since the previous
              results set (unless `include_revisions_tables=False`).

            In univariate models or if `include_details_tables=True`, one or
            more tables will additionally be included describing the details
            of how news from specific updates translate into specific impacts.

        See Also
        --------
        summary_impacts
        summary_details
        summary_revisions
        summary_updates
        """
        # Default for include_details_tables: detail tables can be very long,
        # so only include them by default for univariate models.
        if include_details_tables is None:
            include_details_tables = self.updated.model.k_endog == 1

        # Model specification results
        model = self.model.model
        title = 'News'

        # Format the sample period as a pair of strings: the start label and
        # a "- end" label (integer observation numbers if there are no dates).
        def get_sample(model):
            if model._index_dates:
                ix = model._index
                d = ix[0]
                sample = ['%s' % d]
                d = ix[-1]
                sample += ['- ' + '%s' % d]
            else:
                sample = [str(0), ' - ' + str(model.nobs)]
            return sample
        previous_sample = get_sample(self.previous.model)
        revised_sample = get_sample(self.updated.model)

        # Standardize the model name as a list of str
        model_name = model.__class__.__name__

        # Top summary table
        top_left = [('Model:', [model_name]),
                    ('Date:', None),
                    ('Time:', None)]
        # `revised_sample[1][2:]` strips the leading '- ' from the
        # end-of-sample label produced by `get_sample`.
        top_right = [
            ('Original sample:', [previous_sample[0]]),
            ('', [previous_sample[1]]),
            ('Update through:', [revised_sample[1][2:]]),
            ('No. Revisions:', [len(self.revisions_ix)]),
            ('No. New datapoints:', [len(self.updates_ix)])]

        summary = Summary()
        # NOTE(review): `add_table_2cols` is called with `self`, and this
        # copies `endog_names` onto `self.model` beforehand — presumably the
        # table builder reads it from there; confirm against Summary's API.
        self.model.endog_names = self.model.model.endog_names
        summary.add_table_2cols(self, gleft=top_left, gright=top_right,
                                title=title)
        # `table_ix` tracks where the next table is inserted (after the top
        # summary table at position 0).
        table_ix = 1

        # Impact table
        summary.tables.insert(table_ix, self.summary_impacts(
            impact_date=impact_date, impacted_variable=impacted_variable,
            groupby=impacts_groupby,
            show_revisions_columns=show_revisions_columns, sparsify=sparsify,
            float_format=float_format))
        table_ix += 1

        # News table (only if there are any new datapoints)
        if len(self.updates_iloc) > 0:
            summary.tables.insert(
                table_ix, self.summary_news(sparsify=sparsify))
            table_ix += 1

        # Detail tables: one table per group in multivariate models, a single
        # table (wrapped in a list for uniform handling) otherwise.
        multiple_tables = self.updated.model.k_endog > 1
        details_tables = self.summary_details(
            impact_date=impact_date, impacted_variable=impacted_variable,
            groupby=details_groupby, sparsify=sparsify,
            float_format=float_format, multiple_tables=multiple_tables)
        if not multiple_tables:
            details_tables = [details_tables]
        if include_details_tables:
            for table in details_tables:
                summary.tables.insert(table_ix, table)
                table_ix += 1

        # Revisions table (only if requested and any revisions exist)
        if include_revisions_tables and len(self.revisions_iloc) > 0:
            summary.tables.insert(
                table_ix, self.summary_revisions(sparsify=sparsify))
            table_ix += 1

        return summary
| jseabold/statsmodels | statsmodels/tsa/statespace/news.py | Python | bsd-3-clause | 49,282 |
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from neon.initializers import Constant, Gaussian
from neon.layers import Conv, Dropout, Pooling, Affine, GeneralizedCost
from neon.models import Model
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti
def create_network(num_classes=101):
    """
    Build the C3D-style 3D-convolutional video classification network.

    Parameters
    ----------
    num_classes : int, optional
        Number of output classes for the final softmax layer. The default,
        101, matches the output size that was previously hard-coded here
        (presumably the UCF-101 dataset this example targets — confirm
        against the example's data pipeline).

    Returns
    -------
    (Model, GeneralizedCost)
        The network model and a multiclass cross-entropy cost function.
    """
    # weight initialization
    g1 = Gaussian(scale=0.01)
    g5 = Gaussian(scale=0.005)
    c0 = Constant(0)
    c1 = Constant(1)

    # model initialization: 3x3x3 convolutions with same-padding, and 2x
    # spatio-temporal downsampling at each pooling stage (the first pooling
    # layer keeps the temporal dimension intact).
    padding = {'pad_d': 1, 'pad_h': 1, 'pad_w': 1}
    strides = {'str_d': 2, 'str_h': 2, 'str_w': 2}

    layers = [
        Conv((3, 3, 3, 64), padding=padding, init=g1, bias=c0, activation=Rectlin()),
        Pooling((1, 2, 2), strides={'str_d': 1, 'str_h': 2, 'str_w': 2}),
        Conv((3, 3, 3, 128), padding=padding, init=g1, bias=c1, activation=Rectlin()),
        Pooling((2, 2, 2), strides=strides),
        Conv((3, 3, 3, 256), padding=padding, init=g1, bias=c1, activation=Rectlin()),
        Pooling((2, 2, 2), strides=strides),
        Conv((3, 3, 3, 256), padding=padding, init=g1, bias=c1, activation=Rectlin()),
        Pooling((2, 2, 2), strides=strides),
        Conv((3, 3, 3, 256), padding=padding, init=g1, bias=c1, activation=Rectlin()),
        Pooling((2, 2, 2), strides=strides),
        Affine(nout=2048, init=g5, bias=c1, activation=Rectlin()),
        Dropout(keep=0.5),
        Affine(nout=2048, init=g5, bias=c1, activation=Rectlin()),
        Dropout(keep=0.5),
        Affine(nout=num_classes, init=g1, bias=c0, activation=Softmax())
    ]
    return Model(layers=layers), GeneralizedCost(costfunc=CrossEntropyMulti())
| NervanaSystems/neon | examples/video-c3d/network.py | Python | apache-2.0 | 2,289 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
QtWidgets.py
---------------------
Date : November 2015
Copyright : (C) 2015 by Matthias Kuhn
Email : matthias at opengis dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'November 2015'
__copyright__ = '(C) 2015, Matthias Kuhn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
# Re-export all Qt Widgets names so this module can stand in for
# PyQt5.QtWidgets.
from PyQt5.QtWidgets import *

# Compatibility shim: provide QLayout.setMargin (not available in Qt5's
# QLayout) by applying the same margin to all four sides via
# setContentsMargins().
QLayout.setMargin = lambda self, m: self.setContentsMargins(m, m, m, m)
| dwadler/QGIS | python/PyQt/PyQt5/QtWidgets.py | Python | gpl-2.0 | 1,207 |
#! /usr/bin/env python
# $Id: test_option_lists.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for states.py.
"""
from __init__ import DocutilsTestSupport
def suite():
    """Build and return the parser test suite generated from ``totest``."""
    parser_suite = DocutilsTestSupport.ParserTestSuite()
    parser_suite.generateTests(totest)
    return parser_suite
totest = {}
totest['option_lists'] = [
["""\
Short options:
-a option -a
-b file option -b
-c name option -c
""",
"""\
<document source="test data">
<paragraph>
Short options:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
-a
<description>
<paragraph>
option -a
<option_list_item>
<option_group>
<option>
<option_string>
-b
<option_argument delimiter=" ">
file
<description>
<paragraph>
option -b
<option_list_item>
<option_group>
<option>
<option_string>
-c
<option_argument delimiter=" ">
name
<description>
<paragraph>
option -c
"""],
["""\
Long options:
--aaaa option --aaaa
--bbbb=file option --bbbb
--cccc name option --cccc
--d-e-f-g option --d-e-f-g
--h_i_j_k option --h_i_j_k
""",
"""\
<document source="test data">
<paragraph>
Long options:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
--aaaa
<description>
<paragraph>
option --aaaa
<option_list_item>
<option_group>
<option>
<option_string>
--bbbb
<option_argument delimiter="=">
file
<description>
<paragraph>
option --bbbb
<option_list_item>
<option_group>
<option>
<option_string>
--cccc
<option_argument delimiter=" ">
name
<description>
<paragraph>
option --cccc
<option_list_item>
<option_group>
<option>
<option_string>
--d-e-f-g
<description>
<paragraph>
option --d-e-f-g
<option_list_item>
<option_group>
<option>
<option_string>
--h_i_j_k
<description>
<paragraph>
option --h_i_j_k
"""],
["""\
Old GNU-style options:
+a option +a
+b file option +b
+c name option +c
""",
"""\
<document source="test data">
<paragraph>
Old GNU-style options:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
+a
<description>
<paragraph>
option +a
<option_list_item>
<option_group>
<option>
<option_string>
+b
<option_argument delimiter=" ">
file
<description>
<paragraph>
option +b
<option_list_item>
<option_group>
<option>
<option_string>
+c
<option_argument delimiter=" ">
name
<description>
<paragraph>
option +c
"""],
["""\
VMS/DOS-style options:
/A option /A
/B file option /B
/CCC option /CCC
/DDD string option /DDD
/EEE=int option /EEE
""",
"""\
<document source="test data">
<paragraph>
VMS/DOS-style options:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
/A
<description>
<paragraph>
option /A
<option_list_item>
<option_group>
<option>
<option_string>
/B
<option_argument delimiter=" ">
file
<description>
<paragraph>
option /B
<option_list_item>
<option_group>
<option>
<option_string>
/CCC
<description>
<paragraph>
option /CCC
<option_list_item>
<option_group>
<option>
<option_string>
/DDD
<option_argument delimiter=" ">
string
<description>
<paragraph>
option /DDD
<option_list_item>
<option_group>
<option>
<option_string>
/EEE
<option_argument delimiter="=">
int
<description>
<paragraph>
option /EEE
"""],
["""\
Mixed short, long, and VMS/DOS options:
-a option -a
--bbbb=file option -bbbb
/C option /C
--dddd name option --dddd
-e string option -e
/F file option /F
""",
"""\
<document source="test data">
<paragraph>
Mixed short, long, and VMS/DOS options:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
-a
<description>
<paragraph>
option -a
<option_list_item>
<option_group>
<option>
<option_string>
--bbbb
<option_argument delimiter="=">
file
<description>
<paragraph>
option -bbbb
<option_list_item>
<option_group>
<option>
<option_string>
/C
<description>
<paragraph>
option /C
<option_list_item>
<option_group>
<option>
<option_string>
--dddd
<option_argument delimiter=" ">
name
<description>
<paragraph>
option --dddd
<option_list_item>
<option_group>
<option>
<option_string>
-e
<option_argument delimiter=" ">
string
<description>
<paragraph>
option -e
<option_list_item>
<option_group>
<option>
<option_string>
/F
<option_argument delimiter=" ">
file
<description>
<paragraph>
option /F
"""],
["""\
Aliased options:
-a, --aaaa, /A option -a, --aaaa, /A
-b file, --bbbb=file, /B file option -b, --bbbb, /B
""",
"""\
<document source="test data">
<paragraph>
Aliased options:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
-a
<option>
<option_string>
--aaaa
<option>
<option_string>
/A
<description>
<paragraph>
option -a, --aaaa, /A
<option_list_item>
<option_group>
<option>
<option_string>
-b
<option_argument delimiter=" ">
file
<option>
<option_string>
--bbbb
<option_argument delimiter="=">
file
<option>
<option_string>
/B
<option_argument delimiter=" ">
file
<description>
<paragraph>
option -b, --bbbb, /B
"""],
["""\
Multiple lines in descriptions, aligned:
-a option -a, line 1
line 2
-b file option -b, line 1
line 2
""",
"""\
<document source="test data">
<paragraph>
Multiple lines in descriptions, aligned:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
-a
<description>
<paragraph>
option -a, line 1
line 2
<option_list_item>
<option_group>
<option>
<option_string>
-b
<option_argument delimiter=" ">
file
<description>
<paragraph>
option -b, line 1
line 2
"""],
["""\
Multiple lines in descriptions, not aligned:
-a option -a, line 1
line 2
-b file option -b, line 1
line 2
""",
"""\
<document source="test data">
<paragraph>
Multiple lines in descriptions, not aligned:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
-a
<description>
<paragraph>
option -a, line 1
line 2
<option_list_item>
<option_group>
<option>
<option_string>
-b
<option_argument delimiter=" ">
file
<description>
<paragraph>
option -b, line 1
line 2
"""],
["""\
Descriptions begin on next line:
-a
option -a, line 1
line 2
-b file
option -b, line 1
line 2
""",
"""\
<document source="test data">
<paragraph>
Descriptions begin on next line:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
-a
<description>
<paragraph>
option -a, line 1
line 2
<option_list_item>
<option_group>
<option>
<option_string>
-b
<option_argument delimiter=" ">
file
<description>
<paragraph>
option -b, line 1
line 2
"""],
["""\
Multiple body elements in descriptions:
-a option -a, para 1
para 2
-b file
option -b, para 1
para 2
""",
"""\
<document source="test data">
<paragraph>
Multiple body elements in descriptions:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
-a
<description>
<paragraph>
option -a, para 1
<paragraph>
para 2
<option_list_item>
<option_group>
<option>
<option_string>
-b
<option_argument delimiter=" ">
file
<description>
<paragraph>
option -b, para 1
<paragraph>
para 2
"""],
["""\
--option
empty item above, no blank line
""",
"""\
<document source="test data">
<paragraph>
--option
empty item above, no blank line
"""],
["""\
An option list using equals:
--long1=arg1 Description 1
--long2=arg2 Description 2
An option list using spaces:
--long1 arg1 Description 1
--long2 arg2 Description 2
An option list using mixed delimiters:
--long1=arg1 Description 1
--long2 arg2 Description 2
An option list using mixed delimiters in one line:
--long1=arg1, --long2 arg2 Description
""",
"""\
<document source="test data">
<paragraph>
An option list using equals:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
--long1
<option_argument delimiter="=">
arg1
<description>
<paragraph>
Description 1
<option_list_item>
<option_group>
<option>
<option_string>
--long2
<option_argument delimiter="=">
arg2
<description>
<paragraph>
Description 2
<paragraph>
An option list using spaces:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
--long1
<option_argument delimiter=" ">
arg1
<description>
<paragraph>
Description 1
<option_list_item>
<option_group>
<option>
<option_string>
--long2
<option_argument delimiter=" ">
arg2
<description>
<paragraph>
Description 2
<paragraph>
An option list using mixed delimiters:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
--long1
<option_argument delimiter="=">
arg1
<description>
<paragraph>
Description 1
<option_list_item>
<option_group>
<option>
<option_string>
--long2
<option_argument delimiter=" ">
arg2
<description>
<paragraph>
Description 2
<paragraph>
An option list using mixed delimiters in one line:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
--long1
<option_argument delimiter="=">
arg1
<option>
<option_string>
--long2
<option_argument delimiter=" ">
arg2
<description>
<paragraph>
Description
"""],
["""\
Some edge cases:
--option=arg arg too many arguments
--option=arg,arg not supported (yet?)
--option=arg=arg too many arguments
--option arg arg too many arguments
-a letter arg2 too many arguments
/A letter arg2 too many arguments
--option= argument missing
--=argument option missing
-- everything missing
- this should be a bullet list item
These next ones should be simple paragraphs:
-1
--option
--1
-1 and this one too.
""",
"""\
<document source="test data">
<paragraph>
Some edge cases:
<paragraph>
--option=arg arg too many arguments
<paragraph>
--option=arg,arg not supported (yet?)
<paragraph>
--option=arg=arg too many arguments
<paragraph>
--option arg arg too many arguments
<paragraph>
-a letter arg2 too many arguments
<paragraph>
/A letter arg2 too many arguments
<paragraph>
--option= argument missing
<paragraph>
--=argument option missing
<paragraph>
-- everything missing
<bullet_list bullet="-">
<list_item>
<paragraph>
this should be a bullet list item
<paragraph>
These next ones should be simple paragraphs:
<paragraph>
-1
<paragraph>
--option
<paragraph>
--1
<paragraph>
-1 and this one too.
"""],
["""\
Complex optargs:
--source-url=<URL> Use the supplied <URL> verbatim.
--output-encoding=<name[:handler]>, -o<name[:handler]>
Specify the text encoding for output.
-f <[path]filename> Send output to file.
-d <src dest> Use diff from <src> to <dest>.
--bogus=<x y z> Bogus 3D coordinates.
""",
"""\
<document source="test data">
<paragraph>
Complex optargs:
<option_list>
<option_list_item>
<option_group>
<option>
<option_string>
--source-url
<option_argument delimiter="=">
<URL>
<description>
<paragraph>
Use the supplied <URL> verbatim.
<option_list_item>
<option_group>
<option>
<option_string>
--output-encoding
<option_argument delimiter="=">
<name[:handler]>
<option>
<option_string>
-o
<option_argument delimiter="">
<name[:handler]>
<description>
<paragraph>
Specify the text encoding for output.
<option_list_item>
<option_group>
<option>
<option_string>
-f
<option_argument delimiter=" ">
<[path]filename>
<description>
<paragraph>
Send output to file.
<option_list_item>
<option_group>
<option>
<option_string>
-d
<option_argument delimiter=" ">
<src dest>
<description>
<paragraph>
Use diff from <src> to <dest>.
<option_list_item>
<option_group>
<option>
<option_string>
--bogus
<option_argument delimiter="=">
<x y z>
<description>
<paragraph>
Bogus 3D coordinates.
"""],
]
# When run as a script, execute the test suite defined by `suite()` above.
if __name__ == '__main__':
    import unittest
    unittest.main(defaultTest='suite')
| spreeker/democracygame | external_apps/docutils-snapshot/test/test_parsers/test_rst/test_option_lists.py | Python | bsd-3-clause | 19,677 |
"""
A Jinja2-based tag engine for tags.
"""
import asyncio
import inspect
import random
import string
from concurrent.futures import ThreadPoolExecutor
import discord
import functools
import lupa
from discord.abc import GuildChannel
from jinja2 import Template
from jinja2.sandbox import SandboxedEnvironment
from lupa._lupa import LuaRuntime
from joku.cogs.lua import sandbox_preamble, dictify_table_recursively, NO_RESULT
from joku.core.bot import Context, Jokusoramame
from joku.core.mp2 import ProcessPoolExecutor
from joku.db.tables import Tag
class TagEngine(object):
    """
    Renders user-provided tag templates (Jinja2 or Lua) in a sandboxed
    environment, off the event loop in a separate process pool.
    """

    def __init__(self, bot: Jokusoramame):
        # Template environment.
        # This is a SandboxedEnvironment for security purposes, since tag
        # content is user-supplied.
        self.tmpl_env = SandboxedEnvironment()

        # The process pool used to render templates without blocking (or
        # crashing) the bot process.
        self.executor = ProcessPoolExecutor()

        # The bot instance.
        # We use this for getting the tag instance from the database.
        self.bot = bot

        # Expose a small whitelist of helpers to templates.
        self.tmpl_env.globals.update(
            {
                "random": random,
                "string": string,
                "list": list,
                "str": str,
                "tuple": tuple,
            }
        )

    @staticmethod
    def _lua_render_template(luastr: str, kwargs=None):
        """
        Render a Lua tag inside a sandboxed Lua runtime.

        ``luastr`` is the Lua source; ``kwargs`` is a mapping of variables
        made available to the script (each value is converted to a Lua
        table, so values are assumed to be mappings/sequences — TODO
        confirm against callers).
        """
        def getter(obj, attr_name):
            # Block access to private/dunder attributes from Lua.
            if attr_name.startswith("_"):
                raise AttributeError("Not allowed to access attribute `{}` of `{}`"
                                     .format(attr_name, type(obj).__name__))
            # NOTE(review): this returns the attribute *name* rather than
            # the attribute value, so Lua reading a Python attribute gets
            # back a string — confirm this is intentional sandboxing.
            return attr_name

        def setter(obj, attr_name, value):
            # Lua code may never mutate Python objects.
            raise AttributeError("Python object attribute setting is forbidden")

        # the attribute_handlers are probably enough to prevent access eval otherwise
        lua = LuaRuntime(register_eval=False,
                         unpack_returned_tuples=True,
                         attribute_handlers=(getter, setter))

        # Execute the sandbox preamble, which returns the sandbox object.
        sandbox = lua.execute(sandbox_preamble)

        # Convert each provided variable into a Lua table so the script can
        # read it, then run the user code inside the sandbox.
        new = {}
        for key, val in kwargs.items():
            new[key] = lua.table_from(val)

        _ = sandbox.run(luastr, lua.table_from(new))
        if isinstance(_, bool):
            # A bare boolean (rather than a (called, result) tuple)
            # presumably means the script produced no result — TODO confirm
            # against the sandbox preamble.
            return NO_RESULT

        # unpack_returned_tuples=True gives us a real Python tuple here.
        called, result = _
        if lupa.lua_type(result) == 'table':
            # Convert Lua tables into plain Python dicts before stringifying.
            result = dictify_table_recursively(result)

        return str(result)

    @staticmethod
    def _pp_render_template(tmpl_env: SandboxedEnvironment, tag: Tag, kwargs=None):
        """
        Called inside the process pool to render a Jinja2 template.
        """
        # Fall back to a placeholder if the tag has no content.
        template = tmpl_env.from_string(tag.content or "Broken tag!")  # type: Template
        rendered = template.render(**kwargs)
        return rendered

    async def _render_template(self, tag: Tag, **kwargs):
        """
        Render the tag in the process pool, dispatching on whether it is a
        Lua tag or a Jinja2 tag. Times out after 5 seconds.
        """
        if tag.lua:
            partial = functools.partial(self._lua_render_template, tag.content, kwargs)
        else:
            partial = functools.partial(self._pp_render_template, self.tmpl_env, tag, kwargs)

        # Bound the render time so a malicious/looping tag cannot hang us.
        rendered = await asyncio.wait_for(self.bot.loop.run_in_executor(self.executor, partial), 5, loop=self.bot.loop)
        return rendered

    async def render_template(self, tag_id: str, ctx: Context = None, guild: discord.Guild = None,
                              **kwargs) -> str:
        """
        Look up the tag ``tag_id`` for the guild (taken from ``guild`` or
        from ``ctx``) and return the rendered output, or None if no such
        tag exists.
        """
        guild = guild or ctx.message.guild
        tag = await self.bot.database.get_tag(guild, tag_id)
        if not tag:
            return None

        final_template = await self._render_template(tag, **kwargs)
        return final_template
| MJB47/Jokusoramame | joku/core/tagengine.py | Python | mit | 4,461 |
import sys
from collections import defaultdict
from django.core.management.base import BaseCommand
from django.core.management import call_command
from wazimap.data.utils import get_session
from wazimap.data.utils import get_datatable
from wazimap.models import FieldTable, SimpleTable
from wazimap.geo import geo_data
import logging
logging.basicConfig()
logging.getLogger('sqlalchemy.engine').setLevel(logging.WARN)
"""
This is a helper script that checks the tables in the DB for missing geo entries,
and missing keys for fields. The tables with missing values will be output after completion.
If the store_missing_entries flag is passed, the missing items will be populated in the DB.
Missing geos are populated with null values for all keys,
and missing key values are populated with 0.
"""
# Tables containing Western Cape data only.
# NOTE(review): not referenced in the visible portion of this command —
# confirm whether it is still needed.
WC_ONLY_TABLES = [
]

# If non-empty (and no --table option is given), only these table names are
# checked instead of every FieldTable/SimpleTable in the database.
CHECK_TABLES = [
]
class Command(BaseCommand):
    """Check data tables for missing geo entries and missing key combinations.

    For every checked db table this command finds:
      * key combinations (FieldTable field values) that exist for some geos
        but not others, and
      * (geo_level, geo_code) pairs required for the geo_version but absent
        from the table.
    With --store-missing-entries the gaps are written back to the DB:
    missing keys get total 0, missing geos get null totals.
    """
    help = ("Checks the database for completeness (or a single table if passed)." +
            "Populates the missing entries if --store-missing-entries is passed")

    def add_arguments(self, parser):
        """Register the command-line options; all are optional."""
        parser.add_argument(
            '--table',
            action='store',
            dest='table',
            default=None,
            help='Database table name to check, rather than checking everything'
        )
        parser.add_argument(
            '--geo-version',
            action='store',
            dest='geo_version',
            default='2011',
            help='geo_version of the data which should be checked.'
        )
        parser.add_argument(
            '--store-missing-entries',
            action='store_true',
            dest='store_missing_entries',
            default=False,
            help="Write data for missing entries to DB",
        )
        parser.add_argument(
            '--dry-run',
            action='store_true',
            dest='dryrun',
            default=False,
            help="Dry-run, don't actually write any data.",
        )
        parser.add_argument(
            '--dumppsql',
            action='store_true',
            dest='dumppsql',
            default=False,
            help="Perform dump of table into sql directory after writing data to the db.",
        )

    def debug(self, msg):
        # Only emit debug output at verbosity >= 2.
        if self.verbosity >= 2:
            self.stdout.write(str(msg))

    def handle(self, *args, **options):
        """Entry point: build the set of tables to check, then check each one
        for missing key combinations (FieldTables) and missing geos."""
        self.session = get_session()
        self.verbosity = options.get('verbosity', 0)
        self.table_id = options.get('table')
        self.geo_version = options.get('geo_version')
        self.store_missing_entries = options.get('store_missing_entries', False)
        self.dryrun = options.get('dryrun')
        self.dumppsql = options.get('dumppsql')
        self.geos = self.get_geos(self.geo_version)
        self.db_tables = []
        self.simple_tables = {}
        self.field_tables = {}
        self.missing_key_tables = []
        self.missing_geo_tables = []
        self.missing_geo_simple_tables = []
        if self.table_id:
            # use normal django models method here
            table = get_datatable(self.table_id)
            if type(table) == FieldTable:
                self.field_tables[table.name.lower()] = table
            else:
                self.simple_tables[table.name.lower()] = table
        elif CHECK_TABLES:
            for table_name in CHECK_TABLES:
                data_table = get_datatable(table_name)
                if type(data_table) == FieldTable:
                    self.field_tables[data_table.name.lower()] = data_table
                else:
                    self.simple_tables[data_table.name.lower()] = data_table
        else:
            # Default: check every FieldTable and SimpleTable.
            # NOTE: dict.iteritems below is Python 2-only API.
            self.field_tables = {t.name.lower(): t for t in FieldTable.objects.all()}
            self.simple_tables = {t.name.lower(): t for t in SimpleTable.objects.all()}
        for table_id, data_table in self.field_tables.iteritems():
            fields = data_table.fields
            for release in data_table.releases():
                db_table = data_table.get_db_table(release=release)
                if db_table.name in self.db_tables:
                    # Multiple field tables can refer to the same underlying db table,
                    # and we only want to check the db tables once.
                    continue
                self.db_tables.append(db_table.name)
                self.stdout.write("Checking table: %s" % (db_table.name))
                key_combos = self.get_table_keys(db_table, fields)
                rows = self.session.query(db_table.model).filter(db_table.model.geo_version == self.geo_version).all()
                missing_keys = self.get_missing_keys(db_table, rows, fields, key_combos)
                if missing_keys:
                    self.missing_key_tables.append(db_table.name)
                    if self.store_missing_entries:
                        self.store_missing_keys(db_table, fields, missing_keys)
                missing_geos = self.get_missing_geos(db_table, rows)
                if missing_geos:
                    self.missing_geo_tables.append(db_table.name)
                    if self.store_missing_entries:
                        self.store_missing_geos(db_table, fields, key_combos, missing_geos)
        # Simple Tables
        for table_id, data_table in self.simple_tables.iteritems():
            self.stdout.write("Checking table: %s" % (data_table.name))
            for release in data_table.releases():
                db_table = data_table.get_db_table(release=release)
                rows = self.session.query(db_table.model).filter(db_table.model.geo_version == self.geo_version).all()
                missing_geos = self.get_missing_geos(db_table, rows)
                if missing_geos:
                    self.missing_geo_simple_tables.append(db_table.name)
                    # NOTE(review): unlike the FieldTable branch above, this
                    # write is NOT guarded by --store-missing-entries (only by
                    # --dry-run inside the method) -- confirm this is intended.
                    self.store_simple_table_missing_geos(db_table, missing_geos)
        # Basic output if we didn't write anything to the db
        if self.missing_key_tables:
            self.stdout.write("Tables with missing keys:")
            for table_name in self.missing_key_tables:
                self.stdout.write("%s" % (table_name))
        if self.missing_geo_tables:
            self.stdout.write("Missing geos for tables:")
            for table_name in self.missing_geo_tables:
                self.stdout.write("%s" % table_name)
        if self.missing_geo_simple_tables:
            self.stdout.write("Missing geos for Simple tables:")
            for table_name in self.missing_geo_simple_tables:
                self.stdout.write("%s" % table_name)
        if not self.store_missing_entries:
            self.stdout.write("Run command with --store-missing-entries to populate missing keys with 0 and missing geos with null")
        self.session.close()

    def get_geos(self, geo_version):
        """All geographies for the given geo_version."""
        return geo_data.geo_model.objects.filter(version=geo_version)

    def get_table_keys(self, table, fields):
        # Return a list with all permuations of the keys for all fields
        keys = []
        for obj in self.session.query(table.model).distinct(*fields):
            keys.append([obj.__getattribute__(field) for field in fields])
        return keys

    def get_missing_keys(self, table, rows, fields, key_combos):
        """Map 'level-code' geo ids to the key combinations they lack."""
        keys_by_geo = defaultdict(list)
        for row in rows:
            # Get existing key combinations by geo_id
            keys_by_geo[row.geo_level + '-' + row.geo_code].append(
                [row.__getattribute__(field) for field in fields])
        missing_keys_by_geo = {}
        for geo_id, geo_keys in keys_by_geo.iteritems():
            if len(geo_keys) != len(key_combos):
                missing_keys_by_geo[geo_id] = [
                    x for x in key_combos if x not in geo_keys]
        return missing_keys_by_geo

    def get_missing_geos(self, table, rows):
        """Required (geo_level, geo_code) pairs that have no rows at all."""
        table_geos = set((row.geo_level, row.geo_code) for row in rows)
        if not table_geos:
            sys.exit("Empty table: %s" % (table.name))
        if table.name in WC_ONLY_TABLES:
            # NOTE(review): self.wc_geos is never assigned in this command;
            # this branch would raise AttributeError if WC_ONLY_TABLES were
            # ever populated.  TODO confirm the intended WC geo source.
            req_geos = set(
                (g.geo_level, g.geo_code) for g in self.wc_geos)
        else:
            req_geos = set((g.geo_level, g.geo_code) for g in self.geos)
        missing_geos = [g for g in req_geos if g not in table_geos]
        return missing_geos

    def store_missing_keys(self, db_table, fields, missing_keys):
        """Insert rows with total 0 for each missing key combination."""
        count = 0
        self.stdout.write("Storing missing keys for : %s" % (db_table.name))
        for geo, keys in missing_keys.iteritems():
            for key_values in keys:
                count += 1
                geo_level, geo_code = geo.split('-')
                row = {
                    'geo_level': geo_level,
                    'geo_code': geo_code,
                    'total': 0,
                    'geo_version': self.geo_version}
                for i, field in enumerate(fields):
                    row[field] = key_values[i]
                entry = db_table.model(**row)
                if not self.dryrun:
                    self.session.add(entry)
                # Flush periodically to keep the session's pending set small.
                if count % 100 == 0:
                    self.session.flush()
        if not self.dryrun:
            self.session.commit()
            if self.dumppsql:
                with open('sql/{0}.sql'.format(db_table.name), 'w') as f:
                    call_command('dumppsql', table=db_table.name, stdout=f)

    def store_missing_geos(self, db_table, fields, key_combos, missing_geos):
        """Insert null-total rows for every key combination of each missing geo."""
        count = 0
        self.stdout.write("Storing missing geos for : %s" % (db_table.name))
        for geo in missing_geos:
            # Entry for each possible key combination for each missing geo
            for key_value in key_combos:
                count += 1
                row = {
                    'geo_level': geo[0],
                    'geo_code': geo[1],
                    'total': None,
                    'geo_version': self.geo_version}
                for i, field in enumerate(fields):
                    row[field] = key_value[i]
                entry = db_table.model(**row)
                if not self.dryrun:
                    self.session.add(entry)
                if count % 100 == 0:
                    self.session.flush()
        if not self.dryrun:
            self.session.commit()
            if self.dumppsql:
                with open('sql/{0}.sql'.format(db_table.name), 'w') as f:
                    call_command('dumppsql', table=db_table.name, stdout=f)

    def store_simple_table_missing_geos(self, db_table, missing_geos):
        """Insert bare rows (no totals) for each missing geo of a SimpleTable."""
        count = 0
        self.stdout.write("Storing missing geos for : %s" % (db_table.name))
        for geo in missing_geos:
            count += 1
            row = {
                'geo_level': geo[0],
                'geo_code': geo[1],
                'geo_version': self.geo_version}
            entry = db_table.model(**row)
            if not self.dryrun:
                self.session.add(entry)
            if count % 100 == 0:
                self.session.flush()
        if not self.dryrun:
            self.session.commit()
            if self.dumppsql:
                with open('sql/{0}.sql'.format(db_table.name), 'w') as f:
                    call_command('dumppsql', table=db_table.name, stdout=f)
# Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
try:
from platformio import util
except ImportError:
import sys
for _path in sys.path:
if "platformio" in _path:
sys.path.insert(0, _path[:_path.rfind("platformio") - 1])
break
from platformio import util
import json
from os import getenv
from os.path import isfile, join
from time import time
from SCons.Script import (COMMAND_LINE_TARGETS, DefaultEnvironment, Exit,
SConscript, SConscriptChdir, Variables)
from platformio.exception import UnknownBoard
# AllowSubstExceptions()

# allow common variables from INI file
# (these are injected into the SCons environment by the PlatformIO runner)
commonvars = Variables(None)
commonvars.AddVariables(
    ("BUILD_SCRIPT",),
    ("EXTRA_SCRIPT",),
    ("PIOENV",),
    ("PLATFORM",),

    # package aliases
    ("PIOPACKAGE_TOOLCHAIN",),
    ("PIOPACKAGE_UPLOADER",),

    # options
    ("FRAMEWORK",),
    ("BUILD_FLAGS",),
    ("SRC_BUILD_FLAGS",),
    ("SRC_FILTER",),
    ("LIB_DFCYCLIC",),
    ("LIB_IGNORE",),
    ("LIB_USE",),

    # board options
    ("BOARD",),
    ("BOARD_MCU",),
    ("BOARD_F_CPU",),

    # upload options
    ("UPLOAD_PORT",),
    ("UPLOAD_PROTOCOL",),
    ("UPLOAD_SPEED",)
)

# Base SCons environment shared by all PlatformIO build scripts; the
# directory layout variables all derive from the PlatformIO home/project.
DefaultEnvironment(
    tools=[
        "gcc", "g++", "as", "ar", "gnulink",
        "platformio", "pioupload", "pioar", "piomisc"
    ],
    toolpath=[join("$PIOBUILDER_DIR", "tools")],
    variables=commonvars,

    UNIX_TIME=int(time()),
    PIOHOME_DIR=util.get_home_dir(),
    PROJECT_DIR=util.get_project_dir(),
    PROJECTLIB_DIR=util.get_projectlib_dir(),
    PROJECTSRC_DIR=util.get_projectsrc_dir(),
    PIOENVS_DIR=util.get_pioenvs_dir(),
    PIOBUILDER_DIR=join(util.get_source_dir(), "builder"),
    PIOPACKAGES_DIR=join("$PIOHOME_DIR", "packages"),
    BUILD_DIR=join("$PIOENVS_DIR", "$PIOENV"),
    BUILDSRC_DIR=join("$BUILD_DIR", "src"),
    LIBSOURCE_DIRS=[
        "$PROJECTLIB_DIR",
        util.get_lib_dir(),
        join("$PLATFORMFW_DIR", "libraries")
    ]
)

env = DefaultEnvironment()

# Resolve board metadata and fill in MCU/F_CPU/upload defaults from it
# unless the user overrode them explicitly.
if "BOARD" in env:
    try:
        env.Replace(BOARD_OPTIONS=util.get_boards(env.subst("$BOARD")))
    except UnknownBoard as e:
        env.Exit("Error: %s" % str(e))
    if "BOARD_MCU" not in env:
        env.Replace(BOARD_MCU="${BOARD_OPTIONS['build']['mcu']}")
    if "BOARD_F_CPU" not in env:
        env.Replace(BOARD_F_CPU="${BOARD_OPTIONS['build']['f_cpu']}")
    if "UPLOAD_PROTOCOL" not in env:
        env.Replace(
            UPLOAD_PROTOCOL="${BOARD_OPTIONS['upload'].get('protocol', None)}")
    if "UPLOAD_SPEED" not in env:
        env.Replace(
            UPLOAD_SPEED="${BOARD_OPTIONS['upload'].get('speed', None)}")
    if "ldscript" in env.get("BOARD_OPTIONS", {}).get("build", {}):
        # Linker script may be an absolute path or the name of a bundled one.
        env.Replace(
            LDSCRIPT_PATH=(
                env['BOARD_OPTIONS']['build']['ldscript']
                if isfile(env['BOARD_OPTIONS']['build']['ldscript'])
                else join("$PIOHOME_DIR", "packages", "ldscripts",
                          "${BOARD_OPTIONS['build']['ldscript']}")
            )
        )
    if env['PLATFORM'] != env.get("BOARD_OPTIONS", {}).get("platform"):
        Exit("Error: '%s' platform doesn't support this board. "
             "Use '%s' platform instead." % (
                 env['PLATFORM'],
                 env.get("BOARD_OPTIONS", {}).get("platform")))

# Normalise comma separated library lists into Python lists.
for opt in ("LIB_IGNORE", "LIB_USE"):
    if opt not in env:
        continue
    env[opt] = [l.strip() for l in env[opt].split(",") if l.strip()]

# Make the toolchain binaries visible to subprocesses.
env.PrependENVPath(
    "PATH",
    env.subst(join("$PIOPACKAGES_DIR", "$PIOPACKAGE_TOOLCHAIN", "bin"))
)

SConscriptChdir(0)
SConscript(env.subst("$BUILD_SCRIPT"))

# An extra user script may hook into the build (env var takes precedence).
if getenv("PLATFORMIO_EXTRA_SCRIPT", env.get("EXTRA_SCRIPT", None)):
    SConscript(getenv("PLATFORMIO_EXTRA_SCRIPT", env.get("EXTRA_SCRIPT")))

# Debug targets.  NOTE: Python 2-only print statements below.
if "envdump" in COMMAND_LINE_TARGETS:
    print env.Dump()
    Exit()

if "idedata" in COMMAND_LINE_TARGETS:
    print json.dumps(env.DumpIDEData())
    Exit()
| bkudria/platformio | platformio/builder/main.py | Python | mit | 3,989 |
# -*- coding: utf-8 -*-
class Item(object):
    """A displayable list entry: a unique id, a name and a duration string."""

    def __init__(self, uid, name, time):
        self._uid = uid
        self._name = name
        self._time = time

    @property
    def name(self):
        """Display name of the item."""
        return self._name

    @property
    def uid(self):
        """Unique identifier of the item."""
        return self._uid

    def display(self, size):
        """Render name and time in a column exactly ``size`` wide.

        The name is padded (or truncated) so that the time string is
        right-aligned, with at least one space between the two.
        """
        padding = size - len(self._name) - len(self._time) - 1
        if padding >= 0:
            return self._name + ' ' * (padding + 1) + self._time
        cut = size - len(self._time) - 1
        return self._name[:cut] + ' ' + self._time
class ItemList(object):
    """A scrollable, selectable item list with a movable cursor.

    State: ``_position`` is the cursor index, ``_offset`` the scroll
    offset of the visible window, ``_selected`` the selected indices in
    selection order.  Indices given to ``is_selected``/``is_position``
    are window-relative unless ``offset=False``.
    """

    def __init__(self):
        self.clear()

    def clear(self):
        """Drop all items and reset cursor, scroll offset and selection."""
        self._items = []
        self._offset = 0
        self._position = 0
        self._selected = []

    def add(self, item):
        """Append an item at the end of the list."""
        self._items.append(item)

    def go_up(self):
        """Move the cursor one step up, wrapping to the bottom."""
        self._position = (self._position - 1) % len(self._items)

    def go_down(self):
        """Move the cursor one step down, wrapping to the top."""
        self._position = (self._position + 1) % len(self._items)

    def go_top(self):
        """Jump the cursor to the first item."""
        self._position = 0

    def go_bottom(self):
        """Jump the cursor to the last item."""
        self._position = len(self._items) - 1

    def is_empty(self):
        """True when the list holds no items."""
        return not self._items

    def get_current_uid(self):
        """Uid of the item under the cursor."""
        return self._items[self._position].uid

    def _compute_offset(self, max_len):
        # Scroll just enough to keep the cursor inside the visible window.
        if self._position < self._offset:
            self._offset = self._position
        elif self._position - self._offset > max_len - 1:
            self._offset = self._position - max_len + 1

    def visible_items(self, max_len):
        """The window of at most ``max_len`` items containing the cursor."""
        self._compute_offset(max_len)
        start = self._offset
        return self._items[start:start + max_len]

    def select(self):
        """Toggle selection of the item under the cursor."""
        try:
            self._selected.remove(self._position)
        except ValueError:
            self._selected.append(self._position)

    def unselect_all(self):
        """Clear the whole selection."""
        self._selected = []

    def has_selection(self):
        """True when at least one item is selected."""
        return bool(self._selected)

    def selected_items(self):
        """Yield the selected items, in selection order."""
        for index in self._selected:
            yield self._items[index]

    def position_item(self):
        """The item under the cursor."""
        return self._items[self._position]

    def is_selected(self, i, offset=True):
        """True if index ``i`` (window-relative by default) is selected."""
        return (i + self._offset if offset else i) in self._selected

    def is_position(self, i, offset=True):
        """True if index ``i`` (window-relative by default) is the cursor."""
        return (i + self._offset if offset else i) == self._position
class Playlist(object):
    """An ordered play queue with a play cursor and a scrolling window.

    ``space`` is the number of context rows kept visible around the
    currently playing entry when the scroll offset is computed.
    """

    def __init__(self, space=1):
        self._space = space
        self.clear()

    def clear(self):
        """Drop every queued item and rewind the play cursor."""
        self._list = []
        self._iplay = 0
        self._offset = 0

    def add(self, item):
        """Append an item at the end of the queue."""
        self._list.append(item)

    def is_over(self):
        """True once the cursor has moved past the last item."""
        return self._iplay >= len(self._list)

    def current_uid(self):
        """Uid of the item under the play cursor."""
        return self._list[self._iplay].uid

    def next(self, step=1, secure=True):
        """Advance the cursor by ``step``; clamp at len(queue) if secure."""
        self._iplay += step
        if secure:
            self._iplay = min(self._iplay, len(self._list))

    def previous(self, step=1, secure=True):
        """Move the cursor back by ``step``; clamp at 0 if secure."""
        self._iplay -= step
        if secure:
            self._iplay = max(self._iplay, 0)

    def _compute_offset(self, max_len):
        # Keep the playing row visible with ``_space`` rows of context
        # above and below it.
        if self._iplay - self._space < self._offset:
            self._offset = max(0, self._iplay - self._space)
        elif self._iplay - self._offset > max_len - self._space - 1:
            self._offset = min(len(self._list) - max_len,
                               self._iplay - max_len + self._space + 1)

    def visible_items(self, max_len):
        """The window of at most ``max_len`` items around the play cursor."""
        self._compute_offset(max_len)
        start = self._offset
        return self._list[start:start + max_len]

    def is_current(self, i, offset=True):
        """True if index ``i`` (window-relative by default) is playing."""
        return (i + self._offset if offset else i) == self._iplay
| NiZiL/clitube | clitube/model.py | Python | mit | 3,663 |
"""Sanity check: SymPy evaluates lim_{x -> 0} sin(x)/x symbolically (== 1)."""
# Explicit imports instead of `from sympy import *`, which pollutes the
# namespace and shadows builtins.
from sympy import limit, sin, symbols

x = symbols('x')
# print() with a single argument works on both Python 2 and Python 3;
# the previous bare `print expr` statement was a SyntaxError on Python 3.
print(limit(sin(x) / x, x, 0))
import numpy as np
from numpy.testing import assert_array_almost_equal
from mne.connectivity import phase_slope_index
def test_psi():
    """Test Phase Slope Index (PSI) estimation."""
    sfreq = 50.
    n_signals, n_epochs, n_times = 3, 10, 500
    rng = np.random.RandomState(42)
    data = rng.randn(n_epochs, n_signals, n_times)
    # Shift copies of signal 0 in time so the phase slope has a known
    # sign: signal 1 lags signal 0, signal 2 leads it.
    for epoch_idx in range(n_epochs):
        data[epoch_idx, 1, 10:] = data[epoch_idx, 0, :-10]  # signal 0 is ahead
        data[epoch_idx, 2, :-10] = data[epoch_idx, 0, 10:]  # signal 2 is ahead

    psi, freqs, times, n_epochs, n_tapers = phase_slope_index(
        data, mode='fourier', sfreq=sfreq)
    assert psi[1, 0, 0] < 0
    assert psi[2, 0, 0] > 0

    # Restricting to the (0, 1) pair must give the same value with the
    # sign flipped, since PSI is antisymmetric in seed/target.
    indices = (np.array([0]), np.array([1]))
    psi_2, freqs, times, n_epochs, n_tapers = phase_slope_index(
        data, mode='fourier', sfreq=sfreq, indices=indices)
    assert_array_almost_equal(psi_2[0, 0], -psi[1, 0, 0])

    # Time-resolved estimate with Morlet wavelets.
    cwt_freqs = np.arange(5., 20, 0.5)
    psi_cwt, freqs, times, n_epochs, n_tapers = phase_slope_index(
        data, mode='cwt_morlet', sfreq=sfreq, cwt_freqs=cwt_freqs,
        indices=indices)
    assert np.all(psi_cwt > 0)
    assert psi_cwt.shape[-1] == n_times
| adykstra/mne-python | mne/connectivity/tests/test_effective.py | Python | bsd-3-clause | 1,238 |
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
__doc__ = """Module to detect the Platform/Windows SDK
PSDK 2003 R1 is the earliest version detected.
"""
import os
import SCons.Errors
import SCons.Util
from .common import debug, read_reg
# SDK Checks. This is of course a mess as everything else on MS platforms. Here
# is what we do to detect the SDK:
#
# For Windows SDK >= 6.0: just look into the registry entries:
# HKLM\Software\Microsoft\Microsoft SDKs\Windows
# All the keys in there are the available versions.
#
# For Platform SDK before 6.0 (2003 server R1 and R2, etc...), there does not
# seem to be any sane registry key, so the precise location is hardcoded.
#
# For versions below 2003R1, it seems the PSDK is included with Visual Studio?
#
# Also, per the following:
# http://benjamin.smedbergs.us/blog/tag/atl/
# VC++ Professional comes with the SDK, VC++ Express does not.
# Registry value holding the install folder of the currently installed
# Windows SDK (checked for 6.1 only).
_CURINSTALLED_SDK_HKEY_ROOT = \
        r"Software\Microsoft\Microsoft SDKs\Windows\CurrentInstallFolder"
class SDKDefinition:
    """
    An abstract base class for trying to find installed SDK directories.

    Subclasses supply ``HKEY_FMT`` (a registry-key format string) and
    ``hkey_data`` (the value interpolated into it); arbitrary keyword
    arguments (sanity_check_file, include_subdir, vc_setup_scripts, ...)
    become instance attributes.
    """
    def __init__(self, version, **kw):
        self.version = version
        self.__dict__.update(kw)

    def find_sdk_dir(self):
        """Try to find the MS SDK from the registry.

        Return None if failed or the directory does not exist.
        """
        if not SCons.Util.can_read_reg:
            debug('find_sdk_dir(): can not read registry')
            return None

        hkey = self.HKEY_FMT % self.hkey_data
        debug('find_sdk_dir(): checking registry:{}'.format(hkey))

        try:
            sdk_dir = read_reg(hkey)
        except SCons.Util.WinError as e:
            debug('find_sdk_dir(): no SDK registry key {}'.format(repr(hkey)))
            return None

        debug('find_sdk_dir(): Trying SDK Dir: {}'.format(sdk_dir))

        if not os.path.exists(sdk_dir):
            debug('find_sdk_dir(): {} not on file system'.format(sdk_dir))
            return None

        # Guard against stale registry entries: a known file must exist.
        ftc = os.path.join(sdk_dir, self.sanity_check_file)
        if not os.path.exists(ftc):
            debug("find_sdk_dir(): sanity check {} not found".format(ftc))
            return None

        return sdk_dir

    def get_sdk_dir(self):
        """Return the MSSSDK given the version string (cached)."""
        try:
            return self._sdk_dir
        except AttributeError:
            sdk_dir = self.find_sdk_dir()
            self._sdk_dir = sdk_dir
            return sdk_dir

    def get_sdk_vc_script(self,host_arch, target_arch):
        """ Return the script to initialize the VC compiler installed by SDK
        for the given host/target architecture pair (None if unknown).
        """
        if host_arch == 'amd64' and target_arch == 'x86':
            # No cross tools needed compiling 32 bits on 64 bit machine
            host_arch=target_arch

        arch_string=target_arch
        if host_arch != target_arch:
            arch_string='%s_%s'%(host_arch,target_arch)
        debug("get_sdk_vc_script():arch_string:%s host_arch:%s target_arch:%s"%(arch_string,
                                                                                host_arch,
                                                                                target_arch))
        file=self.vc_setup_scripts.get(arch_string,None)
        debug("get_sdk_vc_script():file:%s"%file)
        return file
class WindowsSDK(SDKDefinition):
    """
    A subclass for trying to find installed Windows SDK directories.

    The version string itself is interpolated into HKEY_FMT.
    """
    HKEY_FMT = r'Software\Microsoft\Microsoft SDKs\Windows\v%s\InstallationFolder'
    def __init__(self, *args, **kw):
        SDKDefinition.__init__(self, *args, **kw)
        self.hkey_data = self.version
class PlatformSDK(SDKDefinition):
    """
    A subclass for trying to find installed Platform SDK directories.

    Pre-6.0 Platform SDKs are registered by install GUID rather than by
    version, so the ``uuid`` keyword is interpolated into HKEY_FMT.
    """
    HKEY_FMT = r'Software\Microsoft\MicrosoftSDK\InstalledSDKS\%s\Install Dir'
    def __init__(self, *args, **kw):
        SDKDefinition.__init__(self, *args, **kw)
        self.hkey_data = self.uuid
#
# The list of VC initialization scripts installed by the SDK
# These should be tried if the vcvarsall.bat TARGET_ARCH fails.
# Pre-6.1 SDKs keep all scripts directly in bin\; 6.1+ SDKs nest the
# cross-compile scripts in per-architecture subdirectories.
preSDK61VCSetupScripts = { 'x86'      : r'bin\vcvars32.bat',
                           'amd64'    : r'bin\vcvarsamd64.bat',
                           'x86_amd64': r'bin\vcvarsx86_amd64.bat',
                           'x86_ia64' : r'bin\vcvarsx86_ia64.bat',
                           'ia64'     : r'bin\vcvarsia64.bat'}

SDK61VCSetupScripts = {'x86'      : r'bin\vcvars32.bat',
                       'amd64'    : r'bin\amd64\vcvarsamd64.bat',
                       'x86_amd64': r'bin\x86_amd64\vcvarsx86_amd64.bat',
                       'x86_ia64' : r'bin\x86_ia64\vcvarsx86_ia64.bat',
                       'ia64'     : r'bin\ia64\vcvarsia64.bat'}

SDK70VCSetupScripts = { 'x86'      : r'bin\vcvars32.bat',
                        'amd64'    : r'bin\vcvars64.bat',
                        'x86_amd64': r'bin\vcvarsx86_amd64.bat',
                        'x86_ia64' : r'bin\vcvarsx86_ia64.bat',
                        'ia64'     : r'bin\vcvarsia64.bat'}

SDK100VCSetupScripts = {'x86'      : r'bin\vcvars32.bat',
                        'amd64'    : r'bin\vcvars64.bat',
                        'x86_amd64': r'bin\x86_amd64\vcvarsx86_amd64.bat',
                        'x86_arm'  : r'bin\x86_arm\vcvarsx86_arm.bat'}

# The list of support SDKs which we know how to detect.
#
# The first SDK found in the list is the one used by default if there
# are multiple SDKs installed.  Barring good reasons to the contrary,
# this means we should list SDKs from most recent to oldest.
#
# If you update this list, update the documentation in Tool/mssdk.xml.
SupportedSDKList = [
    WindowsSDK('10.0A',
               sanity_check_file=r'bin\SetEnv.Cmd',
               include_subdir='include',
               lib_subdir={
                   'x86'    : ['lib'],
                   'x86_64' : [r'lib\x64'],
                   'ia64'   : [r'lib\ia64'],
               },
               vc_setup_scripts = SDK70VCSetupScripts,
              ),
    WindowsSDK('10.0',
               sanity_check_file=r'bin\SetEnv.Cmd',
               include_subdir='include',
               lib_subdir={
                   'x86'    : ['lib'],
                   'x86_64' : [r'lib\x64'],
                   'ia64'   : [r'lib\ia64'],
               },
               vc_setup_scripts = SDK70VCSetupScripts,
              ),
    WindowsSDK('7.1',
               sanity_check_file=r'bin\SetEnv.Cmd',
               include_subdir='include',
               lib_subdir={
                   'x86'    : ['lib'],
                   'x86_64' : [r'lib\x64'],
                   'ia64'   : [r'lib\ia64'],
               },
               vc_setup_scripts = SDK70VCSetupScripts,
              ),
    WindowsSDK('7.0A',
               sanity_check_file=r'bin\SetEnv.Cmd',
               include_subdir='include',
               lib_subdir={
                   'x86'    : ['lib'],
                   'x86_64' : [r'lib\x64'],
                   'ia64'   : [r'lib\ia64'],
               },
               vc_setup_scripts = SDK70VCSetupScripts,
              ),
    WindowsSDK('7.0',
               sanity_check_file=r'bin\SetEnv.Cmd',
               include_subdir='include',
               lib_subdir={
                   'x86'    : ['lib'],
                   'x86_64' : [r'lib\x64'],
                   'ia64'   : [r'lib\ia64'],
               },
               vc_setup_scripts = SDK70VCSetupScripts,
              ),
    WindowsSDK('6.1',
               sanity_check_file=r'bin\SetEnv.Cmd',
               include_subdir='include',
               lib_subdir={
                   'x86'    : ['lib'],
                   'x86_64' : [r'lib\x64'],
                   'ia64'   : [r'lib\ia64'],
               },
               vc_setup_scripts = SDK61VCSetupScripts,
              ),
    WindowsSDK('6.0A',
               sanity_check_file=r'include\windows.h',
               include_subdir='include',
               lib_subdir={
                   'x86'    : ['lib'],
                   'x86_64' : [r'lib\x64'],
                   'ia64'   : [r'lib\ia64'],
               },
               vc_setup_scripts = preSDK61VCSetupScripts,
              ),
    WindowsSDK('6.0',
               sanity_check_file=r'bin\gacutil.exe',
               include_subdir='include',
               lib_subdir='lib',
               vc_setup_scripts = preSDK61VCSetupScripts,
              ),
    PlatformSDK('2003R2',
                sanity_check_file=r'SetEnv.Cmd',
                uuid="D2FF9F89-8AA2-4373-8A31-C838BF4DBBE1",
                vc_setup_scripts = preSDK61VCSetupScripts,
               ),
    PlatformSDK('2003R1',
                sanity_check_file=r'SetEnv.Cmd',
                uuid="8F9E5EF3-A9A5-491B-A889-C58EFFECE8B3",
                vc_setup_scripts = preSDK61VCSetupScripts,
               ),
]

# Fast lookup of a supported SDK by its version string.
SupportedSDKMap = {}
for sdk in SupportedSDKList:
    SupportedSDKMap[sdk.version] = sdk
# Finding installed SDKs isn't cheap, because it goes not only to the
# registry but also to the disk to sanity-check that there is, in fact,
# an SDK installed there and that the registry entry isn't just stale.
# Find this information once, when requested, and cache it.

InstalledSDKList = None
InstalledSDKMap = None

def get_installed_sdks():
    """Return (and lazily cache) the SDKs actually installed on this machine,
    in SupportedSDKList order (newest first)."""
    global InstalledSDKList
    global InstalledSDKMap
    debug('get_installed_sdks()')
    if InstalledSDKList is None:
        InstalledSDKList = []
        InstalledSDKMap = {}
        for sdk in SupportedSDKList:
            debug('trying to find SDK %s' % sdk.version)
            if sdk.get_sdk_dir():
                debug('found SDK %s' % sdk.version)
                InstalledSDKList.append(sdk)
                InstalledSDKMap[sdk.version] = sdk
    return InstalledSDKList
# We may be asked to update multiple construction environments with
# SDK information.  When doing this, we check on-disk for whether
# the SDK has 'mfc' and 'atl' subdirectories.  Since going to disk
# is expensive, cache results by directory.

SDKEnvironmentUpdates = {}

def set_sdk_by_directory(env, sdk_dir):
    """Prepend the SDK's INCLUDE/LIB/LIBPATH/PATH directories to env['ENV'].

    The (variable, directory) pairs computed for a given sdk_dir are
    cached, so later calls for other environments skip the disk checks.
    """
    global SDKEnvironmentUpdates
    debug('set_sdk_by_directory: Using dir:%s'%sdk_dir)
    try:
        env_tuple_list = SDKEnvironmentUpdates[sdk_dir]
    except KeyError:
        env_tuple_list = []
        SDKEnvironmentUpdates[sdk_dir] = env_tuple_list

        include_path = os.path.join(sdk_dir, 'include')
        mfc_path = os.path.join(include_path, 'mfc')
        atl_path = os.path.join(include_path, 'atl')

        # mfc/atl only ship with some SDK editions; add them if present.
        if os.path.exists(mfc_path):
            env_tuple_list.append(('INCLUDE', mfc_path))
        if os.path.exists(atl_path):
            env_tuple_list.append(('INCLUDE', atl_path))
        env_tuple_list.append(('INCLUDE', include_path))

        env_tuple_list.append(('LIB', os.path.join(sdk_dir, 'lib')))
        env_tuple_list.append(('LIBPATH', os.path.join(sdk_dir, 'lib')))
        env_tuple_list.append(('PATH', os.path.join(sdk_dir, 'bin')))

    for variable, directory in env_tuple_list:
        env.PrependENVPath(variable, directory)
def get_sdk_by_version(mssdk):
    """Return the installed SDKDefinition for version string *mssdk*.

    Raises UserError for versions SCons does not know about; returns None
    when the version is known but not installed on this machine.
    """
    if mssdk not in SupportedSDKMap:
        raise SCons.Errors.UserError("SDK version {} is not supported".format(repr(mssdk)))
    get_installed_sdks()  # ensure InstalledSDKMap is populated
    return InstalledSDKMap.get(mssdk)
def get_default_sdk():
    """Set up the default Platform/Windows SDK."""
    get_installed_sdks()
    if not InstalledSDKList:
        return None
    # SupportedSDKList is ordered newest-first, so the first installed
    # SDK is the most recent one available.
    return InstalledSDKList[0]
def mssdk_setup_env(env):
    """Update *env* with paths from the selected SDK.

    SDK selection order:
      1. explicit $MSSDK_DIR;
      2. $MSSDK_VERSION looked up among installed SDKs (UserError if the
         version is unsupported or not installed);
      3. the SDK associated with $MSVS_VERSION, falling back to the
         default SDK;
      4. otherwise the newest installed SDK (silently a no-op if none).
    """
    debug('mssdk_setup_env()')
    if 'MSSDK_DIR' in env:
        sdk_dir = env['MSSDK_DIR']
        if sdk_dir is None:
            return
        sdk_dir = env.subst(sdk_dir)
        debug('mssdk_setup_env: Using MSSDK_DIR:{}'.format(sdk_dir))
    elif 'MSSDK_VERSION' in env:
        sdk_version = env['MSSDK_VERSION']
        if sdk_version is None:
            msg = "SDK version is specified as None"
            raise SCons.Errors.UserError(msg)
        sdk_version = env.subst(sdk_version)
        mssdk = get_sdk_by_version(sdk_version)
        if mssdk is None:
            msg = "SDK version %s is not installed" % sdk_version
            raise SCons.Errors.UserError(msg)
        sdk_dir = mssdk.get_sdk_dir()
        debug('mssdk_setup_env: Using MSSDK_VERSION:%s'%sdk_dir)
    elif 'MSVS_VERSION' in env:
        msvs_version = env['MSVS_VERSION']
        debug('mssdk_setup_env:Getting MSVS_VERSION from env:%s'%msvs_version)
        if msvs_version is None:
            debug('mssdk_setup_env thinks msvs_version is None')
            return
        msvs_version = env.subst(msvs_version)
        from . import vs
        msvs = vs.get_vs_by_version(msvs_version)
        debug('mssdk_setup_env:msvs is :%s'%msvs)
        if not msvs:
            debug('mssdk_setup_env: no VS version detected, bailingout:%s'%msvs)
            return
        sdk_version = msvs.sdk_version
        debug('msvs.sdk_version is %s'%sdk_version)
        if not sdk_version:
            return
        mssdk = get_sdk_by_version(sdk_version)
        if not mssdk:
            mssdk = get_default_sdk()
            if not mssdk:
                return
        sdk_dir = mssdk.get_sdk_dir()
        debug('mssdk_setup_env: Using MSVS_VERSION:%s'%sdk_dir)
    else:
        mssdk = get_default_sdk()
        if not mssdk:
            return
        sdk_dir = mssdk.get_sdk_dir()
        debug('mssdk_setup_env: not using any env values. sdk_dir:%s'%sdk_dir)

    set_sdk_by_directory(env, sdk_dir)

    #print "No MSVS_VERSION: this is likely to be a bug"
def mssdk_exists(version=None):
    """Return True if any SDK is installed, or, when *version* is given,
    whether that specific SDK version is installed.

    Bug fix: the previous code evaluated ``version in sdks``, comparing a
    version *string* against a list of SDKDefinition objects (which define
    no __eq__), so it always returned False.  Compare against the installed
    version strings instead.
    """
    sdks = get_installed_sdks()
    if version is None:
        return len(sdks) > 0
    return any(sdk.version == version for sdk in sdks)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lightmare/mapnik | scons/scons-local-4.1.0/SCons/Tool/MSCommon/sdk.py | Python | lgpl-2.1 | 15,044 |
# Copyright 2020 The UniqueRandomizer Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Sampling approach to the Traveling Salesman Problem."""
import functools
import timeit
from absl import app
from absl import flags
import numpy as np
import scipy.spatial
from unique_randomizer import unique_randomizer as ur
FLAGS = flags.FLAGS

# Solver identifiers accepted by the --solver flag.
NEAREST_NEIGHBOR = 'nearest_neighbor'
FARTHEST_INSERTION = 'farthest_insertion'
FARTHEST_INSERTION_SAMPLING = 'farthest_insertion_sampling'
FARTHEST_INSERTION_BS = 'farthest_insertion_bs'

flags.DEFINE_integer('dataset_size', 10000, 'The number of TSP instances.')
flags.DEFINE_integer('graph_size', 50, 'The number of nodes in a TSP graph.')
flags.DEFINE_integer('seed', 1234, 'Random seed.')
flags.DEFINE_enum('solver',
                  FARTHEST_INSERTION,
                  [NEAREST_NEIGHBOR, FARTHEST_INSERTION,
                   FARTHEST_INSERTION_SAMPLING, FARTHEST_INSERTION_BS],
                  'The TSP solver to use.')
flags.DEFINE_integer('num_samples', 300, 'The number of samples to use.')
flags.DEFINE_boolean('unique_samples', True, 'Whether to use unique sampling.')
flags.DEFINE_boolean('caching', True,
                     'Whether to cache probabilities during unique sampling.')
flags.DEFINE_float('temperature', 0.2, 'Temperature for sampling.')
def nearest_neighbor(nodes):
  """Greedy nearest-neighbor tour construction starting from node 0.

  Args:
    nodes: array-like of shape (num_nodes, 2) with node coordinates.

  Returns:
    A (tour_cost, tour) pair; tour is a list of node indices starting at
    node 0, and tour_cost includes the closing edge back to the start.
  """
  # This code is inspired by
  # https://github.com/wouterkool/attention-learn-to-route/blob/master/problems/tsp/tsp_baseline.py.

  # dist[i][j] is the Euclidean distance from nodes[i] to nodes[j];
  # visited columns are overwritten with Inf as the tour grows.
  dist = scipy.spatial.distance_matrix(nodes, nodes)
  start = 0
  # Remember distances back to the start before they are masked out.
  back_to_start = dist[start].copy()
  tour = [start]
  cost = 0.0
  current = start
  while len(tour) < len(nodes):
    # The current node may not be chosen again by any later step.
    dist[:, current] = np.Inf
    nxt = dist[current].argmin()
    cost += dist[current][nxt]
    tour.append(nxt)
    current = nxt
  cost += back_to_start[current]
  return cost, tour
def _insertion_cost(distances, previous_node, next_node, inserted_node):
  """Extra tour length from inserting a node between two tour nodes.

  Args:
    distances: A distance matrix.
    previous_node: The node before the inserted node. Can be a vector.
    next_node: The node after the inserted node. Can be a vector.
    inserted_node: The node to insert.

  Returns:
    The extra tour cost(s) when inserting the node at the given location(s).
  """
  added = (distances[previous_node, inserted_node]
           + distances[inserted_node, next_node])
  removed = distances[previous_node, next_node]
  return added - removed


def farthest_insertion(nodes, randomizer=None, temperature=1.0, caching=True):
  """Farthest-insertion tour construction, optionally randomized.

  At each step the unused node farthest from the partial tour is chosen;
  it is inserted either at the cheapest position (randomizer is None) or
  at a position sampled from a softmax over reciprocal insertion costs.

  Args:
    nodes: array-like of shape (num_nodes, 2) with node coordinates.
    randomizer: None for the deterministic heuristic, otherwise an object
      with the UniqueRandomizer interface (sample_distribution,
      needs_probabilities).
    temperature: softmax temperature for the sampled variant.
    caching: reuse probabilities already stored in the randomizer's trie.

  Returns:
    A (cost, tour) pair; cost includes the closing edge of the cycle.
  """
  # This code is inspired by
  # https://github.com/wouterkool/attention-learn-to-route/blob/master/problems/tsp/tsp_baseline.py.
  num_nodes = len(nodes)
  distances = scipy.spatial.distance_matrix(nodes, nodes)
  unused_mask = np.ones(num_nodes, dtype=bool)
  tour = []

  for step in range(num_nodes):
    unused_indices = np.flatnonzero(unused_mask)
    if step == 0:
      # Seed the tour with the node farthest from any other node.
      chosen = distances.max(1).argmax()
    else:
      # Distances from every unused node to every tour node; the chosen
      # node maximizes the minimum distance to the tour.
      to_tour = distances[np.ix_(unused_mask, ~unused_mask)]
      chosen = unused_indices[to_tour.min(1).argmax()]
    unused_mask[chosen] = False

    if step < 3:
      # Any ordering of the first 3 nodes is equivalent (cycle symmetry).
      where = step - 1  # Append node to the end.
    elif randomizer is None:
      costs = _insertion_cost(distances, tour, np.roll(tour, -1), chosen)
      where = np.argmin(costs)
    elif not caching or randomizer.needs_probabilities():
      costs = _insertion_cost(distances, tour, np.roll(tour, -1), chosen)
      # Softmax over reciprocal costs: cheaper insertions are more likely.
      weights = np.power(np.reciprocal(costs), 1 / temperature)
      where = randomizer.sample_distribution(weights / np.sum(weights))
    else:
      # Probabilities are already cached in the randomizer's trie.
      where = randomizer.sample_distribution(None)
    tour.insert(where + 1, chosen)

  cost = distances[tour, np.roll(tour, -1)].sum()
  return cost, tour
def farthest_insertion_sampling(nodes, num_samples, unique_samples,
                                temperature, caching=True):
  """Best-of-num_samples farthest insertion.

  The first sample is the deterministic heuristic; the remaining
  num_samples - 1 draws go through a unique (without replacement) or
  ordinary randomizer, and the cheapest tour seen is returned.
  """
  best_cost, best_tour = farthest_insertion(nodes, randomizer=None)
  if unique_samples:
    randomizer = ur.UniqueRandomizer()
  else:
    randomizer = ur.NormalRandomizer()
  for _ in range(num_samples - 1):
    cost, tour = farthest_insertion(nodes, randomizer, temperature,
                                    caching=caching)
    randomizer.mark_sequence_complete()
    if cost < best_cost:
      best_cost, best_tour = cost, tour
  return best_cost, best_tour
def farthest_insertion_bs(nodes, num_samples, temperature):
  """Samples with beam search.

  Builds tours by always inserting the node farthest from the current tour,
  keeping the `num_samples` partial tours with the highest log-probability
  under the temperature-scaled insertion-cost distribution.

  Args:
    nodes: Array of node coordinates, shape (num_nodes, 2).
    num_samples: Beam width (number of partial tours kept per step).
    temperature: Softmax temperature over reciprocal insertion costs.

  Returns:
    A (cost, tour) tuple for the cheapest complete tour in the final beam.
  """
  num_nodes = len(nodes)
  distances = scipy.spatial.distance_matrix(nodes, nodes)
  unused_mask = np.ones(num_nodes, dtype=bool)
  # Create the starting tour. By symmetry, the first 3 nodes can be inserted in
  # any order.
  root_tour = []
  for i in range(3):
    unused_indices = np.flatnonzero(unused_mask)
    if i == 0:
      # Choose the node that is the farthest from any other node.
      next_node = distances.max(1).argmax()
    else:
      # All distances from unused nodes to used nodes.
      candidate_distances = distances[np.ix_(unused_mask, ~unused_mask)]
      # Choose the next node, which is the farthest from the tour.
      next_node = unused_indices[candidate_distances.min(1).argmax()]
    unused_mask[next_node] = False
    root_tour.insert(i, next_node)
  # Beam nodes include the partial tour, unused mask, and its log probability.
  beam = [(root_tour, unused_mask, 0.0)]
  for _ in range(3, num_nodes):
    candidates = []
    # Expand nodes in the beam.
    for tour, unused_mask, log_prob in beam:
      unused_indices = np.flatnonzero(unused_mask)
      candidate_distances = distances[np.ix_(unused_mask, ~unused_mask)]
      next_node = unused_indices[candidate_distances.min(1).argmax()]
      # Copy before mutating: the mask object may be shared by other
      # candidates spawned from the same parent beam entry.
      unused_mask = np.copy(unused_mask)
      unused_mask[next_node] = False
      # Use the insertion costs to define a probability distribution.
      insertion_costs = _insertion_cost(distances, tour, np.roll(tour, -1),
                                        next_node)
      unnormalized = np.power(np.reciprocal(insertion_costs), 1/temperature)
      distribution = unnormalized / np.sum(unnormalized)
      # One candidate per possible insertion position for next_node.
      for i, new_log_prob in enumerate(np.log(distribution)):
        new_tour = list(tour)
        new_tour.insert(i + 1, next_node)
        candidates.append(
            (new_tour, unused_mask, log_prob + new_log_prob))
    # Select the best candidates.
    if num_samples >= len(candidates):
      beam = candidates
    else:
      scores = [node[2] for node in candidates]
      top_k_indices = np.argpartition(scores, -num_samples)[-num_samples:]
      beam = [candidates[i] for i in top_k_indices]
  best_tour = None
  best_cost = float('inf')
  for tour, _, _ in beam:
    cost = distances[tour, np.roll(tour, -1)].sum()
    if cost < best_cost:
      best_cost = cost
      best_tour = tour
  assert len(best_tour) == num_nodes
  return best_cost, best_tour
def main(argv):
  """Generates a random TSP dataset and solves it with the flag-selected solver."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  np.random.seed(FLAGS.seed)
  data = np.random.uniform(size=(FLAGS.dataset_size, FLAGS.graph_size, 2))
  # One RNG seed per instance so each instance's sampling is reproducible.
  per_instance_seeds = np.random.randint(1000000, size=(FLAGS.dataset_size))
  if FLAGS.solver == NEAREST_NEIGHBOR:
    solver = nearest_neighbor
  elif FLAGS.solver == FARTHEST_INSERTION:
    solver = farthest_insertion
  elif FLAGS.solver == FARTHEST_INSERTION_SAMPLING:
    solver = functools.partial(farthest_insertion_sampling,
                               num_samples=FLAGS.num_samples,
                               unique_samples=FLAGS.unique_samples,
                               temperature=FLAGS.temperature,
                               caching=FLAGS.caching)
  elif FLAGS.solver == FARTHEST_INSERTION_BS:
    solver = functools.partial(farthest_insertion_bs,
                               num_samples=FLAGS.num_samples,
                               temperature=FLAGS.temperature)
  else:
    raise app.UsageError('Unknown solver: {}'.format(FLAGS.solver))
  start_time = timeit.default_timer()
  solutions = []
  for instance, seed in zip(data, per_instance_seeds):
    np.random.seed(seed)
    solutions.append(solver(instance))
  elapsed_time = timeit.default_timer() - start_time
  dataset_cost = 0
  for cost, tour in solutions:
    # Sanity check: a valid tour visits every node exactly once.
    if sorted(tour) != list(range(FLAGS.graph_size)):
      raise ValueError('Tour is malformed.')
    dataset_cost += cost
  print('Dataset size: {}'.format(FLAGS.dataset_size))
  print('Graph size: {}'.format(FLAGS.graph_size))
  print('Seed: {}'.format(FLAGS.seed))
  print('Solver: {}'.format(FLAGS.solver))
  if FLAGS.solver == FARTHEST_INSERTION_SAMPLING:
    print()
    print('Sampling-related options:')
    print('  Num samples: {}'.format(FLAGS.num_samples))
    print('  Unique samples: {}'.format(FLAGS.unique_samples))
    print('  Temperature: {}'.format(FLAGS.temperature))
    print('  Caching: {}'.format(FLAGS.caching))
  if FLAGS.solver == FARTHEST_INSERTION_BS:
    print()
    print('Beam search options:')
    print('  Num samples: {}'.format(FLAGS.num_samples))
    print('  Temperature: {}'.format(FLAGS.temperature))
  print()
  print('Time: {:.2f} sec'.format(elapsed_time))
  print('Average cost: {:.5f}'.format(dataset_cost / len(data)))
if __name__ == '__main__':
  # absl parses the flags and then dispatches to main().
  app.run(main)
| google-research/unique-randomizer | unique_randomizer/tsp/tsp.py | Python | apache-2.0 | 10,950 |
# SPDX-FileCopyrightText: 2019 Jean-Louis Fuchs <ganwell@fangorn.ch>
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Test if the tree stays consistent."""
import mpipe
import os
from os import path
from build._rbtree_tests import lib, ffi
from hypothesis.stateful import GenericStateMachine
from hypothesis.strategies import tuples, sampled_from, just, integers
class Node(object):
    """Represent a node in the tree.

    Wraps a cffi ``node_t*`` struct and gives it Python comparison and
    hashing semantics based on its ``value`` field, so nodes can live in
    sets and be sorted.
    """
    def __init__(self, node, value):
        """Store *value* on the underlying struct and keep a reference to it."""
        node.value = value
        self.node = node
    def __eq__(self, other):
        """Eq comparison by stored value.

        Returns NotImplemented for non-Node operands instead of raising
        AttributeError, so mixed-type comparisons degrade gracefully.
        """
        if not isinstance(other, Node):
            return NotImplemented
        return self.node.value == other.node.value
    def __lt__(self, other):
        """Lt comparison by stored value."""
        if not isinstance(other, Node):
            return NotImplemented
        return self.node.value < other.node.value
    def __hash__(self):
        """Hash function: equal-valued nodes hash alike (consistent with __eq__)."""
        return self.node.value
class GenTree(GenericStateMachine):
    """Test if the tree stays consistent."""
    # Hypothesis state machine: every step mutates both the C red-black tree
    # (through the cffi bindings in `lib`) and a plain Python set
    # (`self.comparison`), then asserts that the two agree.
    def __init__(self):
        # Python-side mirror of the tree's contents.
        self.comparison = set()
        # Scratch node reused as the lookup key in find steps.
        self.key = Node(ffi.new("node_t*"), 0)
        lib.test_init()
    def steps(self):
        # Each strategy yields an (action, payload) tuple.  Values stay in
        # 31-bit range, presumably to fit the C node's integer field —
        # TODO confirm against node_t's declaration.
        add_node = tuples(
            just("add"),
            integers(
                min_value=-2**30,
                max_value=(2**30) - 1
            )
        )
        rnd_find = tuples(
            just("rnd_find"),
            integers(
                min_value=-2**30,
                max_value=(2**30) - 1
            )
        )
        delete_node = tuples(just(
            "delete_node"
        ), sampled_from(sorted(self.comparison)))
        delete = tuples(just(
            "delete"
        ), sampled_from(sorted(self.comparison)))
        replace_node = tuples(just(
            "replace_node"
        ), sampled_from(sorted(self.comparison)))
        replace = tuples(just(
            "replace"
        ), sampled_from(sorted(self.comparison)))
        find = tuples(just("find"), sampled_from(sorted(self.comparison)))
        # Mutating strategies sample an existing node, so they are only
        # offered once the model is non-empty.
        if not self.comparison:
            return add_node | rnd_find
        else:
            return (
                add_node | rnd_find | delete_node | delete | find |
                replace | replace_node
            )
    def execute_step(self, step):
        action, value = step
        if action == 'delete_node':
            # Checked removal: must succeed once, then report failure on a
            # second attempt with the same node.
            assert lib.test_remove(value.node) == 0
            self.comparison.remove(value)
            assert value not in self.comparison
            if not self.comparison:
                assert lib.test_tree_nil() == 1
            else:
                assert lib.test_remove(value.node) == 1
        elif action == 'delete':
            # Unchecked removal variant (no return value to assert on).
            lib.test_remove_node(value.node)
            self.comparison.remove(value)
            assert value not in self.comparison
            if not self.comparison:
                assert lib.test_tree_nil() == 1
        elif action == 'replace_node':
            # Replacing with an equal-valued node must succeed; replacing
            # with a value not present in the tree must fail.
            new = ffi.new("node_t*")
            new.value = value.node.value
            assert lib.test_replace_node(value.node, new) == 0
            value.node = new
            other = Node(ffi.new("node_t*"), value.node.value + 1)
            if other not in self.comparison:
                assert lib.test_replace_node(value.node, other.node) == 1
        elif action == 'replace':
            new = ffi.new("node_t*")
            new.value = value.node.value
            assert lib.test_replace(value.node, new) == 0
            value.node = new
            other = Node(ffi.new("node_t*"), value.node.value + 1)
            if other not in self.comparison:
                assert lib.test_replace(value.node, other.node) == 1
        elif action == 'add':
            node = Node(ffi.new("node_t*"), value)
            # Duplicate inserts must be rejected by the C tree.
            if node in self.comparison:
                assert lib.test_add(node.node) != 0
            else:
                assert lib.test_add(node.node) == 0
                self.comparison.add(node)
                assert node in self.comparison
        elif action == 'rnd_find':
            # Probe with an arbitrary value: found iff the model has it.
            key = self.key
            key.node.value = value
            if key in self.comparison:
                assert lib.test_find(key.node) == 0
            else:
                assert lib.test_find(key.node) != 0
        elif action == 'find':
            key = self.key
            key.node.value = value.node.value
            # Also cross-check the tree's size against the model.
            assert lib.test_size() == len(self.comparison)
            assert lib.test_find(key.node) == 0
        else:
            assert False
class GenMpipeTree(GenericStateMachine):
    """Test if the tree stays consistent."""
    # Same model-based test as GenTree, but the tree lives in a separate
    # process (the `test_tree` binary); commands and results travel over an
    # mpipe message pipe as (function-id, value) tuples.
    def __init__(self):
        self.comparison = set()
        build = os.environ.get("BUILD")
        self.proc = mpipe.open([path.join(
            build or "build",
            "test_tree"
        )])
        mpipe.write(self.proc, (lib.fn_init, 0))
        assert mpipe.read(self.proc) == [0]
    def teardown(self):
        mpipe.close(self.proc)
        self.proc = None # Hypothesis seems to keep GSM objects
    def steps(self):
        add_node = tuples(
            just("add"),
            integers(
                min_value=-2**30,
                max_value=(2**30) - 1
            )
        )
        delete_node = tuples(just(
            "delete_node"
        ), sampled_from(sorted(self.comparison)))
        replace_node = tuples(just(
            "replace_node"
        ), sampled_from(sorted(self.comparison)))
        delete = tuples(just(
            "delete"
        ), sampled_from(sorted(self.comparison)))
        replace = tuples(just(
            "replace"
        ), sampled_from(sorted(self.comparison)))
        rnd_find = tuples(
            just("rnd_find"),
            integers(
                min_value=-2**30,
                max_value=(2**30) - 1
            )
        )
        find = tuples(just("find"), sampled_from(sorted(self.comparison)))
        # Mutating strategies need an existing value from the model.
        if not self.comparison:
            return add_node | rnd_find
        else:
            return (
                add_node | delete_node | rnd_find | replace_node |
                find | delete | replace
            )
    def execute_step(self, step):
        proc = self.proc
        action, value = step
        if action == 'add':
            mpipe.write(proc, (lib.fn_add, value))
            if value in self.comparison:
                assert mpipe.read(proc) != [0]
            else:
                assert mpipe.read(proc) == [0]
                self.comparison.add(value)
            return
        elif action == "rnd_find":
            mpipe.write(proc, (lib.fn_find, value))
            if value in self.comparison:
                assert mpipe.read(proc) == [0]
            else:
                assert mpipe.read(proc) != [0]
            return
        elif action == "find":
            mpipe.write(proc, (lib.fn_find, value))
        elif action == "replace_node":
            mpipe.write(proc, (lib.fn_replace_node, value))
        elif action == "replace":
            mpipe.write(proc, (lib.fn_replace, value))
        elif action == "delete":
            assert action == "delete"
            mpipe.write(proc, (lib.fn_remove, value))
            self.comparison.remove(value)
        else:
            assert action == "delete_node"
            mpipe.write(proc, (lib.fn_remove_node, value))
            self.comparison.remove(value)
        # Every non-returning branch above expects a success reply.
        assert mpipe.read(proc) == [0]
# Expose the state machines to the test runner as unittest TestCases.
TestTree = GenTree.TestCase
TestMpipeTree = GenMpipeTree.TestCase
| ganwell/rbtree | old_dev/src/test_tree.py | Python | mit | 7,326 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
----------------------------------------------------------------------
This model acts as a bridge between the DataViewCtrl and the data
This model provides these data columns:
0. DisplayName: string
1. ControllerName:string
2. Connector: integer
3. TotalChannels: integer
4. TotalPixels: integer
5. UniverseStart: integer
6. UniverseEnd: integer
7. ChannelStart: integer
8. ChannelEnd: integer
9. xLightStart: integer
10. xLightEnd: integer
'''
import wx
import logging
import wx.dataview as dv
from lights import Session, Display, Connections, Prop, Controllers, Connectors
__pgmname__ = 'datamodel'
__author__ = "AJ Reynolds"
__email__ = "stampedeboss@gmail.com"
__maintainer__ = __author__
__copyright__ = "Copyright 2016, AJ Reynolds"
__license__ = "CC0"
log = logging.getLogger(__pgmname__)
class dataModel(dv.PyDataViewModel):
    """DataViewModel bridging the lighting ORM rows to a wx.dataview control.

    Serves the 11 columns described in the module docstring for Display,
    Controllers and Connections objects.  Tree behavior is currently
    disabled (flat list): see IsContainer/GetParent below.
    """
    def __init__(self, data, session):
        log.trace("__init__")
        super(dataModel, self).__init__()
        # `data` is the iterable of Connections rows; `session` the
        # SQLAlchemy session used for edits.
        self.data = data
        self.session = session
        self.objmapper.UseWeakRefs(True)
        # All columns are presented as strings.
        self.type = {0: 'string'}
        for i in range(1, self.GetColumnCount()):
            self.type[i] = 'string'
        # Per-controller universe/channel allocation bookkeeping.
        self.tracking ={}
        log.debug("connection.datamodel Initialization Complete")
    def GetColumnCount(self):
        # Report how many columns this model provides data for.
        log.trace("GetColumnCount")
        return 11
    def GetColumnType(self, col):
        # Map the data column numbers to the data type
        log.trace("GetColumnType")
        return self.type[col]
    def IsContainer(self, item):
        # Return True if the item has children, False otherwise.
        # The hidden root is a container
        log.trace("IsContainer: {}".format(item))
        if not item:
            return True
        # and in this model the genre objects are containers
        # node = self.ItemToObject(item)
        # if isinstance(node, Display):
        #     return True
        # but everything else are not
        return False
    def GetParent(self, item):
        log.trace("GetParent: {}".format(item))
        if not item:
            return dv.NullDataViewItem
        # NOTE(review): the unconditional return below disables the tree
        # hierarchy; everything after it in this method is unreachable
        # dead code, kept here presumably for a future tree mode.
        return dv.NullDataViewItem
        node = self.ItemToObject(item)
        if isinstance(node, Display):
            return dv.NullDataViewItem
        # elif isinstance(node, Controllers):
        #     for connector in node.connectors:
        #         return self.ObjectToItem(connector.connection.display)
        # elif isinstance(node, Connections):
        #     return self.ObjectToItem(node.connector.controller)
    def GetChildren(self, parent, children):
        log.trace("GetChildren")
        # The view calls this method to find the children of any node in the
        # control. There is an implicit hidden root node, and the top level
        # item(s) should be reported as children of this node. A List view
        # simply provides all items as children of this hidden root. A Tree
        # view adds additional items as children of the other items, as needed,
        # to provide the tree hierachy.
        # If the parent item is invalid then it represents the hidden root
        # item, so we'll use the genre objects as its children and they will
        # end up being the collection of visible roots in our tree.
        if not parent:
            for item in self.data:
                children.append(self.ObjectToItem(item))
            return len(children)
        # Otherwise we'll fetch the python object associated with the parent
        # item and make DV items for each of it's child objects.
        # node = self.ItemToObject(parent)
        # if isinstance(node, Display):
        #     for item in self.data:
        #         if item.connector and item.connector.controller:
        #             children.append(self.ObjectToItem(item.connector.controller))
        #     test = len(children)
        #     return len(children)
        # if isinstance(node, Controllers):
        #     for item in node.connectors:
        #         if item.connection:
        #             children.append(self.ObjectToItem(item.connection))
        #     test = len(children)
        #     return len(children)
        return 0
    def GetValue(self, item, col):
        # Return the value to be displayed for this item and column.
        log.trace("GetValue")
        # Start with every column empty; each node type fills what it has.
        mapper= {}
        for i in range(0, self.GetColumnCount()):
            mapper[i] = None
        node = self.ItemToObject(item)
        if isinstance(node, Display):
            mapper[0] = node.DisplayName
            return mapper[col]
        elif isinstance(node, Controllers):
            if not node.Name in self.tracking:
                self.tracking[node.Name] = {"nextUnivers": node.Universe, "nextChannel": 1 }
            mapper[1] = node.Name
            return mapper[col]
        elif isinstance(node, Connections):
            # Sum pixels over every row on the same display/controller/connector.
            # NOTE(review): universe/channel/xlight start+end are initialized
            # to 0 and never computed, so columns 5-10 always show 0.
            total_pixels = 0
            universe_start = 0
            universe_end = 0
            channel_start = 0
            channel_end = 0
            xlight_start = 0
            xlight_end = 0
            for entry in self.data:
                if node.DisplayID == entry.DisplayID \
                        and node.ControllerID == entry.ControllerID \
                        and node.Connector == entry.Connector:
                    # Prefer the explicit allocation; fall back to
                    # strings * pixels-per-string.
                    if entry.propIn.PixelsAllocated:
                        allocated = entry.propIn.PixelsAllocated
                    else:
                        allocated = entry.propIn.Strings * entry.propIn.Pixels
                    total_pixels += allocated
            mapper[0] = node.display.DisplayName
            if node.Connector:
                mapper[2] = node.Connector
                if node.connector.controller:
                    mapper[1] = node.connector.controller.Name
            mapper[3] = total_pixels
            # 3 channels (RGB) per pixel.
            mapper[4] = total_pixels * 3
            mapper[5] = universe_start
            mapper[6] = universe_end
            mapper[7] = channel_start
            mapper[8] = channel_end
            mapper[9] = xlight_start
            mapper[10] = xlight_end
            return str(mapper[col])
        else:
            raise RuntimeError("unknown node type")
    def SetValue(self, value, item, col):
        # We're not allowing edits in column zero
        log.trace("SetValue: %s" % value)
        node = self.ItemToObject(item)
        # Only Controllers rows are editable; each column maps to one field.
        if isinstance(node, Controllers):
            self.session.begin(subtransactions=True)
            if col == 1:
                node.Name = value
            elif col == 2:
                node.IP_Address = value
            elif col == 3:
                node.Universe = value
            elif col == 4:
                node.Seq = value
            elif col == 5:
                node.CtlrModelID = value
            self.session.commit()
    def addItem(self, item):
        # Persist a new ORM object in its own subtransaction.
        self.session.begin(subtransactions=True)
        self.session.add(item)
        self.session.commit()
    def delItem(self):
        # TODO: Implement delItem Prop
        pass
    def saveRecs(self):
        # TODO: Implement saveRecs Prop
        pass
    def refreshDB(self):
        # self.mdl.Cleared()
        self.Show()
        print
if __name__ == '__main__':
    # Ad-hoc smoke test: boot the app config and construct the model.
    from lights import Lights
    from sys import argv
    lights = Lights()
    lights.ParseArgs(argv[1:], test=True)
    app = wx.App(False)
    # Create a session to use the tables
    # NOTE(review): dataModel.__init__ requires (data, session); this
    # no-argument call would raise TypeError — confirm intended usage.
    mdl = dataModel()
    print mdl
import praw
import random
import requests
import sqlite3
import bot
import datetime
import time
import string
# Local SQLite store of scanned reddit accounts; schema below.
sql = sqlite3.connect('un.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS users(idint INT, idstr TEXT, created INT, human TEXT, name TEXT, link_karma INT, comment_karma INT, total_karma INT, available INT, lastscan INT)')
cur.execute('CREATE INDEX IF NOT EXISTS userindex ON users(idint)')
cur.execute('CREATE INDEX IF NOT EXISTS nameindex ON users(name)')
sql.commit()
# Column layout of the `users` table (mirrored by the SQL_* constants):
# 0 - idint
# 1 - idstr
# 2 - created
# 3 - human
# 4 - name
# 5 - link karma
# 6 - comment karma
# 7 - total karma
# 8 - available
# 9 - lastscan
SQL_COLUMNCOUNT = 10
SQL_IDINT = 0
SQL_IDSTR = 1
SQL_CREATED = 2
SQL_HUMAN = 3
SQL_NAME = 4
SQL_LINK_KARMA = 5
SQL_COMMENT_KARMA = 6
SQL_TOTAL_KARMA = 7
SQL_AVAILABLE = 8
SQL_LASTSCAN = 9
# NOTE: logging in happens at import time; credentials come from bot.py.
print('Logging in.')
USERAGENT = "/u/GoldenSights Usernames data collection: Gathering the creation dates of user accounts in the interest of information.\
More at https://github.com/voussoir/reddit/tree/master/Usernames"
r = praw.Reddit(USERAGENT)
r.login(bot.uG, bot.pG)
# Maps both directions: bool -> label for display, label -> int for storage.
AVAILABILITY = {True:'available', False:'unavailable', 'available':1, 'unavailable':0}
HEADER_FULL = ' ID CREATED NAME LINK COMMENT TOTAL LAST SCANNED'
HEADER_BRIEF = ' LAST SCANNED | NAME'
MEMBERFORMAT_FULL = '%s %s %s %s %s (%s) | %s'
MEMBERFORMAT_BRIEF = '%s | %s'
MIN_LASTSCAN_DIFF = 86400 * 3
# Don't rescan a name if we scanned it this many days ago
def human(timestamp):
    """Render a unix *timestamp* as e.g. 'Jan 01 1970 00:00:00 UTC'."""
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return datetime.datetime.strftime(moment, "%b %d %Y %H:%M:%S UTC")
def getnow(timestamp=True):
    """Current UTC time: a unix timestamp by default, a datetime otherwise."""
    moment = datetime.datetime.now(datetime.timezone.utc)
    return moment.timestamp() if timestamp else moment
def base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    """Converts an integer to a base36 string."""
    if not isinstance(number, int):
        raise TypeError('number must be an integer')
    prefix = ''
    if number < 0:
        prefix = '-'
        number = -number
    digits = []
    while True:
        number, rem = divmod(number, len(alphabet))
        digits.append(alphabet[rem])
        if number == 0:
            break
    return prefix + ''.join(reversed(digits))
def base36decode(number):
    """Parse a base36 string back into an integer."""
    return int(number, 36)
def b36(i):
    """Convert between int and base36 str, dispatching on the argument type.

    Any other type silently yields None (legacy behavior, kept for callers).
    Note: isinstance (the idiomatic check) also accepts bool, which the old
    ``type(i) == int`` comparison rejected.
    """
    if isinstance(i, int):
        return base36encode(i)
    if isinstance(i, str):
        return base36decode(i)
def getentry(**kwargs):
    """Fetch one user row by exactly one of: idint, idstr, or name.

    Name lookups are case-insensitive.  Returns None for unknown keys.
    """
    if len(kwargs) != 1:
        raise Exception("Only 1 argument please")
    key, value = next(iter(kwargs.items()))
    if key == 'idint':
        cur.execute('SELECT * FROM users WHERE idint=?', [value])
    elif key == 'idstr':
        cur.execute('SELECT * FROM users WHERE idstr=?', [value])
    elif key == 'name':
        cur.execute('SELECT * FROM users WHERE LOWER(name)=?', [value.lower()])
    else:
        return None
    return cur.fetchone()
def reducetonames(users):
    """Normalize a mix of name strings and Redditor objects to lowercase names.

    Strings outside reddit's 3-20 character username bounds are dropped.
    Returns a set (duplicates collapse).
    """
    names = set()
    for entry in users:
        if isinstance(entry, str):
            if 2 < len(entry) < 21:
                names.add(entry.lower())
        elif isinstance(entry, praw.objects.Redditor):
            names.add(entry.name.lower())
    return names
def userify_list(users):
    """Yield Redditor objects (or [name, availability] pairs) for *users*.

    Names scanned within MIN_LASTSCAN_DIFF seconds are skipped.  A 404 from
    reddit means the account is gone; in that case a [name, availability]
    list is yielded instead of a Redditor.
    """
    # NOTE(review): `outlist` is never used; leftover from a pre-generator
    # version of this function.
    outlist = []
    if isinstance(users, str):
        users = [users]
    users = list(reducetonames(users))
    for username in users:
        try:
            preverify = getentry(name=username)
            if preverify is not None:
                # Known name: only proceed if the last scan is stale enough.
                preverify = preverify[SQL_LASTSCAN]
                preverify = getnow() - preverify
                preverify = (preverify > MIN_LASTSCAN_DIFF)
                if not preverify:
                    print('skipping ' + username)
                    continue
            else:
                preverify = False
            user = r.get_redditor(username)
            # Remember whether the name already exists in the DB so
            # smartinsert can skip the existence query.
            user.preverify = preverify
            yield user
        except requests.exceptions.HTTPError as he:
            if he.response.status_code != 404:
                raise he
            # 404: account deleted/banned; check whether the name is free.
            availability = r.is_username_available(username)
            availability = AVAILABILITY[availability]
            yield [username, availability]
def process(users, quiet=False):
    """Scan *users* and upsert each one into the database.

    Accepts anything userify_list accepts (a name, a list of names, or
    Redditor objects).  Prints a running count of already-known accounts
    unless *quiet* is set.
    """
    olds = 0
    users = userify_list(users)
    now = int(getnow())
    current = 0
    for user in users:
        current += 1
        # One row, laid out per the SQL_* column constants.
        data = [None] * SQL_COLUMNCOUNT
        data[SQL_LASTSCAN] = now
        preverify=False
        if isinstance(user, list):
            # [name, availability] pair for a deleted/banned account.
            data[SQL_NAME] = user[0]
            data[SQL_AVAILABLE] = AVAILABILITY[user[1]]
        else:
            h = human(user.created_utc)
            data[SQL_IDINT] = b36(user.id)
            data[SQL_IDSTR] = user.id
            data[SQL_CREATED] = user.created_utc
            data[SQL_HUMAN] = h
            data[SQL_NAME] = user.name
            data[SQL_LINK_KARMA] = user.link_karma
            data[SQL_COMMENT_KARMA] = user.comment_karma
            data[SQL_TOTAL_KARMA] = user.comment_karma + user.link_karma
            data[SQL_AVAILABLE] = 0
            preverify = user.preverify
            # preverification happens within userify_list
        x = smartinsert(data, '%04d' % current, preverified=preverify)
        if x is False:
            # smartinsert returned "not new": an existing row was updated.
            olds += 1
            if quiet is False:
                print('%d old' % olds)
# Short alias for interactive use.
p = process
def smartinsert(data, printprefix='', preverified=False):
    '''
    Insert or update a user row keyed by name, printing a progress line.

    Originally, all queries were based on idint, but this caused problems
    when accounts were deleted / banned, because it wasn't possible to
    sql-update without knowing the ID.

    *data* is a row laid out per the SQL_* constants.  *preverified* means
    the caller already knows the name exists, so the existence query is
    skipped.  Returns True when a brand-new row was inserted.
    '''
    isnew = False
    # Progress message: full detail when we have account data, otherwise
    # just the name and its availability.
    if data[SQL_IDINT] is not None:
        print('%s %s : %s : %s : %d : %d' % (
            printprefix,
            data[SQL_IDSTR],
            data[SQL_HUMAN],
            data[SQL_NAME],
            data[SQL_LINK_KARMA],
            data[SQL_COMMENT_KARMA]))
    else:
        # Bugfix: compare with ==, not `is` (identity on an int literal is
        # a CPython caching artifact and a SyntaxWarning on modern Python).
        statement = 'available' if data[SQL_AVAILABLE] == 1 else 'unavailable'
        print('%s : %s' % (data[SQL_NAME], statement))
    check = False
    if not preverified:
        cur.execute('SELECT * FROM users WHERE name=?', [data[SQL_NAME]])
        check = cur.fetchone()
        check = check is not None
    if preverified or check:
        # Existing row: reorder the values so `name` lands in the WHERE
        # clause at the end of the UPDATE statement.
        data = [
            data[SQL_IDINT],
            data[SQL_IDSTR],
            data[SQL_CREATED],
            data[SQL_HUMAN],
            data[SQL_LINK_KARMA],
            data[SQL_COMMENT_KARMA],
            data[SQL_TOTAL_KARMA],
            data[SQL_AVAILABLE],
            data[SQL_LASTSCAN],
            data[SQL_NAME]]
        cur.execute('UPDATE users SET idint=?, idstr=?, created=?, human=?, link_karma=?, comment_karma=?, total_karma=?, available=?, lastscan=? WHERE name=?', data)
    else:
        isnew = True
        cur.execute('INSERT INTO users VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', data)
    sql.commit()
    return isnew
def get_from_listing(sr, limit, listfunction, submissions=True, comments=True, returnnames=False):
    """Collect authors from a subreddit listing and process (or return) them.

    *listfunction* is an unbound praw Subreddit listing method (new/top/hot);
    it is attached to the subreddit and called explicitly with the subreddit
    as its receiver.
    """
    subreddit = r.get_subreddit(sr)
    if limit is None:
        limit = 1000
    items = []
    if submissions is True:
        print('/r/%s, %d submissions' % (sr, limit))
        subreddit.lf = listfunction
        items += list(subreddit.lf(subreddit, limit=limit))
    if comments is True:
        print('/r/%s, %d comments' % (sr, limit))
        items += list(subreddit.get_comments(limit=limit))
    items = [x.author for x in items]
    # Deleted accounts show up as None authors; strip them all out.
    while None in items:
        items.remove(None)
    if returnnames is True:
        return items
    process(items)
# Thin wrappers selecting which subreddit listing get_from_listing walks.
def get_from_new(sr, limit=None, submissions=True, comments=True, returnnames=False):
    listfunction = praw.objects.Subreddit.get_new
    return get_from_listing(sr, limit, listfunction, submissions, comments, returnnames)
def get_from_top(sr, limit=None, submissions=True, comments=True, returnnames=False):
    listfunction = praw.objects.Subreddit.get_top_from_all
    return get_from_listing(sr, limit, listfunction, submissions, comments, returnnames)
def get_from_hot(sr, limit=None, submissions=True, comments=True, returnnames=False):
    listfunction = praw.objects.Subreddit.get_hot
    return get_from_listing(sr, limit, listfunction, submissions, comments, returnnames)
def fetchgenerator():
    '''
    Yield rows from the cursor's current query until it is exhausted,
    so callers can iterate instead of writing fetchone() while-loops.
    '''
    yield from iter(cur.fetchone, None)
def fetchwriter(outfile, spacer1=' ', spacer2=None, brief=False):
    '''
    Write items from the current sql query to the specified file
    If two spacers are provided, it will flip-flop between them
    on alternating lines
    '''
    flipflop = True
    for item in fetchgenerator():
        spacer = spacer1 if flipflop else spacer2
        # Format each row as either the brief or the full member line.
        if brief:
            item = memberformat_brief(item, spacer)
        else:
            item = memberformat_full(item, spacer)
        print(item, file=outfile)
        # Only alternate when a second spacer was actually supplied.
        if spacer2 is not None:
            flipflop = not flipflop
def show():
    """Dump the database into the show\\ report files and refresh README.md.

    Writes one file per sort order (creation time, name, each karma type)
    plus the list of now-available names, and rewrites the account count
    on line 4 of the README.
    """
    file_time = open('show\\time.txt', 'w')
    file_name = open('show\\name.txt', 'w')
    file_karma_total = open('show\\karma_total.txt', 'w')
    file_karma_link = open('show\\karma_link.txt', 'w')
    file_karma_comment = open('show\\karma_comment.txt', 'w')
    file_available = open('show\\available.txt', 'w')
    file_readme = open('README.md', 'r')
    cur.execute('SELECT COUNT(*) FROM users')
    totalitems = cur.fetchone()[0]
    # Rows with an idint are fully-scanned accounts; the rest are
    # name-only (available/unavailable) entries.
    cur.execute('SELECT COUNT(*) FROM users WHERE idint IS NOT NULL')
    validitems = cur.fetchone()[0]
    print(totalitems, validitems)
    print('Updating readme')
    readmelines = file_readme.readlines()
    file_readme.close()
    readmelines[3] = '#####{0:,} accounts\n'.format(validitems)
    readmelines = ''.join(readmelines)
    file_readme = open('README.md', 'w')
    file_readme.write(readmelines)
    file_readme.close()
    print('Writing time file.')
    print(HEADER_FULL, file=file_time)
    cur.execute('SELECT * FROM users WHERE idint IS NOT NULL ORDER BY created ASC')
    fetchwriter(file_time)
    file_time.close()
    print('Writing name file.')
    print(HEADER_FULL, file=file_name)
    cur.execute('SELECT * FROM users WHERE idint IS NOT NULL ORDER BY LOWER(name) ASC')
    fetchwriter(file_name)
    file_name.close()
    print('Writing karma total file.')
    print(HEADER_FULL, file=file_karma_total)
    cur.execute('SELECT * FROM users WHERE idint IS NOT NULL ORDER BY total_karma DESC, LOWER(name) ASC')
    fetchwriter(file_karma_total)
    file_karma_total.close()
    print('Writing karma link file.')
    print(HEADER_FULL, file=file_karma_link)
    cur.execute('SELECT * FROM users WHERE idint IS NOT NULL ORDER BY link_karma DESC, LOWER(name) ASC')
    fetchwriter(file_karma_link)
    file_karma_link.close()
    print('Writing karma comment file.')
    print(HEADER_FULL, file=file_karma_comment)
    cur.execute('SELECT * FROM users WHERE idint IS NOT NULL ORDER BY comment_karma DESC, LOWER(name) ASC')
    fetchwriter(file_karma_comment)
    file_karma_comment.close()
    print('Writing available')
    print(HEADER_BRIEF, file=file_available)
    cur.execute('SELECT * FROM users WHERE available == 1 ORDER BY LOWER(name) ASC')
    fetchwriter(file_available, spacer1=' ', brief=True)
    file_available.close()
def commapadding(s, spacer, spaced, left=True, forcestring=False):
    '''
    Given a number 's', make it comma-delimited and then
    pad it on the left or right using character 'spacer'
    so the whole string is of length 'spaced'.
    Providing a non-numerical string will skip straight
    to padding; forcestring=True skips the numeric
    formatting entirely.
    '''
    if not forcestring:
        try:
            s = '{0:,}'.format(int(s))
        except (TypeError, ValueError):
            # Not numeric (bare `except:` replaced with the two errors
            # int() actually raises); pad the value as-is.
            pass
    pad = spacer * (spaced - len(s))
    return pad + s if left else s + pad
def memberformat_full(data, spacer='.'):
    """Render one user row as a full, column-aligned report line.

    *data* is a row laid out per the SQL_* constants; *spacer* is the
    padding character used to align the columns.
    """
    idstr = data[SQL_IDSTR]
    # forcestring: IDs are base36 text, not numbers to be comma-formatted.
    idstr = commapadding(idstr, spacer, 5, forcestring=True)
    # Usernames are maximum of 20 chars
    name = data[SQL_NAME]
    name += spacer*(20 - len(name))
    link_karma = data[SQL_LINK_KARMA]
    comment_karma = data[SQL_COMMENT_KARMA]
    total_karma = data[SQL_TOTAL_KARMA]
    # Karma columns may be NULL for partially-scanned rows.
    if link_karma is None:
        link_karma = commapadding('None', spacer, 9)
        comment_karma = commapadding('None', spacer, 9)
        total_karma = commapadding('None', spacer, 10)
    else:
        link_karma = commapadding(link_karma, spacer, 9)
        comment_karma = commapadding(comment_karma, spacer, 9)
        total_karma = commapadding(total_karma, spacer, 10)
    lastscan = data[SQL_LASTSCAN]
    lastscan = human(lastscan)
    out = MEMBERFORMAT_FULL % (
        idstr,
        data[SQL_HUMAN],
        name,
        link_karma,
        comment_karma,
        total_karma,
        lastscan)
    return out
def memberformat_brief(data, spacer='.'):
    """Render one user row as a short 'LAST SCANNED | NAME' line.

    *spacer* is accepted for signature parity with memberformat_full
    but is unused here.
    """
    name = data[SQL_NAME]
    lastscan = data[SQL_LASTSCAN]
    lastscan = human(lastscan)
    out = MEMBERFORMAT_BRIEF % (lastscan, name)
    return out
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all placements.
To create placements, run create_placement.py.
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """Prints every placement in the network, paging through the results."""
  # Initialize appropriate service.
  placement_service = client.GetService('PlacementService', version='v201405')
  # Create statement to fetch all placements.
  statement = dfp.FilterStatement()
  # Get placements by statement.
  while True:
    response = placement_service.getPlacementsByStatement(
        statement.ToStatement())
    if 'results' in response:
      # Display results.
      for placement in response['results']:
        print ('Placement with id \'%s\' and name \'%s\' was found.'
               % (placement['id'], placement['name']))
      # Advance to the next page of results.
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break
  print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
  # Initialize client object (credentials come from the googleads storage file).
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
| dietrichc/streamline-ppc-reports | examples/dfp/v201405/placement_service/get_all_placements.py | Python | apache-2.0 | 1,709 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
# NOTE: autogenerated by AutoRest (see file header); do not hand-edit logic.
class SecurityRulePaged(Paged):
    """
    A paging container for iterating over a list of :class:`SecurityRule <azure.mgmt.network.v2017_08_01.models.SecurityRule>` object
    """
    # Maps the service response's `nextLink`/`value` fields onto the pager.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[SecurityRule]'}
    }
    def __init__(self, *args, **kwargs):
        super(SecurityRulePaged, self).__init__(*args, **kwargs)
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/security_rule_paged.py | Python | mit | 952 |
# -*- coding: utf-8 -*-
{
'name': 'Stock EAN128',
'version': '1.0',
'category': 'Warehouse Management',
'sequence': 14,
'summary': '',
'description': """
Stock EAN128
============
""",
'author': 'ADHOC',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'product_expiry',
],
'data': [
'stock_view.xml',
'product_view.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| syci/ingadhoc-odoo-addons | stock_ean128/__openerp__.py | Python | agpl-3.0 | 608 |
# This is a basic program showing the functionality of the turtle module.
# It generates a very pretty spiraling pattern.
import turtle # import the turtle module so we can draw
import math # import the math module, we need this for e and pi
t = turtle.Pen() # the pen object we draw with
t.reset() # clear the screen just in case of leftovers
x = 0 # arc radii: x grows linearly...
y = 5 # ...y shrinks by a factor of e each pass...
z = 10 # ...z shrinks by a factor of pi each pass
while x <= 999: # the more repeats, the larger the pattern
    t.circle(x,13,2) # draw three short arcs of differing radius/extent
    t.circle(y,17,3)
    t.circle(z,19,5)
    x = x + 1 # increment or decrement the radius of each arc by an interesting value
    y = y / math.e
    z = z / math.pi
| annamacmillan/python-drabbles | rosette.py | Python | mit | 760 |
import os
import sys
import collections
import numpy as np
import d4rl.infos
from d4rl.offline_env import set_dataset_path, get_keys
SUPPRESS_MESSAGES = bool(os.environ.get('D4RL_SUPPRESS_IMPORT_ERROR', 0))
_ERROR_MESSAGE = 'Warning: %s failed to import. Set the environment variable D4RL_SUPPRESS_IMPORT_ERROR=1 to suppress this message.'
try:
import d4rl.locomotion
import d4rl.hand_manipulation_suite
import d4rl.pointmaze
import d4rl.gym_minigrid
import d4rl.gym_mujoco
except ImportError as e:
if not SUPPRESS_MESSAGES:
print(_ERROR_MESSAGE % 'Mujoco-based envs', file=sys.stderr)
print(e, file=sys.stderr)
try:
import d4rl.flow
except ImportError as e:
if not SUPPRESS_MESSAGES:
print(_ERROR_MESSAGE % 'Flow', file=sys.stderr)
print(e, file=sys.stderr)
try:
import d4rl.kitchen
except ImportError as e:
if not SUPPRESS_MESSAGES:
print(_ERROR_MESSAGE % 'FrankaKitchen', file=sys.stderr)
print(e, file=sys.stderr)
try:
import d4rl.carla
except ImportError as e:
if not SUPPRESS_MESSAGES:
print(_ERROR_MESSAGE % 'CARLA', file=sys.stderr)
print(e, file=sys.stderr)
try:
import d4rl.gym_bullet
import d4rl.pointmaze_bullet
except ImportError as e:
if not SUPPRESS_MESSAGES:
print(_ERROR_MESSAGE % 'GymBullet', file=sys.stderr)
print(e, file=sys.stderr)
def reverse_normalized_score(env_name, score):
    """Map a normalized score back onto the raw return scale of *env_name*.

    Inverse of :func:`get_normalized_score`: multiplies by the reference
    score range and shifts by the random-policy (minimum) reference score.
    """
    lo = d4rl.infos.REF_MIN_SCORE[env_name]
    hi = d4rl.infos.REF_MAX_SCORE[env_name]
    return score * (hi - lo) + lo
def get_normalized_score(env_name, score):
    """Normalize a raw return for *env_name* against its reference scores.

    0 corresponds to the random-policy reference score and 1 to the expert
    reference score; values outside [0, 1] are possible.
    """
    lo = d4rl.infos.REF_MIN_SCORE[env_name]
    hi = d4rl.infos.REF_MAX_SCORE[env_name]
    return (score - lo) / (hi - lo)
def qlearning_dataset(env, dataset=None, terminate_on_end=False, **kwargs):
    """Format a D4RL dataset for standard Q-learning algorithms.

    Builds (s, a, s', r, done) transition tuples from the raw step stream.

    Args:
        env: An OfflineEnv object.
        dataset: Optional pre-loaded dataset; defaults to env.get_dataset().
        terminate_on_end (bool): If True, keep the last timestep of each
            trajectory and mark it done. If False (default), the last
            timestep of each trajectory is discarded.
        **kwargs: Forwarded to env.get_dataset().

    Returns:
        dict with keys 'observations', 'actions', 'next_observations',
        'rewards' (float32-derived arrays) and 'terminals' (bool array),
        all of equal length.
    """
    if dataset is None:
        dataset = env.get_dataset(**kwargs)
    num_steps = dataset['rewards'].shape[0]
    # Newer datasets carry an explicit 'timeouts' field; older ones fall
    # back to counting steps against the env's episode horizon.
    has_timeouts = 'timeouts' in dataset
    observations = []
    next_observations = []
    actions = []
    rewards = []
    terminals = []
    step_in_episode = 0
    for idx in range(num_steps - 1):
        is_terminal = bool(dataset['terminals'][idx])
        if has_timeouts:
            is_final_step = dataset['timeouts'][idx]
        else:
            is_final_step = (step_in_episode == env._max_episode_steps - 1)
        if is_final_step and not terminate_on_end:
            # Drop the last transition of a timed-out trajectory: its
            # "next observation" would belong to a different episode.
            step_in_episode = 0
            continue
        if is_terminal or is_final_step:
            step_in_episode = 0
        observations.append(dataset['observations'][idx].astype(np.float32))
        next_observations.append(dataset['observations'][idx + 1].astype(np.float32))
        actions.append(dataset['actions'][idx].astype(np.float32))
        rewards.append(dataset['rewards'][idx].astype(np.float32))
        terminals.append(is_terminal)
        step_in_episode += 1
    return {
        'observations': np.array(observations),
        'actions': np.array(actions),
        'next_observations': np.array(next_observations),
        'rewards': np.array(rewards),
        'terminals': np.array(terminals),
    }
def sequence_dataset(env, dataset=None, **kwargs):
    """Iterate over the complete trajectories of a D4RL dataset.

    Args:
        env: An OfflineEnv object.
        dataset: Optional pre-loaded dataset; defaults to env.get_dataset().
        **kwargs: Forwarded to env.get_dataset().

    Yields:
        One dict per trajectory, mapping every key of the raw dataset
        (observations, actions, rewards, terminals, ...) to a numpy array
        covering that trajectory's timesteps. A trailing incomplete
        trajectory (no terminal/timeout) is not yielded.
    """
    if dataset is None:
        dataset = env.get_dataset(**kwargs)
    num_steps = dataset['rewards'].shape[0]
    # Newer datasets carry an explicit 'timeouts' field; older ones fall
    # back to counting steps against the env's episode horizon.
    has_timeouts = 'timeouts' in dataset
    episode = collections.defaultdict(list)
    step_in_episode = 0
    for idx in range(num_steps):
        is_terminal = bool(dataset['terminals'][idx])
        if has_timeouts:
            is_final_step = dataset['timeouts'][idx]
        else:
            is_final_step = (step_in_episode == env._max_episode_steps - 1)
        for key in dataset:
            episode[key].append(dataset[key][idx])
        if is_terminal or is_final_step:
            step_in_episode = 0
            yield {key: np.array(values) for key, values in episode.items()}
            episode = collections.defaultdict(list)
        step_in_episode += 1
| rail-berkeley/d4rl | d4rl/__init__.py | Python | apache-2.0 | 5,963 |
#pingpong
# A simple 3D pong game built on VPython: the player slider (near side) is
# moved with A/D, the computer slider (far side) tracks the ball perfectly,
# and the score grows every frame plus a bonus for each player hit.
from vpython import *
import math
import random
import string
GRAVITY = -0.2               # vertical acceleration applied each frame
HALF_RADIUS_OF_BALL = 0.075  # hit-height threshold: ball radius + slider height above ground
TIME_COEFFICIENT = 0.45      # time step scale applied to velocity integration
score = 0
game_over = False
color_list = [color.red, color.blue, color.black, color.yellow]
color_index = 0
# Scene objects: two sliders, the ball, a translucent play box, HUD labels.
computer = box (pos = vector(0, 0, -3), color = color.green, size = vector(1.5, 0.05, 0.30))
player = box (pos = vector(0, 0, 3), color = color.blue, size = vector(1.5, 0.05, 0.30))
ball = sphere(pos = vector(0, 2, 0), radius = 0.1, color = color_list[0])
glass = box(pos = vector(0, 1.5, 0), color = color.blue, size = vector(3, 3, 6), opacity = 0.1)
scoreboard = label(pos=vector(0, 3.8, 0), text ="", color = color.black)
scene.background = vector(.96, .96, .96)
game_over_text = text(pos=vector(-5, 0,0), text="GAME OVER", align=vector(0, 0, 0), depth = -0.1, color = color.red, visible = False)
scene.width = scene.height = 800
def prepare_game():
    # Reset camera, ball position/velocity and player slider for a new round.
    scene.forward = vector(0, -0.25, -1)
    ball.velocity = vector(0,0,0.65)
    ball.velocity.x = random.random()*0.5  # random sideways kick
    ball.pos = vector(0, 2, 0)
    game_over_text.visible = False
    glass.visible = True
    player.pos.x = 0
prepare_game()
while True:
    if not game_over:
        score = score + 1  # survival score: +1 per frame
        scoreboard.text = "SCORE: %d" %score
        # Semi-implicit Euler integration of the ball under gravity.
        ball.velocity.y = ball.velocity.y + GRAVITY*TIME_COEFFICIENT
        ball.pos.x = ball.pos.x + ball.velocity.x*TIME_COEFFICIENT
        ball.pos.y = ball.pos.y + ball.velocity.y*TIME_COEFFICIENT
        ball.pos.z = ball.pos.z + ball.velocity.z*TIME_COEFFICIENT
        # Paddle extents along x (player) and z (both sliders).
        plminx = player.pos.x - 0.75
        plmaxx = player.pos.x + 0.75
        plminz = player.pos.z - 0.15
        cpminz = computer.pos.z + 0.15
        cpmaxz = computer.pos.z - 0.15
        computer.pos.x = ball.pos.x  # computer tracks the ball perfectly, so it never misses
        ## HALF_RADIUS_OF_BALL comes from the radius of the ball + height of slider above ground .i.e. 0.0
        # Player-side bounce: requires z reach, x overlap, and a low-enough ball.
        if (plminz <= ball.pos.z):
            if (plminx <= ball.pos.x <= plmaxx):
                if (ball.pos.y <= HALF_RADIUS_OF_BALL):
                    ball.pos.z = 2.85
                    ball.pos.y = 0.076
                    ball.velocity.x = random.random()*0.5
                    ball.velocity.y = -ball.velocity.y
                    ball.velocity.z = -ball.velocity.z
                    score = score + 100  # bonus for returning the ball
                    color_index = (color_index + 1)%4
                    ball.color = color_list[color_index]
        # Computer-side bounce: no x check needed since it tracks the ball.
        if (cpminz >= ball.pos.z):
            if (ball.pos.y < HALF_RADIUS_OF_BALL):
                ball.pos.z = -2.85
                ball.pos.y = 0.076
                ball.velocity.x = random.random()*0.5
                ball.velocity.y = -ball.velocity.y
                ball.velocity.z = -ball.velocity.z
                color_index = (color_index + 1)%4
                ball.color = color_list[color_index]
        # Side walls reflect the ball in x.
        if (ball.pos.x <= -1.5):
            ball.pos.x = -1.5
            ball.velocity.x = -ball.velocity.x
        if (ball.pos.x >= 1.5):
            ball.pos.x = 1.5
            ball.velocity.x = -ball.velocity.x
        # Ball hit the floor without being returned: round over.
        if (ball.pos.y <= 0):
            game_over = True
            glass.visible = False
            game_over_text.visible = True
            print ('Game over!!')
    k = keysdown()  # set of currently pressed keys (VPython)
    if not game_over:
        #move left
        if 'a' in k or 'A' in k:
            if player.pos.x > -1:
                player.pos.x = player.pos.x - 1
        #move right
        if 'd' in k or 'D' in k:
            if player.pos.x < 1:
                player.pos.x = player.pos.x + 1
    else:
        #Restart the game
        if 'r' in k or 'R' in k:
            TIME_COEFFICIENT = 0.45
            score = 0
            game_over = False
            prepare_game()
    rate(18)  # cap the loop at 18 frames per second
| shafeeqr2/python | Ping Pong Game/PingPongGame.py | Python | gpl-3.0 | 3,798 |
""" This module is responsible for finding properties
related to bond interface and bond member interfaces """
from collections import OrderedDict
import netshowlib.linux.iface as linux_iface
import netshowlib.linux.bridge as linux_bridge
import netshowlib.linux.lacp as lacp
import re
import io
class Bond(linux_iface.Iface):
    """ Linux Bond attributes
    * **members**: list of bond members/slaves. creates instances of \
    :class:`BondMember<netshowlib.linux.bond_member.BondMember>`
    * **bond mode**: options are
    * *balance-rr '0'*
    * *active-backup '1'*
    * *balance-xor '2'*
    * *balance-alb '3'*
    * *802.3ad '4'*
    * *balance-tlb '5'*
    * *balance-alb '6'*
    * **min_links**: number of minimum links
    * **hash_policy**: load balancing algorithm. options are
    * *layer2 '0'*
    * *layer3+4 '1'*
    * **lacp**: pointer to :class:`Lacp instance<netshowlib.linux.lacp.Lacp>` for this \
    bond
    * **system_mac**: Bond system mac. Packets egressing bond use this mac address.
    """
    def __init__(self, name, cache=None):
        linux_iface.Iface.__init__(self, name, cache)
        self._members = {}          # cache of BondMember instances keyed by iface name
        self._mode = None           # bonding mode number as a string, e.g. '4'
        self._min_links = None
        self._hash_policy = None    # xmit hash policy number as a string
        self._lacp = None           # lazily-built Lacp instance (802.3ad only)
        self._system_mac = None     # parsed from /proc/net/bonding/<name>
        self._stp = None            # lazily-built KernelStpBridgeMember
        self._bridge_masters = {}
        # Class/module handles kept as attributes — presumably so tests or
        # subclasses can substitute them; confirm before refactoring.
        self.bridge = linux_bridge
        self._cache = cache
        self.bondmem_class = BondMember
        self.lacp_class = lacp.Lacp
        self.bondfileloc = '/proc/net/bonding'
    # -------------------
    def _parse_proc_net_bonding(self, bondfile):
        """
        parse ``/proc/net/bonding`` of this bond to get the system mac
        eventually this info will be in the kernel. I believe its
        kernel 3.18 or something. will confirm with a kernel dev.

        Sets ``self._system_mac`` as a side effect; leaves it untouched
        if the file cannot be read.

        :param bondfile: path to /proc/net file for the bond
        """
        try:
            result = io.open(bondfile).read()
        except (ValueError, IOError):
            # file missing or unreadable (e.g. bond not up) — keep defaults
            return
        fileio = io.StringIO(result)
        for line in fileio:
            if len(line.strip()) <= 0:
                continue
            # make all char lowercase
            line = line.lower()
            # determine mac address of the bond
            if re.match(r'system\s+identification', line):
                self._system_mac = line.split()[-1]
                continue
    # ---------------------
    # Define properties
    @property
    def stp(self):
        """
        :return: KernelStpBridgeMember instance
        """
        if not self._stp:
            self._stp = linux_bridge.KernelStpBridgeMember(self,
                                                           self._cache)
        return self._stp
    @property
    def bridge_masters(self):
        """
        :return: list of bridges associated with this port \
            and its subinterfaces.
        """
        # Rebuilt on every access: check the bond itself first, then each
        # of its subinterfaces, reusing cached Bridge objects when present.
        self._bridge_masters = {}
        bridgename = self.read_symlink('brport/bridge')
        if bridgename:
            if linux_bridge.BRIDGE_CACHE.get(bridgename):
                bridgeiface = linux_bridge.BRIDGE_CACHE.get(bridgename)
            else:
                bridgeiface = self.bridge.Bridge(bridgename, cache=self._cache)
            self._bridge_masters[bridgeiface.name] = bridgeiface
        for subintname in self.get_sub_interfaces():
            subiface = linux_iface.Iface(subintname)
            bridgename = subiface.read_symlink('brport/bridge')
            if bridgename:
                if linux_bridge.BRIDGE_CACHE.get(bridgename):
                    bridgeiface = linux_bridge.BRIDGE_CACHE.get(bridgename)
                else:
                    bridgeiface = self.bridge.Bridge(bridgename, cache=self._cache)
                self._bridge_masters[bridgeiface.name] = bridgeiface
        return self._bridge_masters
    @property
    def members(self):
        """
        :return: list of bond members
        """
        fileoutput = self.read_from_sys('bonding/slaves')
        # if bond member list has changed..clear the bond members hash
        if fileoutput:
            if set(fileoutput.split()) != set(self._members.keys()):
                self._members = OrderedDict()
                for i in fileoutput.split():
                    self._members[i] = self.bondmem_class(i, master=self)
        else:
            self._members = {}
        return self._members
    @property
    def mode(self):
        """
        :return: bond mode integer. Not the name. See \
            `linux kernel driver docs <http://bit.ly/1BSyeVh>`_ for more details
        """
        self._mode = None
        # sysfs reports "<name> <number>", e.g. "802.3ad 4" — keep the number
        fileoutput = self.read_from_sys('bonding/mode')
        if fileoutput:
            self._mode = fileoutput.split()[1]
        return self._mode
    @property
    def min_links(self):
        """
        :return: number of minimum links required to keep the bond active
        """
        self._min_links = self.read_from_sys('bonding/min_links')
        return self._min_links
    @property
    def hash_policy(self):
        """
        :return: bond load balancing policy / xmit hash policy
        """
        self._hash_policy = None
        # sysfs reports "<name> <number>", e.g. "layer3+4 1" — keep the number
        fileoutput = self.read_from_sys('bonding/xmit_hash_policy')
        if fileoutput:
            self._hash_policy = fileoutput.split()[1]
        return self._hash_policy
    @property
    def lacp(self):
        """
        :return: :class:`linux.lacp<netshowlib.linux.lacp.Lacp>` class instance if \
            bond is in LACP mode
        """
        # mode '4' is 802.3ad (LACP); any other mode has no LACP state
        if self.mode == '4':
            if not self._lacp:
                self._lacp = self.lacp_class(self.name)
            return self._lacp
        return None
    @property
    def system_mac(self):
        """
        :return: bond system mac
        """
        self._system_mac = None
        bond_proc_file = "%s/%s" % (self.bondfileloc, self.name)
        self._parse_proc_net_bonding(bond_proc_file)
        return self._system_mac
    def __str__(self):
        """
        string output function for the class
        """
        return "Linux Bond Interface '%s'. Member Count: %s" % (self.name,
                                                                len(self.members.keys()))
class BondMember(linux_iface.Iface):
    """ Linux Bond Member Attributes
    * **master**: pointer to :class:`Bond<netshowlib.linux.bond.Bond>` instance \
    that this interface belongs to. This can be provided in the ``__init__`` \
    function
    * **linkfailures**: bond driver reports number of times bond member flaps
    * **bondstate**: returns whether bond member is active (1) or inactive(0) in a bond \
    **irrespective** of its carrier/linkstate status. What this means is that \
    the link can be up, but not in the bond.
    Examples:
    .. code-block:: python
        import netshowlib.netshowlib as nn
        # bond member info should be normally obtained from
        # first calling the bond and then running the members
        # property.
        bond0 = nn.bond.Bond('bond0')
        print len(bond0.members.keys())
        >> 2
        # on the rare case you know the bond member but want to get
        # bond master information you can.
        bondmem = nn.bond_member.BondMember('eth1')
        print bondmem.master
        >> Linux Bond Interface 'bond0'. Member Count: 1
    """
    def __init__(self, name, cache=None, master=None):
        linux_iface.Iface.__init__(self, name, cache)
        self._master = master        # owning Bond; resolved lazily if None
        self._linkfailures = 0       # mii link-failure count from /proc/net/bonding
        self._bondstate = None       # 1 = active in bond, 0 = inactive
        self.bond_class = Bond
        self.bondfileloc = '/proc/net/bonding'
    # -------------------
    # Get link failure count.
    # determine if member is in bond by checking agg ID
    # parse /proc/net/bonding to get this info
    # J Toppins informed me that this is most generic way to get
    # bonding info across multiple linux platforms.
    # grabbing it from /sys/class/net is not super reliable
    # eventually everything can be grabbed from netlink, which will be done
    # in a future release.
    def _parse_proc_net_bonding(self):
        """
        parse /proc/net/bonding to get link failure and agg_id info

        Side effects: updates ``self._bondstate`` (member active when its
        aggregator id matches the bond's) and ``self._linkfailures``.
        Does nothing when the master or proc file is unavailable.
        """
        # open proc/net/bonding
        if not self.master:
            return
        bondfile = "%s/%s" % (self.bondfileloc, self.master.name)
        try:
            result = io.open(bondfile).read()
        except (ValueError, IOError):
            return
        bondslavename = None
        fileio = io.StringIO(result)
        master_agg_id = None
        for line in fileio:
            if len(line.strip()) <= 0:
                continue
            # make all char lowercase
            line = line.lower()
            # get bondslave name
            if re.match(r'slave\s+interface', line):
                bondslavename = line.split()[-1]
                continue
            # the indented "aggregator id" line belongs to the bond itself
            elif re.match(r'\s+aggregator\s+id', line):
                master_agg_id = line.split()[-1]
                continue
            # an unindented "aggregator id" line belongs to the current slave
            elif re.match(r'aggregator\s+id', line):
                if bondslavename == self.name:
                    agg_id = line.split()[2]
                    _state = 1 if master_agg_id == agg_id else 0
                    self._bondstate = _state
            elif re.match(r'link\s+failure', line):
                _count = line.split()[-1]
                if bondslavename == self.name:
                    self._linkfailures = int(_count)
    # -------------------
    # Define properties
    @property
    def master(self):
        """
        :return: pointer to :class:`Bond<netshowlib.linux.bond.Bond>` instance that \
            this interface belongs to
        """
        # NOTE(review): the hasattr() check is always true because __init__
        # sets _master; kept as-is for byte-compatibility.
        if hasattr(self, '_master'):
            if not self._master:
                bondname = self.read_symlink('master')
                self._master = self.bond_class(bondname)
            return self._master
    @property
    def bondstate(self):
        """
        :return: state of interface in the bond. can be 0(inactive) or 1(active)
        """
        # if LACP check /proc/net/bonding for agg matching state
        if self.master and self.master.mode == '4':
            self._parse_proc_net_bonding()
        else:
            # non-LACP modes: active simply means carrier is up (linkstate 2)
            self._bondstate = 1 if self.linkstate == 2 else 0
        return self._bondstate
    @property
    def linkfailures(self):
        """
        number of mii transitions bond member reports while the bond is active
        this counter cannot be cleared. will reset when the bond is reinitialized
        via the ifdown/ifup process
        :return: number of mii transitions
        """
        self._parse_proc_net_bonding()
        return self._linkfailures
| CumulusNetworks/netshow-linux-lib | netshowlib/linux/bond.py | Python | gpl-2.0 | 10,806 |
#!/usr/bin/python
# \file VARION.py
# This script implements the VARION algorithm for real-time
# detection of sTEC variations using GNSS observations.
# author Giorgio Savastano, 23.10.2015. giorgio.savastano[AT]gmail.com
# modified by Michela Ravanelli, michela.ravanelli[AT]uniroma1.it
#
# Notice: Please acknowledge the use of the above software in any publications:
# ``VARION software was provided by G. Savastano et al.,
# and is available at URL: https://github.com/giorgiosavastano/VARION''.
#
# Reference: Savastano, G. et al. Real-Time Detection of Tsunami Ionospheric Disturbances with a Stand-Alone
# GNSS Receiver: A Preliminary Feasibility Demonstration. Sci. Rep. 7, 46607; doi: 10.1038/srep46607 (2017).
#
# Please send a copy of such publications to either G. Savastano or A. Komjathy:
# Giorgio Savastano Dr. Attila Komjathy
# Civil, Building and Environmental Engineering, Ionospheric and Atmospheric Remote Sensing Group,
# University of Rome "La Sapienza" Jet Propulsion Laboratory, California Institute of Technology,
# Rome, Italy. Pasadena, California, USA.
# E-mail: giorgio.savastano[AT]uniroma1.com E-mail: attila.komjathy[AT]jpl.nasa.gov
#
# -------------------------------------------------------------------------
#
# Copyright (C) 2015-2020 (see AUTHORS file for a list of contributors)
#
# VARION is a opean source software for GNSS processing
#
# This file is part of VARION.
#
# VARION is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# VARION is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with VARION. If not, see <http://www.gnu.org/licenses/>.
#
#------------------------------------------------------------------------
## IMPORT MODULES AND CLASSES ##
import argparse
import os
import glob
import numpy as np
#
import myObs as mO
import myFunc as mF
import mySatsFunc as mSF
import RinexClass as RC
import pdb
# Command-line interface definition (note: this script uses Python 2
# print statements throughout, so it must be run with Python 2).
parser = argparse.ArgumentParser(prog="VARION.py", description= """VARION.py is a script that process RINEX obs files
                                and apply the VARION algorithm in order to obtain sTEC measuraments.
                                author: Giorgio Savastano - giorgiosavastano@gmail.com
                                author: Michela Ravanelli - michela.ravanelli@uniroma1.it """)
parser.add_argument("-staz", type=str, nargs='*', default="all", dest="stazName", help="This argument determines the station(s) will be processed." \
                    " By default, this parameter is set to process all the RINEX observation files in the working folder (/obs). ")
parser.add_argument("-time", nargs='*', type=str, default="all", dest="analysisTime",
                    help="If no argument is given, the analysis is executed for " \
                    " all the time vector of the obs file." \
                    " Otherwise, the argument refers to the time for which the analysis"\
                    " should be performed and has to be in the format hh:min (GPS time)"\
                    "(e.g., 18:34 19:00)")
parser.add_argument("-sat", type=str, nargs='*', default=0, dest="satNumber", help="This argument determines the satellite(s) will be considered." \
                    "By default, this parameter is set to process all the satellites in view for each epochs."\
                    "(e.g., G01 G05 G23)")
parser.add_argument('-brdc', dest="brdcOrb", action='store_true', help="This argument set the processing with the brdc file")
parser.add_argument('-height', type=int, default=300, dest="hIono", help='This argument determines the ionospheric shell height'\
                    'By default, this value is set to 300 km')
# Startup banner (Python 2 print statements).
print "VARION is a free and open source software that processes RINEX obs files in order to estimate sTEC values."
print "author: Giorgio Savastano - giorgiosavastano@gmail.com "
print "author: Michela Ravanelli - michela.ravanelli@uniroma1.it"
print " "
########################################################
## CLASSES ##
class myStation:
    """Lightweight record describing a GNSS station and its input files."""
    def __init__ (self):
        # All file attributes default to "" (no file assigned yet).
        self.name = ""
        self.oFile = ""            ## RINEX obs files
        self.GPSnFile = ""         ## RINEX GPS nav file
        self.GLOnFile = ""         ## RINEX GLO nav file
        self.skyFile = ""          ## VADASE sky file
        self.brdcFile = ""
        self.process_able = False  ## set by VADASE_PROCESS_ABLE()
    def VADASE_PROCESS_ABLE(self):
        """Flag the station processable when its input files are on disk.

        A station can be processed either from its own observation +
        GPS navigation file pair, or from a broadcast orbit file.
        The flag is only ever raised, never cleared.
        """
        has_obs_nav_pair = os.path.isfile(self.oFile) and os.path.isfile(self.GPSnFile)
        if has_obs_nav_pair or os.path.isfile(self.brdcFile):
            self.process_able = True
########################################################
## VARIABLES ##
#### Constant ####
L1 = 1.57542e9 # HZ
L2 = 1.22760e9 # HZ
A = 40.308e16  # ionospheric constant used in the geometry-free combination
c = 299792458.0 # m/s
# Conversion factor from the geometry-free phase combination to TEC units.
const_tec = ((L1**2)*(L2**2))/(A*(L1**2-L2**2))
# Full GPS constellation PRN list (G01..G31).
sats = np.asarray( ['G01','G02','G03','G04','G05','G06','G07','G08','G09','G10','G11','G12',\
                    'G13','G14','G15','G16','G17','G18','G19','G20','G21','G22','G23','G24',\
                    'G25','G26','G27','G28','G29','G30','G31'] )
########################################################
# Working directories: the script expects to be started from a subfolder of
# the project root, with RINEX files under ./obs and results under ./outputs.
os.chdir('..')
main_dir = os.getcwd()
obs_dir = main_dir + '/obs'
out_dir = main_dir + '/outputs'
os.chdir('obs')
## PROGRAM STARTS ##
args = parser.parse_args()
print args
h_iono = args.hIono * 1000.0 # height of the ionospheric layer (km -> m)
# Build the list of observation files to process.
if args.stazName == "all":
    stations = glob.glob('*.??o')
    stations.sort()
else:
    statio = args.stazName
    # Reuse the suffix (doyX.YYo) of the first matching file for all stations.
    suffix = glob.glob(statio[0] + '*.??o')[0][4:]
    stations = [ sta + suffix for sta in statio ]
    stations.sort()
##########################################################
#### TODO: later add an option for the BRDC and IGS file ------- > IMPORTANT
if args.brdcOrb == True:
    brdc_file = glob.glob ( 'brdc' + '*.??n')
    print brdc_file
## COUNT HOW MANY NAVIGATION FILES ARE NOT AVAILABLE ##
myStationsProc = [] # List of stations that will be processed
for sFile in stations:
    alreadyThere = False
    for station in myStationsProc:
        if sFile[0:4] == station.name:
            ## The station is already in the list #
            ## check if it has the observation and sky file #
            ## and if not assign them #
            if args.brdcOrb == True:
                station.brdcFile = brdc_file[0]
            if not station.oFile:
                station.oFile = sFile
            if not station.GPSnFile:
                sGPSnFile = sFile[:-1] + 'n'
                if os.path.isfile(sGPSnFile):
                    station.GPSnFile = sGPSnFile
            station.VADASE_PROCESS_ABLE()
            alreadyThere = True
            break
    ## The station is not in the list
    if not alreadyThere:
        sStation = myStation()
        sStation.name = sFile[0:4]
        sStation.oFile = sFile
        # The matching navigation file shares the obs file name with 'n' suffix.
        sGPSnFile = sFile[:-1] + 'n'
        if os.path.isfile(sGPSnFile):
            sStation.GPSnFile = sGPSnFile
        if args.brdcOrb == True:
            sStation.brdcFile = brdc_file[0]
        sStation.VADASE_PROCESS_ABLE()
        myStationsProc.append(sStation)
for i in myStationsProc:
    print i.name, i.oFile, i.GPSnFile, i.brdcFile, i.process_able
##########################################################
# Analysis window: convert hh:min (GPS time) to seconds of day.
# NOTE(review): when -time is omitted, start/stop are never defined here but
# are used later in the main loop — presumably defined elsewhere; confirm.
if args.analysisTime != "all":
    start = int(args.analysisTime[0][:2])*60.0*60.0 + int(args.analysisTime[0][3:5])*60.0
    stop = int(args.analysisTime[1][:2])*60.0*60.0 + int(args.analysisTime[1][3:5])*60.0
    print start, stop
# Satellite selection: default 0 means "all GPS satellites".
if args.satNumber == 0:
    sats_write = sats
    print sats_write
else:
    sats_write = np.asarray(args.satNumber)
    sats_write.sort()
    print sats_write
################################################################################
## EXECUTE VARION ##
info_file = open( "info.txt" , "w" )
for i in myStationsProc:
if i.process_able:
if args.brdcOrb == True:
rinex_nav = brdc_file[0]
else:
rinex_nav = i.GPSnFile
# CREATE THE RINEX OBJECT FROM THE CLASS RinexFile()
rinex_obs = RC.RinexFile( i.oFile )
lat_g,lon_g, h = mF.coord_geog( rinex_obs.xyz[0],rinex_obs.xyz[1],rinex_obs.xyz[2] )
info_file.write( str(rinex_obs.nam)+ "\t" + str(rinex_obs.int) + "\t" + str(lat_g) + "\t" + str(lon_g) + "\n" )
##
# read the rinex with the method built inside the class
import time
start_time = time.time()
rinex_obs.READ_RINEX()
rinex_obs.COORD_GEOG()
print "RINEX file %s has been read in" % rinex_obs.nam
print("--- %s seconds ---" % (time.time() - start_time))
rinex_obs.data = mSF.skip_nan(rinex_obs,rinex_obs.data[5])
#select just the satellite in view
sats_write_1 = mSF.sat_selection( rinex_obs, sats_write, start, stop )
try:
start_time = time.time()
sIP = mSF.coord_satellite( rinex_nav, rinex_obs, sats_write_1)
print "Coord satellites has been computed in"
print("--- %s seconds ---" % (time.time() - start_time))
except ValueError:
print 'station ' + str(rinex_obs.nam) + ' has been skipped'
continue
################################################################################
lista_G = []
sIP_G_list = []
data_list = []
start_time = time.time()
for sa in sats_write_1:
varion = mO.obs_sat( rinex_obs.data[0], rinex_obs.data[1], rinex_obs.data[2], rinex_obs.data[3], rinex_obs.data[4], sa )
data_list.append( rinex_obs.data )
lista_G.append( varion )
sIP_sat = mSF.track_sat( sIP, sa, start, stop )
####
phi_ipp, lambda_ipp, h_ipp = mSF.coord_ipps( rinex_obs.xyz[0],rinex_obs.xyz[1],rinex_obs.xyz[2], sIP_sat[2], sIP_sat[3], sIP_sat[4], h_iono)
sIP_G_list.append( (sIP_sat[0],sIP_sat[1],phi_ipp,lambda_ipp,sIP_sat[6],sIP_sat[7]) )
print "VARION algorithm has been computed and"
print "IPP location has been computed for the satellites selected in"
print("--- %s seconds ---" % (time.time() - start_time))
################################################################################
### REMOVE THE OUTLAYER
stec_list = []
sod_list = []
for i in xrange( len(sats_write_1) ):
mask = mF.no_outlayer_mask( lista_G[i][0] * const_tec / rinex_obs.int ) ## modify the treshold to remove the outlayer
stec_list.append( lista_G[i][0][mask] * const_tec / rinex_obs.int )
sod_list.append( lista_G[i][2][mask] )
################################################################################
### POLINOMIAL INTERPOLATION OF THE DATA
X_list = []; Y_list = []
mask_list = []
diff_list = []
cum_list = []
import warnings
warnings.simplefilter('ignore', np.RankWarning)
for i in xrange( len(sats_write_1) ):
X = sod_list[i]
Y = stec_list[i]
mask = (X>=start) & (X<=stop)
try:
p = np.poly1d( np.polyfit(X[mask], Y[mask], 10) )
interpo = p( X[mask] )
# residual
diff = Y[mask] - interpo
# integrate
cum = mF.integrate( diff, rinex_obs.int )
# append
X_list.append(X)
Y_list.append(Y)
mask_list.append(mask)
diff_list.append(diff)
cum_list.append(cum)
except (TypeError, IndexError):
X_list.append(0.0)
Y_list.append(0.0)
mask_list.append(0.0)
diff_list.append(0.0)
cum_list.append(0.0)
################################################################################
### Create the .txt file
################################################################################
for i in xrange( len(sats_write_1) ):
mask = (sIP_G_list[i][0] >= start) & (sIP_G_list[i][0] <= stop)
f = open(out_dir + '/' + str( rinex_obs.nam ) +'_' + str(sats_write_1[i]) + '_' + str(args.hIono) + '.txt', 'w')
f.write('sod' + '\t' + '\t' + '\t' + 'sTEC' + '\t' + '\t'+ '\t' 'lon' + '\t' + '\t'+ '\t' 'lat'+ '\t' + '\t'+ '\t' 'ele' + '\t' + '\t'+ '\t' 'azi' '\n')
try:
for k in xrange( len(cum_list[i]) ):
try:
#### FIX DIFF OF TIME BETWEEN COORDINATES AND STEC (ONE COME FROM NAVIGATION FILE THE OTHER FROM OBS)
## BUG FIXED --> try with 30 s data
inde = (np.where(X_list[i][mask_list[i]][k] == sIP_G_list[i][0][mask]) )
f.write( str(sIP_G_list[i][0][mask][inde[0][0]]) + '\t' + '\t' + str(cum_list[i][k]) + '\t' + '\t' + \
str(sIP_G_list[i][3][mask][inde[0][0]]) + '\t' + '\t' + str(sIP_G_list[i][2][mask][inde[0][0]]) + \
'\t' + '\t' + str(sIP_G_list[i][-1][mask][inde[0][0]])+'\t' + '\t' +str(sIP_G_list[i][4][mask][inde[0][0]]) +'\n')
except IndexError:
continue
except TypeError or IndexError:
continue
f.close()
info_file.close()
| giorgiosavastano/VARION | scripts/VARION.py | Python | gpl-3.0 | 13,099 |
# Pseudopotential-generation settings for aluminium (Al).
# environment variables
ATOM_PROGRAM = '/home/physics/bin/atm'         # path to the ATOM executable
ATOM_UTILS_DIR ='/home/physics/bin/pseudo'     # path to the ATOM utility scripts
element = "Al"
# equilibrium volume — units not stated here; presumably Å^3 per atom, confirm
# against the code that consumes this setting.
equil_volume = 16.4796
# general calculation parameters
calc = {"element": element,
        "lattice": "FCC",       # crystal lattice used for the test calculation
        "xc": "pb",             # exchange-correlation flavour (ATOM code)
        "n_core": 3,            # number of core orbitals
        "n_val": 2,             # number of valence orbitals
        "is_spin_pol": False,   # spin-polarized calculation?
        "core": True,
        }
# pseudopotential parameters
electrons = [2, 1]                     # valence occupations (3s2 3p1 for Al)
radii = [2.4, 2.8, 2.3, 2.3, 0.7]      # cutoff radii per channel
# SIESTA calculation parameters
siesta_calc = {"element": element,
               "title": element + " SIESTA calc",
               "xc_f": "GGA",
               "xc": "PBE"
               }
# electronic configurations
configs = [[1.5, 1.5],
           [1, 2],
           [0.5, 2.5],
           [0, 3]]
# number of atoms in cubic cell
_nat_cell = {"SC": 1,
             "BCC": 2,
             "FCC": 4}
nat = _nat_cell[calc["lattice"]]  # atoms per cubic cell for the chosen lattice
| ansobolev/PseudoGenerator | settings/settings_Al.py | Python | mit | 879 |
import os
import shlex
import subprocess
from tempfile import mkstemp, mkdtemp
def find_files(directory:str, extension:str= '', ignore_mark=None):
    """
    Walk :arg directory recursively and collect matching file paths.

    Directories containing a file named :arg ignore_mark are skipped,
    together with their entire subtree.

    :param directory: root directory of the walk (symlinks are followed)
    :param extension: extension to match, with or without the leading dot;
        an empty string (the default) matches every file
    :param ignore_mark: marker file name for directories to ignore, or None
    :return: list of matching file paths
    """
    # Normalize 'txt' -> '.txt'. Leave '' untouched: the old code turned it
    # into '.', so the default only matched files literally ending in a dot;
    # now the default genuinely matches all files (endswith('') is True).
    if extension and not extension.startswith('.'):
        extension = '.' + extension
    matching_files = []
    for top, subdirs, files in os.walk(directory, followlinks=True):
        if ignore_mark is not None and ignore_mark in files:
            # Prune the walk in place: os.walk honors mutations of subdirs,
            # so neither this directory's files nor its children are scanned.
            subdirs.clear()
            continue
        for f in files:
            if f.endswith(extension):
                matching_files.append(os.path.join(top, f))
    return matching_files
def execute_shell(cmd, input='', cmd_uses_shell_tricks=False) -> (int, str, str):
    """
    Execute cmd, optionally sending *input* to its stdin.

    :param cmd: the command line; tokenized with shlex unless shell
        tricks are requested
    :param input: text (or bytes) to feed to the process's stdin
    :param cmd_uses_shell_tricks: set True when the command needs shell
        features such as redirection or pipelines
    :return: (returncode, stdout, stderr), streams decoded as UTF-8.
    """
    has_input = input != '' and input is not None
    proc_stdin = subprocess.PIPE if has_input else None
    if has_input:
        # BUGFIX: the pipes are opened in byte mode (no text/universal_newlines
        # flag), so communicate() requires bytes — passing a str raised
        # TypeError whenever input was supplied. Encode unless already bytes.
        proc_input = input if isinstance(input, bytes) else input.encode('utf-8')
    else:
        proc_input = None
    if not cmd_uses_shell_tricks:  # TODO: detect automatically
        args = shlex.split(cmd)
    else:
        args = cmd
    p = subprocess.Popen(args,
                         stdin=proc_stdin,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         shell=cmd_uses_shell_tricks
                         )
    out, err = p.communicate(proc_input)
    return p.returncode, \
           str(out, encoding='utf-8'), \
           str(err, encoding='utf-8')
def get_tmp_file_name():
    """Create an empty temporary file and return its path.

    The OS-level descriptor returned by mkstemp is closed immediately so
    the caller only has to deal with the file name.
    """
    fd, name = mkstemp()
    os.close(fd)  # avoid leaking the open descriptor
    return name
def get_tmp_dir_name():
    """Create a fresh temporary directory and return its path."""
    tmp_dir = mkdtemp()
    return tmp_dir
def cat(test):
    """
    Return the lines of file *test* as a list (newline characters kept).

    :param test: path of the file to read
    """
    # BUGFIX: the handle was opened and closed manually, so an exception in
    # readlines() leaked the file object; 'with' guarantees it is closed.
    with open(test) as f:
        return f.readlines()
def readfile(file_name):
    """Return the entire contents of *file_name* as a single string."""
    with open(file_name) as handle:
        contents = handle.read()
    return contents
| 5nizza/bene | utils.py | Python | mit | 1,962 |
#!/usr/bin/env python
"""
File to plot the Learning Curve of a Random Forrest
"""
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
import matplotlib as mpl
from sklearn.model_selection import KFold
from sklearn.model_selection import learning_curve
mpl.use('TkAgg')
import matplotlib.pyplot as plt
# Module metadata.
__author__ = 'Brandon Watts'
__credits__ = ['Casey Beaird', 'Chase Greco']
__license__ = 'MIT'
__version__ = '0.1'
def split_labels_and_vectors(csv_path, label_name):
    """
    Load a CSV file and separate it into labels and feature vectors.

    :param csv_path: Path to the CSV file
    :param label_name: Name of the label column
    :return: (list of labels, 2-D numpy array of the remaining columns)
    """
    frame = pd.read_csv(csv_path)
    labels = frame[label_name].values.tolist()
    vectors = frame.drop([label_name], axis=1).values
    return labels, vectors
def plot_curve(x, y, folds):
    """
    Fit a RandomForestClassifier and plot its learning curve.

    :param x: 2-D array of feature vectors
    :param y: sequence of class labels, one per row of x
    :param folds: cross-validation specification passed to learning_curve's
        ``cv`` argument — an int fold count or a splitter such as KFold
        (the caller below passes a KFold instance)
    """
    # Create and Train a classifier (hyperparameters fixed by prior tuning,
    # presumably — confirm before changing them).
    classifier = RandomForestClassifier(n_jobs=-1, max_features=None, oob_score=True,
                                        n_estimators=63, max_depth=30, min_samples_leaf=1)
    classifier.fit(x, y)
    # Create the Learning Curve for the Classifier: scores at 5 training
    # sizes spanning 10%..100% of the data, cross-validated per `folds`.
    train_sizes, train_scores, test_scores = learning_curve(classifier, x, y, n_jobs=-1, cv=folds,
                                                            train_sizes=np.linspace(.1, 1.0, 5), verbose=0)
    # Extract all the stats for the plot: mean +/- std across the CV folds.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    # Create the plot
    plt.figure()
    plt.title("RandomForestClassifier")
    # NOTE(review): legend() is called before any labelled artists exist, so
    # matplotlib may warn and show no legend — confirm it was meant to come
    # after the plot() calls below.
    plt.legend(loc="best")
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    plt.gca().invert_yaxis()
    plt.grid()
    # Shaded bands: +/- one standard deviation around each mean curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1,
                     color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score")
    plt.ylim(-.1, 1.1)
    plt.show()
# Script entry: 10-fold shuffled CV over the feature file two levels up,
# then render the learning curve (plot_curve's params are x, y, folds).
cv = KFold(10, shuffle=True)
labels, vectors = split_labels_and_vectors(csv_path="../../vectors.csv", label_name="Entity_ID")
plot_curve(vectors, labels, cv)
| cBeaird/SemEval_Character-Identification-on-Multiparty-Dialogues | Classifiers/Random_Forest/Learning_Curve.py | Python | mit | 2,718 |
from gazetteer.models import GazSource,GazSourceConfig,LocationTypeField,CodeFieldConfig,NameFieldConfig
from skosxl.models import Concept, Scheme, MapRelation
from gazetteer.settings import TARGET_NAMESPACE_FT
(sch,created) = Scheme.objects.get_or_create(uri=TARGET_NAMESPACE_FT[:-1], pref_label="Gaz Feature types")
try:
(ft,created) = Concept.objects.get_or_create(term="PPL", pref_label="Populated Place", definition = "def", scheme = sch)
except:
pass
# now set up cross references from NGA feature types namespace
sch2 = Scheme.objects.create(uri="http://www.geonames.org/ontology#", pref_label="NGA gaz codes")
ft2 = Concept.objects.create(term="PPLA", pref_label="Populated Place", definition = "def", scheme = sch2)
mr = MapRelation.objects.create(match_type= 1, origin_concept=ft2 , uri="".join((TARGET_NAMESPACE_FT,"PPL")))
# now set up harvest config
try:
GazSourceConfig.objects.delete(name="Geonames country file dump")
except:
pass
try:
config=GazSourceConfig.objects.create(lat_field="lat", name="Geonames country file dump", long_field="long")
LocationTypeField.objects.create(field='dsg',namespace="https://gazetteer.mapstory.org/def/ft/nga/", config=config)
NameFieldConfig.objects.create(config=config,language="", as_default=True, languageNamespace="http://geonames.nga.mil/def/lang/", field="full_name_", languageField="LC",name_type = 'Endonym')
NameFieldConfig.objects.create(config=config,language="", as_default=False, languageNamespace="http://geonames.nga.mil/def/lang/", field="full_nam_1", languageField="LC",name_type = 'Exonym')
CodeFieldConfig.objects.create(config=config,field="ufi",namespace="http://geonames.nga.mil/id/")
(s,created) = GazSource.objects.get_or_create(source="tu_sample", config=config, source_type="mapstory")
print (s,created)
except Exception as e:
print "skipping ", e
| rob-metalinkage/django-gazetteer | gazetteer/fixtures/mapstory_geonames_config.py | Python | cc0-1.0 | 1,907 |
#!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Installs and runs (a subset of) the gcc toolchain test suite against
various nacl and non-nacl toolchains
'''
import glob
import os
import os.path
import sys
# Hack to get buildbot_lib. Fix by moving scripts around?
sys.path.append(os.path.abspath(os.path.join(os.getcwd(),'buildbot')))
import buildbot_lib
# Config
# Paths into the pnacl-gcc checkout where the torture-test sources live.
TEST_SUITE_BASE = os.path.join('toolchain_build', 'src', 'pnacl-gcc',
                               'gcc', 'testsuite')
# C execution tests (gcc torture) and C++ dejagnu tests (g++.dg).
TEST_PATH_C = os.path.join(TEST_SUITE_BASE, 'gcc.c-torture', 'execute')
TEST_PATH_CPP = os.path.join(TEST_SUITE_BASE, 'g++.dg')
def usage():
  """Print a short command-line usage message."""
  # Python 2 print statements; the trailing comma keeps both on one line.
  print 'Usage:', sys.argv[0], '<compiler> <platform>',
  print '[<args for toolchain_tester.py>]'
def list_tests(src_base, *glob_path):
  """Return the files matching the joined glob_path components under src_base.

  Raises Exception when the base directory is missing or the glob matches
  nothing, so an accidentally-empty suite never passes silently.
  """
  if not os.path.isdir(src_base):
    raise Exception('Torture test source directory missing: ' + src_base)
  pattern = os.path.join(src_base, *glob_path)
  matches = glob.glob(pattern)
  if matches:
    return matches
  raise Exception('Empty result list from glob pattern: ' + pattern)
def standard_tests(context, config, exclude, extra_args):
  """Run the C torture tests (plus the ieee subset) for one config.

  Returns the result of buildbot_lib.Command, or 1 when the step failed.
  """
  # TODO: make toolchain_tester.py runnable as a library?
  command = ['tools/toolchain_tester/toolchain_tester.py',
             '--exclude=tools/toolchain_tester/' + exclude,
             '--exclude=tools/toolchain_tester/known_failures_base.txt',
             '--config=' + config,
             '--append=CFLAGS:-std=gnu89']
  if 'pnacl' in config:
    command.append('--append_file=tools/toolchain_tester/extra_flags_pnacl.txt')
  command.extend(extra_args)
  # Both the plain C tests and the IEEE floating-point subset.
  command.extend(list_tests(TEST_PATH_C, '*c'))
  command.extend(list_tests(TEST_PATH_C, 'ieee', '*c'))
  print command
  try:
    return buildbot_lib.Command(context, command)
  except buildbot_lib.StepFailed:
    return 1
def eh_tests(context, config, exclude, extra_args):
  """Run the g++ exception-handling (eh) tests for one config.

  Returns the result of buildbot_lib.Command, or 1 when the step failed.
  """
  # TODO: toolchain_tester.py runnable as a library?
  command = ['tools/toolchain_tester/toolchain_tester.py',
             '--exclude=tools/toolchain_tester/' + exclude,
             '--exclude=tools/toolchain_tester/unsuitable_dejagnu_tests.txt',
             '--config=' + config]
  if 'pnacl' in config:
    command.append('--append_file=tools/toolchain_tester/extra_flags_pnacl.txt')
    # PNaCl runs C++ exceptions through the setjmp/longjmp lowering.
    command.append('--append=CFLAGS:--pnacl-exceptions=sjlj')
  command.extend(extra_args)
  command.extend(list_tests(TEST_PATH_CPP, 'eh', '*.C'))
  print command
  try:
    return buildbot_lib.Command(context, command)
  except buildbot_lib.StepFailed:
    return 1
def run_torture(status, compiler, platform, extra_args):
  """Run eh + standard torture tests for every opt mode of one compiler.

  :param status: buildbot_lib.BuildStatus carrying the build context.
  :param compiler: one of 'pnacl', 'naclgcc', 'localgcc', 'clang'.
  :param platform: 'x86-32', 'x86-64' or 'arm'.
  :param extra_args: extra flags forwarded to toolchain_tester.py.
  Returns 0 when every mode passed, 1 otherwise.
  """
  if platform not in ('x86-32', 'x86-64', 'arm'):
    print 'Unknown platform:', platform
  # Maps the CLI compiler name to the tester's config-name prefix.
  config_map = { 'pnacl': 'llvm_pnacl',
                 'naclgcc': 'nacl_gcc',
                 'localgcc': 'local_gcc',
                 'clang': 'nacl_clang'}
  failures = []
  if compiler == 'pnacl':
    # O3_O0 is clang -O3 followed by pnacl-translate -O0
    optmodes = ['O0', 'O3', 'O0_O0', 'O3_O0']
    if platform in ('arm', 'x86-32', 'x86-64'):
      # Add some extra Subzero configurations.
      optmodes.extend(['O3_sz', 'O3_O0_sz'])
      # TODO(stichnot): Consider pruning some configurations if the tests run
      # too long.
  else:
    optmodes = ['O0', 'O3']
  for optmode in optmodes:
    # TODO: support an option like -k? For now, always keep going
    config = '_'.join((config_map[compiler], platform, optmode))
    # clang uses a separate "++" config name for the C++ eh tests.
    eh_config = ('_'.join((config_map[compiler] + '++', platform, optmode))
                 if compiler =='clang' else config)
    # Test C++ exception handling.
    retcode = eh_tests(status.context, eh_config,
                       'known_eh_failures_' + compiler + '.txt', extra_args)
    if retcode:
      failures.append(optmode + ' eh')
    # Run the normal (non-exception-handling) tests.
    retcode = standard_tests(
        status.context, config,
        'known_failures_' + compiler + '.txt', extra_args)
    if retcode:
      failures.append(optmode + ' standard')
  if len(failures) > 0:
    print 'There were failed steps in modes:', failures
    return 1
  return 0
def main():
  """Parse argv, build the buildbot context and run the torture suite."""
  build_context = buildbot_lib.BuildContext()
  buildbot_lib.SetDefaultContextAttributes(build_context)
  build_context['max_jobs'] = int(os.environ.get('PNACL_CONCURRENCY', 4))
  build_status = buildbot_lib.BuildStatus(build_context)
  # Positional args: <compiler> <platform> [tester args...].  Anything less
  # prints usage and exits; extras are forwarded to toolchain_tester.py.
  # TODO(dschuff): it's a pain to pass through unknown arguments with optparse,
  # but if we add more, or once we have argparse (python2.7) everywhere, switch.
  if len(sys.argv) < 3:
    usage()
    sys.exit(1)
  compiler, platform = sys.argv[1], sys.argv[2]
  forwarded_args = sys.argv[3:]
  return run_torture(build_status, compiler, platform, forwarded_args)
# Script entry point; the process exit code is run_torture's return value.
if __name__ == '__main__':
  sys.exit(main())
| sbc100/native_client | tools/toolchain_tester/torture_test.py | Python | bsd-3-clause | 4,893 |
from pytest import raises
from sanic.app import Sanic
from sanic.blueprints import Blueprint
from sanic.request import Request
from sanic.response import HTTPResponse, text
# Shared counters asserted by the middleware tests below.
MIDDLEWARE_INVOKE_COUNTER = {"request": 0, "response": 0}
# Base64 of "test:test\n", used as a dummy Basic-auth credential.
AUTH = "dGVzdDp0ZXN0Cg=="
def test_bp_group_indexing(app: Sanic):
    """A blueprint group is indexable; out-of-range raises IndexError."""
    bp_one = Blueprint("blueprint_1", url_prefix="/bp1")
    bp_two = Blueprint("blueprint_2", url_prefix="/bp2")
    bp_group = Blueprint.group(bp_one, bp_two)
    assert bp_group[0] == bp_one
    with raises(expected_exception=IndexError):
        bp_group[3]
def test_bp_group_with_additional_route_params(app: Sanic):
    """Grouped blueprints keep per-route methods/version/name parameters,
    and group-level middleware (auth + response header) wraps every route."""
    blueprint_1 = Blueprint("blueprint_1", url_prefix="/bp1")
    blueprint_2 = Blueprint("blueprint_2", url_prefix="/bp2")
    @blueprint_1.route(
        "/request_path", methods=frozenset({"PUT", "POST"}), version=2
    )
    def blueprint_1_v2_method_with_put_and_post(request: Request):
        if request.method == "PUT":
            return text("PUT_OK")
        elif request.method == "POST":
            return text("POST_OK")
    @blueprint_2.route(
        "/route/<param>", methods=frozenset({"DELETE", "PATCH"}), name="test"
    )
    def blueprint_2_named_method(request: Request, param):
        if request.method == "DELETE":
            return text(f"DELETE_{param}")
        elif request.method == "PATCH":
            return text(f"PATCH_{param}")
    blueprint_group = Blueprint.group(
        blueprint_1, blueprint_2, url_prefix="/api"
    )
    # Group request middleware: reject anything without the expected token.
    @blueprint_group.middleware("request")
    def authenticate_request(request: Request):
        global AUTH
        auth = request.headers.get("authorization")
        if auth:
            # Dummy auth check. We can have anything here and it's fine.
            if AUTH not in auth:
                return text("Unauthorized", status=401)
        else:
            return text("Unauthorized", status=401)
    # Group response middleware: stamp every response with a marker header.
    @blueprint_group.middleware("response")
    def enhance_response_middleware(request: Request, response: HTTPResponse):
        response.headers.add("x-test-middleware", "value")
    app.blueprint(blueprint_group)
    header = {"authorization": " ".join(["Basic", AUTH])}
    # Versioned route lives under /v2/<group prefix>/<bp prefix>/...
    _, response = app.test_client.put(
        "/v2/api/bp1/request_path", headers=header
    )
    assert response.text == "PUT_OK"
    assert response.headers.get("x-test-middleware") == "value"
    _, response = app.test_client.post(
        "/v2/api/bp1/request_path", headers=header
    )
    assert response.text == "POST_OK"
    _, response = app.test_client.delete("/api/bp2/route/bp2", headers=header)
    assert response.text == "DELETE_bp2"
    _, response = app.test_client.patch("/api/bp2/route/bp2", headers=header)
    assert response.text == "PATCH_bp2"
    # Missing authorization header -> rejected by the request middleware.
    _, response = app.test_client.put("/v2/api/bp1/request_path")
    assert response.status == 401
def test_bp_group(app: Sanic):
    """Nested blueprint groups compose URL prefixes and middleware:
    routes resolve under /api/bp/... and group middleware fires per request."""
    blueprint_1 = Blueprint("blueprint_1", url_prefix="/bp1")
    blueprint_2 = Blueprint("blueprint_2", url_prefix="/bp2")
    @blueprint_1.route("/")
    def blueprint_1_default_route(request):
        return text("BP1_OK")
    @blueprint_2.route("/")
    def blueprint_2_default_route(request):
        return text("BP2_OK")
    # Inner group: /bp prefix around bp1 and bp2.
    blueprint_group_1 = Blueprint.group(
        blueprint_1, blueprint_2, url_prefix="/bp"
    )
    blueprint_3 = Blueprint("blueprint_3", url_prefix="/bp3")
    @blueprint_group_1.middleware("request")
    def blueprint_group_1_middleware(request):
        global MIDDLEWARE_INVOKE_COUNTER
        MIDDLEWARE_INVOKE_COUNTER["request"] += 1
    @blueprint_3.route("/")
    def blueprint_3_default_route(request):
        return text("BP3_OK")
    # Outer group: /api prefix around the inner group plus bp3.
    blueprint_group_2 = Blueprint.group(
        blueprint_group_1, blueprint_3, url_prefix="/api"
    )
    @blueprint_group_2.middleware("response")
    def blueprint_group_2_middleware(request, response):
        global MIDDLEWARE_INVOKE_COUNTER
        MIDDLEWARE_INVOKE_COUNTER["response"] += 1
    app.blueprint(blueprint_group_2)
    @app.route("/")
    def app_default_route(request):
        return text("APP_OK")
    # The app-level route is outside every group: no group middleware runs.
    _, response = app.test_client.get("/")
    assert response.text == "APP_OK"
    _, response = app.test_client.get("/api/bp/bp1")
    assert response.text == "BP1_OK"
    _, response = app.test_client.get("/api/bp/bp2")
    assert response.text == "BP2_OK"
    _, response = app.test_client.get("/api/bp3")
    assert response.text == "BP3_OK"
    # Outer response middleware covers all three group requests; the inner
    # request middleware only covers the two routes inside group 1.
    assert MIDDLEWARE_INVOKE_COUNTER["response"] == 3
    assert MIDDLEWARE_INVOKE_COUNTER["request"] == 2
def test_bp_group_list_operations(app: Sanic):
    """A blueprint group supports len(), append, del and item assignment."""
    blueprint_1 = Blueprint("blueprint_1", url_prefix="/bp1")
    blueprint_2 = Blueprint("blueprint_2", url_prefix="/bp2")
    @blueprint_1.route("/")
    def blueprint_1_default_route(request):
        return text("BP1_OK")
    @blueprint_2.route("/")
    def blueprint_2_default_route(request):
        return text("BP2_OK")
    blueprint_group_1 = Blueprint.group(
        blueprint_1, blueprint_2, url_prefix="/bp"
    )
    # NOTE(review): this blueprint reuses the name "blueprint_2" while serving
    # /bp3 -- looks like a copy-paste slip, though the test only exercises
    # list semantics so the duplicate name does not affect the assertions.
    blueprint_3 = Blueprint("blueprint_2", url_prefix="/bp3")
    @blueprint_3.route("/second")
    def blueprint_3_second_route(request):
        return text("BP3_OK")
    assert len(blueprint_group_1) == 2
    blueprint_group_1.append(blueprint_3)
    assert len(blueprint_group_1) == 3
    del blueprint_group_1[2]
    assert len(blueprint_group_1) == 2
    # Item assignment replaces in place, so the length is unchanged.
    blueprint_group_1[1] = blueprint_3
    assert len(blueprint_group_1) == 2
    assert blueprint_group_1.url_prefix == "/bp"
| yunstanford/sanic | tests/test_blueprint_group.py | Python | mit | 5,549 |
''' Face: Blend Shape Mesh connected to the rig. '''
#system global imports
#Bungie C# library imports
#software specific imports
import pymel.core as pm
import maya.cmds as cmds
import os
#Bungie python imports
import model.blendnode
import metautil.node_attributes
import metautil.lists
class Mesh(object):
    '''This is a mesh that is a blendshape. Does not need to be connected to the rig.'''

    def __init__(self, mesh):
        self.mesh = pm.PyNode(mesh)
        self.blendnode = None
        self.blendnodes = []
        self.parallel_node = None

    @classmethod
    def create(cls, mesh):
        '''
        creates an instance of the class using either the base mesh or the source mesh.
        :param object mesh: Creates an instance of BlendShapeModel to extend the mesh.
        :return BlendShapeModel: Returns an instance of BlendShapeModel represented as the mesh.
        '''
        mesh = pm.PyNode(mesh)
        return cls(mesh)

    def get_string_name(self, include_namespace=False):
        '''
        Returns the node name, optionally stripped of any namespace prefix.
        :param bool include_namespace: If True, keep the namespace prefix.
        :return string: The node name.
        '''
        if not include_namespace and ':' in self.mesh.nodeName():
            string_mesh = (self.mesh.nodeName()).split(':')[-1]
        else:
            return self.mesh.nodeName()
        return string_mesh

    def duplicate_mesh(self, mesh_name, remove_attrs=(), lambert_shader=False):
        '''
        Duplicates the head mesh and deletes the attribute that gets connected to the network node for the head mesh.
        :param string mesh_name: The name of the newly created mesh.
        :param list remove_attrs: removes attrs that are on the opposite mesh. The opposite mesh is duplicated from the base mesh.
        :param bool lambert_shader: Apply a basic lambert shader.
        :return class instance:
        '''
        duplicate_mesh = pm.duplicate(self.mesh, n=mesh_name)[0]
        self.lock_mesh(duplicate_mesh, lock=False)
        for attr in remove_attrs:
            if duplicate_mesh.hasAttr(attr):
                duplicate_mesh.deleteAttr(attr)
        if lambert_shader:
            self.create_material(duplicate_mesh)
        return Mesh(duplicate_mesh)

    def dup_and_delete(self, mesh_name=None, lambert_shader=False, remove_attrs=()):
        '''
        Duplicates itself, deletes the old copy and takes over the original name.
        :param string mesh_name: Name for the duplicate; defaults to the original mesh name.
        :param list remove_attrs: removes attrs that are on the opposite mesh. The opposite mesh is duplicated from the base mesh.
        :param bool lambert_shader: Apply a basic lambert shader.
        :return: object wrapped in an instance of the class
        '''
        # BUG FIX: the original stored the fallback name in an unused local
        # (pose_name) and passed mesh_name=None through to pm.duplicate().
        if not mesh_name:
            mesh_name = str(self.mesh)
        pm.rename(self.mesh, 'temp_mesh_DELETE')
        duplicate_mesh = pm.duplicate(self.mesh, n=mesh_name)[0]
        for attr in remove_attrs:
            if duplicate_mesh.hasAttr(attr):
                duplicate_mesh.deleteAttr(attr)
        self.lock_mesh(duplicate_mesh, lock=False)
        if lambert_shader:
            self.create_material(duplicate_mesh)
        pm.delete(self.mesh)
        return Mesh(duplicate_mesh)

    def mirror(self, opposite_pose, base_mesh, skip_dialog=False):
        '''
        Mirrors the mesh via a temporary blendshape + wrap deformer.
        :param opposite_pose: Existing/desired opposite mesh; its name is reused.
        :param object base_mesh: The base head mesh.
        :param bool skip_dialog: Kept for interface compatibility; not used here.
        :return: Opposite mesh that is wrapped in an instance of the class
        '''
        if base_mesh and opposite_pose:
            # Remember locked attributes so they can be restored afterwards.
            locked_attrs = metautil.node_attributes.get_lock_attributes(base_mesh)
            metautil.node_attributes.lock_attributes(base_mesh, lock=False)
            # target mesh is the one that will receive the blendshape and be mirrored
            target = pm.duplicate(base_mesh, returnRootsOnly=1, name='target')
            # the invShape mesh is the one that will be the final inverted blendshape
            invShape = pm.duplicate(base_mesh, returnRootsOnly=1, name='invShape')
            # create our blendshape, mirror the mesh, create the wrap deformer, and apply the blendshape
            bs = pm.blendShape(self.mesh, target)
            pm.xform(target, s=(-1, 1, 1))
            pm.select(invShape[0], target[0], add=1)
            cmds.CreateWrap()
            # "inflate" our mesh to the correct mirrored position using our blendshape. BIG calculation
            pm.setAttr(bs[0] + "." + str(self.mesh), 1)
            # (dropped an unused listHistory()/index() lookup that could raise
            # ValueError without contributing anything)
            # delete the history on our final shape so that we can delete the shape we used to make it
            pm.delete(invShape, constructionHistory=1)
            pm.select(target[0] + "BaseShape")
            x = pm.pickWalk(d="up")
            # delete our base shape node
            pm.delete(x)
            # delete our target shape so we're just left with the shape we want
            pm.delete(target)
            opposite_str = str(opposite_pose)
            if pm.objExists(opposite_pose):
                pm.delete(opposite_pose)
            # BUG FIX: Mesh.__init__ accepts a single node; the original also
            # passed skip_dialog, which raised TypeError at runtime.
            opposite_pose = Mesh(pm.rename(invShape, opposite_str))
            metautil.node_attributes.lock_attributes_dict(base_mesh, locked_attrs)
            return opposite_pose
        return

    def mirror_separate_mesh(self, opposite_name, mirror_axis='sx', remove_attrs=(), lambert_shader=False):
        '''
        Mirrors a separate mesh. Example: mirror left eyelash mesh to right eyelash mesh.
        :param string opposite_name: name of the opposite mesh
        :param string mirror_axis: which axis the mirror should happen on.
        :param list remove_attrs: any attributes that need to be removed.
        :param bool lambert_shader: connect a default shader
        :return: object wrapped in an instance of the class
        '''
        mirror_mesh = pm.duplicate(self.mesh, name=str(opposite_name))[0]
        mirror_attr = pm.PyNode(str(mirror_mesh)+'.'+mirror_axis)
        mirror_attr.set(-1)
        # Freeze the negative scale so the mirrored geometry is baked in.
        pm.makeIdentity(mirror_mesh, apply=True, t=False, r=False, s=True, n=0, pn=1)
        for attr in remove_attrs:
            if mirror_mesh.hasAttr(attr):
                mirror_mesh.deleteAttr(attr)
        if lambert_shader:
            self.create_material(mirror_mesh)
        return Mesh(mirror_mesh)

    def create_blendnode(self, parameters, using_parameter=True, name='blendshape_node'):
        '''
        Creates the blend shape node that connects all the shapes to the base head.
        :param list parameters: list of class instances for the parameter data.
        :param bool using_parameter: if True, it uses face parameters. False uses a list of shape names.
        :param string name: name of the blend shape node.
        :return BlendShapeNode: Returns the blend shape node, or None if no shapes exist.
        '''
        shapes = []
        if using_parameter:
            for parameter in parameters:
                pose_name = parameter.get_name()
                if pm.objExists(pose_name):
                    shapes.append(pose_name)
        else:
            shapes = parameters
        if shapes:
            self.blendnode = pm.blendShape(shapes, self.mesh, n=name)[0]
            return model.blendnode.BlendShapeNode(self.blendnode)

    def create_parallel_blendnode(self, source_mesh, base_mesh, name='parallel_blendshape'):
        '''
        Creates a parallel blend shape node.
        :param nt.Mesh source_mesh: Mesh that is the shape you want to blend.
        :param nt.Mesh base_mesh: A neutral posed mesh.
        :param string name: name of the blend node.
        :return BlendShapeNode: the wrapped parallel blend node.
        '''
        self.parallel_node = metautil.lists.first_in_list(pm.blendShape(source_mesh, base_mesh, self.mesh, parallel=True, n=name))
        self.parallel_node.weight[0].set(1)
        self.parallel_node.weight[1].set(1)
        return model.blendnode.BlendShapeNode(self.parallel_node)

    def create_parallel_shapes(self, blendnode, grp_name='mesh_blendshapes', remove_attrs=(), remove_from_layers=True):
        '''
        Creates blend shape meshes from the source head.
        :param nt.BlendShape blendnode: a node that has blend shapes attached.
        :param grp_name: A group node that is the parent of the newly created meshes.
        :param list remove_attrs: removes attributes that get duplicated with the mesh.
        :param bool remove_from_layers: Removes from the display layers.
        :return list shapes: returns a list of all the newly created shape names.
        '''
        blendnode_dict = blendnode.get_blendnode_dict()
        if not pm.objExists(grp_name):
            mesh_blendshapes = pm.group(em=True, n=grp_name)
        else:
            mesh_blendshapes = pm.PyNode(grp_name)
        shapes = []
        for x in blendnode_dict:
            # Toggle each target weight on, snapshot the deformed mesh, then
            # restore the weight to zero.
            blendnode.weight[blendnode_dict[x]].set(1)
            new_mesh = self.duplicate_mesh(x, remove_attrs=remove_attrs)
            if remove_from_layers:
                new_mesh.remove_from_layers()
            blendnode.weight[blendnode_dict[x]].set(0)
            pm.parent(new_mesh, mesh_blendshapes)
            shapes.append(x)
        return shapes

    def get_blendnodes(self, mesh=None):
        '''
        Returns all the blend nodes on a mesh.
        :param nt.Mesh mesh: A poly mesh; defaults to this instance's mesh.
        :return list: Returns all the blend nodes on the mesh.
        '''
        if not mesh:
            mesh = self.mesh
        connections = mesh.getShape().listConnections(type=(pm.nt.ObjectSet))
        blendnodes = []
        blendnode = metautil.lists.first_in_list(mesh.getShape().listConnections(type=(pm.nt.BlendShape)))
        if blendnode:
            blendnodes.append(blendnode)
        # Also walk object sets, which can reference blend nodes indirectly.
        for object_set in connections:
            sets = object_set.listConnections()
            [blendnodes.append(x) for x in sets if type(x) == pm.nt.BlendShape and not x in blendnodes]
        return blendnodes

    def delete_blendnodes(self):
        '''
        Removes all the blend shape nodes connected to the mesh.
        '''
        blendnodes = self.mesh.getShape().listConnections(type=pm.nt.BlendShape)
        pm.delete(blendnodes)
        self.blendnode = None
        self.blendnodes = []
        return

    def delete_parallel_blendnode(self):
        '''
        Removes the parallel blend shape node.
        '''
        # TODO: also remove the parallel blender node that gets created
        # alongside this one.
        pm.delete(self.parallel_node)
        return

    def create_material(self, mesh, name='lambert_material'):
        '''
        Creates a basic lambert material and assigns it to the mesh.
        :param object mesh: Mesh object
        :param string name: name of the lambert.
        :return list: Returns the material and the material group.
        '''
        material, material_group = pm.createSurfaceShader("lambert", name=name)
        material.addAttr('lambert_material', at='message')
        pm.sets(material_group, forceElement=[mesh])
        return [material, material_group]

    def lock_mesh(self, mesh=None, lock=True, attrs=None):
        '''
        Lock or unlock attributes on the mesh.
        :param object mesh: Mesh object; defaults to this instance's mesh.
        :param bool lock: True locks the attributes. False unlocks them.
        :param attrs: attribute name or list of names; defaults to TRS + visibility.
        '''
        locked_mesh = self.mesh
        if not attrs:
            attrs = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']
        if type(attrs) != list:
            attrs = [attrs]
        if mesh:
            locked_mesh = mesh
        for attr in attrs:
            pm.setAttr(str(locked_mesh)+'.'+attr, l=lock)
        return

    def remove_from_layers(self):
        '''
        Removes the mesh (and its shape) from whatever display layers it is in.
        '''
        display_layer = self.mesh.drawOverride.listConnections(type=pm.nt.DisplayLayer)
        display_layer_shape = self.mesh.getShape().drawOverride.listConnections(type=pm.nt.DisplayLayer)
        # PyMEL's "//" operator disconnects the layer's drawInfo plug.
        if display_layer:
            for layer in display_layer:
                layer.drawInfo // self.mesh.drawOverride
        if display_layer_shape:
            for layer in display_layer_shape:
                layer.drawInfo // self.mesh.getShape().drawOverride
        return

    def __str__(self):
        return self.mesh.__str__()

    def __getattr__(self, attrname):
        # Delegate unknown attributes to the wrapped PyNode.
        if attrname == 'pynode':
            raise AttributeError("this instance of {0} has no pynode".format(self.__class__.__name__))
        return getattr(self.mesh, attrname)

    def __melobject__(self):
        return self.mesh.__melobject__()

    def __repr__(self):
        return self.mesh.__repr__()
"""Base class for creating scikit-learn Pipeline compatible steps"""
from abc import ABC, abstractmethod
class Step(ABC):
    """Base class for scikit-learn ``Pipeline``-compatible steps.

    Subclasses *must* override :meth:`transform`.
    """

    @abstractmethod
    def transform(self, df, y=None):
        """Transform a DataFrame and return the result."""

    # scikit-learn estimator API compatibility.
    def fit(self, X, y=None, sample_weight=None):
        """No-op fit.

        BUG FIX: the original signature omitted ``self``, so calling
        ``step.fit(X, y)`` bound X to the instance; it also returned None,
        while sklearn's contract is that ``fit`` returns the estimator.
        """
        return self

    def get_params(self, deep=False):
        """Return the step's parameters (its instance attributes)."""
        return vars(self)

    def set_params(self, **kw):
        """Update parameters in place; returns self per sklearn convention."""
        self.__dict__.update(**kw)
        return self
# Example Usage
class Sampler(Step):
    """Randomly sample a fraction of the rows of a DataFrame."""

    def __init__(self, frac, random_state=None):
        self.frac = frac
        self.random_state = random_state

    def transform(self, df, y=None):
        # Sample, then copy so downstream steps get an independent frame.
        sampled = df.sample(frac=self.frac, random_state=self.random_state)
        return sampled.copy()
class ColMul(Step):
    """Multiply one column of a DataFrame by a constant factor."""

    def __init__(self, col, val):
        self.col = col
        self.val = val

    def transform(self, df, y=None):
        # Work on a copy so the caller's frame is left untouched.
        out = df.copy()
        out[self.col] = out[self.col] * self.val
        return out
# Example Usage
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
# Two-step pipeline: sample 20% of the rows, then scale column 'z' by 100.
pipe = Pipeline([
    ('sample', Sampler(0.2)),
    ('mul_z', ColMul('z', 100)),
])
# Parameters of a named step are addressed as <step>__<param>.
pipe.set_params(sample__random_state=17)
df = pd.DataFrame(np.random.rand(100, 3), columns=['x', 'y', 'z'])
print(pipe.transform(df))
| tebeka/pythonwise | sklearn-pipes.py | Python | bsd-3-clause | 1,410 |
import json
from datetime import datetime
import tornado.web
from sqlalchemy import and_
from pylm.registry.handlers.persistency.db import DB
from pylm.registry.handlers.persistency.models import ClusterLog, Cluster
from pylm.registry.messages.registry_pb2 import LogMessages
class LogsHandler(tornado.web.RequestHandler):
    """Read (GET) and append (POST) log lines for a cluster."""

    def get(self):
        """Return the cluster's log entries within a time window as JSON."""
        cluster = self.get_argument('cluster')
        raw_fr = self.get_argument('fr', default='1970-01-01T00:00:00.000000')
        raw_to = self.get_argument('to', default='2200-01-01T00:00:00.000000')
        # Window bounds arrive as ISO-like strings with microseconds.
        fr = datetime.strptime(raw_fr, "%Y-%m-%dT%H:%M:%S.%f")
        to = datetime.strptime(raw_to, "%Y-%m-%dT%H:%M:%S.%f")
        query = DB.session.query(ClusterLog).filter(
            and_(ClusterLog.cluster == cluster,
                 ClusterLog.when < to,
                 ClusterLog.when > fr))
        logs = [line.to_dict() for line in query.all()]
        self.set_status(200)
        self.write(json.dumps(logs).encode('utf-8'))

    def post(self):
        """Append protobuf-encoded log messages to a known cluster."""
        cluster = self.get_argument('cluster')
        known_cluster = DB.session.query(Cluster).filter(
            Cluster.key == cluster).one_or_none()
        if not known_cluster:
            # Unknown cluster key: reject the upload.
            self.set_status(400)
            self.write(b'')
            return
        buffer = LogMessages()
        buffer.ParseFromString(self.request.body)
        for message in buffer.messages:
            entry = ClusterLog()
            entry.when = datetime.now()
            entry.cluster = cluster
            entry.text = message
            DB.session.add(entry)
        DB.session.commit()
        # This is important. A POST response needs something in its body,
        # otherwise the client sees a 599 HTTP error.
        self.set_status(200)
        self.write(b'')
| nfqsolutions/pylm-registry | pylm/registry/handlers/logs.py | Python | agpl-3.0 | 1,876 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-23 19:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):

    # Must be applied after base migration 0002.
    dependencies = [
        ('base', '0002_auto_20170824_0252'),
    ]

    # Add two many-to-many relations (dosen, mahasiswa) to Penelitian.
    operations = [
        migrations.AddField(
            model_name='penelitian',
            name='dosen',
            field=models.ManyToManyField(to='base.Dosen'),
        ),
        migrations.AddField(
            model_name='penelitian',
            name='mahasiswa',
            field=models.ManyToManyField(to='base.Mahasiswa'),
        ),
    ]
| kurniantoska/ichsan_proj | unisan/base/migrations/0003_auto_20170824_0312.py | Python | apache-2.0 | 627 |
# !/usr/bin/env python3
"""
Custom Bi Directional Binary Data Parser
ModParser
"""
# ====================== GPL License and Copyright Notice ======================
# This file is part of ModParser
# Copyright (C) 2017 Diana Land
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ModParser. If not, see <http://www.gnu.org/licenses/>.
#
# https://github.com/iaz3/ModParser
#
# =============================================================================
| iaz3/ModParser | modparser/__init__.py | Python | gpl-3.0 | 983 |
#!/usr/bin/env python
# Tri-state truth values used throughout the solver.
NOT_SURE = 2
TRUE = 1
FALSE = 0
class Expression():
    """Collects encoded speaker statements as "<origin>?<ifTrue>:<ifFalse>".

    Within a clause, "a=b" means members a and b share a truth value and
    "a!b" means they differ.  The branch before ":" applies when the origin
    member is truthful, the branch after when it lies.
    """

    def __init__(self):
        self.conditions = []

    def saysIsDifFrom(self, origin, different, fr):
        # Truthful origin => the two differ; lying origin => they match.
        self.conditions.append(
            '{0}?{1}!{2}:{1}={2}'.format(origin, different, fr))

    def saysIsEqTo(self, origin, equals, to):
        # Truthful origin => the two match; lying origin => they differ.
        self.conditions.append(
            '{0}?{1}={2}:{1}!{2}'.format(origin, equals, to))

    def saysIsLiar(self, origin, liar):
        # Either way, the origin's value must differ from the accused liar's.
        self.conditions.append(
            '{0}?{0}!{1}:{0}!{1}'.format(origin, liar))

    def saysIsTrue(self, origin, true):
        # Either way, the origin's value must match the vouched-for member's.
        self.conditions.append(
            '{0}?{0}={1}:{0}={1}'.format(origin, true))

    def __repr__(self):
        return '|'.join('(' + cond + ')' for cond in self.conditions)
def evaluateEquals(origin, resultGroup, condition):
    """Propagate an equality condition "target=value" (1-based member numbers).

    If one side's truth state is known and the other's is NOT_SURE, the
    unknown side is set to match.  ``origin`` is accepted for symmetry with
    the other evaluators but is not used here.
    :return: (remove, error, resultGroup) tuple.
    """
    target,value = map(int, condition.split("="))
    # Convert from 1-based member numbers to list indices.
    target -= 1
    value -= 1
    if resultGroup[target].getState() == NOT_SURE or resultGroup[value].getState() == NOT_SURE:
        if resultGroup[target].getState() != NOT_SURE:
            resultGroup[value].setState(resultGroup[target].getState())
        elif resultGroup[value].getState() != NOT_SURE:
            resultGroup[target].setState(resultGroup[value].getState())
    elif resultGroup[target].getState() != resultGroup[value].getState():
        # Both sides known but different: the equality cannot hold.
        print "Conflicting condition: "+condition
    # NOTE(review): the error flag (second element) is True on every path,
    # not only on conflicts -- confirm whether the caller expects this.
    return True,True,resultGroup
def evaluateDifferent(origin, resultGroup, condition):
target,value = map(int, condition.split("!"))
target -= 1
value -= 1
if resultGroup[target].getState() == NOT_SURE or resultGroup[value].getState() == NOT_SURE:
if resultGroup[target].getState() != NOT_SURE:
resultGroup[value].setState(resultGroup[target].getOppositeState())
elif resultGroup[value].getState() != NOT_SURE:
resultGroup[target].setState(resultGroup[value].getOppositeState())
elif resultGroup[target].getState() != resultGroup[value].getState():
print "Conflicting condition: "+condition
return True,True,resultGroup
def evaluateNotSureOrigin(origin, resultGroup, condition1, condition2):
    """Placeholder for the case where the origin's truth state is unknown.

    BUG FIX: the original ``def`` had no body at all, which makes the whole
    module fail to parse (IndentationError).  Until the branch logic is
    written, fail loudly instead of silently corrupting solver state.
    """
    raise NotImplementedError('evaluateNotSureOrigin is not implemented yet')
def solveExpression(self, group, expression):
print "entered solveexpression: "+str(group)+" , "+expression
resultGroup = group[:]
tokens = expression.split("?")
conds = tokens[1].split(":")
origin = int(tokens[0])-1
condition = ""
target = 0
value = 0
if (resultGroup[origin].getState() == TRUE):
condition = conds[0]
elif (resultGroup[origin].getState() == FALSE):
condition = conds[1]
else:
return self.evaluateNotSureOrigin(origin, resultGroup, cond[0], cond[1])
if condition.count("=") > 0:
return self.evaluateEquals(origin, resultGroup, condition)
else:
return self.evaluateDifferent(origin, resultGroup, condition)
return True,False,resultGroup
def applyToGroup(self, group):
    """Run every stored condition against the group, stopping on error.

    :return: (error, resultGroup) -- error is True as soon as any expression
             reports a conflict, False when all conditions were applied.
    """
    resultGroup = group[:]
    print "entered applytogroup: "+str(group)
    for expression in self.conditions:
        remove, error, resultGroup = self.solveExpression(resultGroup, expression)
        print "return from solve - rem: "+str(remove)+" err: "+str(error)+" retGr: "+str(resultGroup)
        if error == True:
            return True, resultGroup
    return False, resultGroup
class Member():
    """One puzzle participant: tri-state truth value plus relation metadata.

    ``depends``/``same``/``differ`` hold 1-based member numbers (0 = unset);
    ``expression`` accumulates the statements this member makes.
    """
    def __init__(self):
        self.state = NOT_SURE
        self.depends = 0
        self.dependState = NOT_SURE
        self.same = 0
        self.differ = 0
        self.isorigin = False
        self.expression = Expression()
    def isOrigin(self):
        return self.isorigin
    def setIsOrigin(self, isorigin):
        self.isorigin = isorigin
    def getExpression(self):
        return self.expression
    def setExpression(self, expression):
        self.expression = expression
    def getState(self):
        return self.state
    def getOppositeState(self):
        # BUG FIX: the original returned TRUE for TRUE and FALSE for FALSE,
        # i.e. the *same* state.  The inverse is required by
        # evaluateDifferent, which uses it to force two members apart.
        if self.state == TRUE:
            return FALSE
        if self.state == FALSE:
            return TRUE
        return NOT_SURE
    def setState(self, state):
        self.state = state
    def getDepends(self):
        return self.depends, self.dependState
    def setDepends(self, dependIdx, dependState):
        self.depends = dependIdx
        self.dependState = dependState
    def getSame(self):
        return self.same
    def setSame(self, sameIdx, dependIdx, dependState):
        self.same = sameIdx
        self.setDepends(dependIdx, dependState)
    def getDiffer(self):
        return self.differ
    def setDiffer(self, differIdx, dependIdx, dependState):
        self.differ = differIdx
        self.setDepends(dependIdx, dependState)
    def clone(self):
        # Shallow clone: copies state/expression/origin flag only, matching
        # what getMemberListFromStatement needs.
        clone = Member()
        clone.setState(self.getState())
        clone.setExpression(self.getExpression())
        clone.setIsOrigin(self.isOrigin())
        return clone
    def __repr__(self):
        stateStr = "T" if self.state == TRUE else "L" if self.state == FALSE else "-"
        return stateStr+("" if self.depends == 0 else "d"+str(self.depends)+("T" if self.dependState == TRUE else "F" if self.dependState == FALSE else "-"))+("" if self.same == 0 else "S"+str(self.same))+("" if self.differ == 0 else "D"+str(self.differ))
def getMemberListFromStatement(sampleGroup, statement):
    """Parse one statement and derive the two premise groups it implies.

    A statement has the form "origin MODIFIER args..." with 1-based
    member indices, where MODIFIER is one of:
      T - origin says args[0] tells the truth
      L - origin says args[0] is a liar
      S - origin says args[0] and args[1] are the same
      D - origin says args[0] and args[1] are different

    Side effects on sampleGroup: every mentioned member is flagged as an
    "origin", and the statement is recorded in the subjects' Expression
    objects.

    Returns (stTrueGroup, stFalseGroup): fresh Member lists describing
    the states implied by assuming the statement is true, resp. false.
    NOTE(review): TRUE/FALSE appear to encode truth-teller vs. liar --
    confirm against the Expression class.
    """
    stTrueGroup = [Member() for x in xrange(len(sampleGroup))]
    stFalseGroup = [Member() for x in xrange(len(sampleGroup))]
    stList = statement.split(" ")
    origin = int(stList[0])      # 1-based index of the speaker
    modifier = stList[1]         # 'T', 'L', 'S' or 'D'
    args = map(int, stList[2:])  # 1-based indices of the subjects
    # Everyone mentioned by the statement becomes an "origin" member,
    # so the brute-force search will enumerate its states.
    sampleGroup[origin-1].setIsOrigin(True)
    for arg in args:
        sampleGroup[arg-1].setIsOrigin(True)
    # Build "true premise" group
    if modifier == 'T':
        # Origin truthfully says args[0] is truthful: both are TRUE.
        stTrueGroup[args[0]-1].setState(TRUE)
        stTrueGroup[origin-1].setState(TRUE)
        sampleGroup[args[0]-1].getExpression().saysIsTrue(origin, args[0])
    elif modifier == 'L':
        # Origin truthfully says args[0] lies: origin TRUE, subject FALSE.
        stTrueGroup[args[0]-1].setState(FALSE)
        stTrueGroup[origin-1].setState(TRUE)
        sampleGroup[args[0]-1].getExpression().saysIsLiar(origin, args[0])
    elif modifier == 'S':
        if args[0] == origin or args[1] == origin:
            # The speaker is one of the subjects: a truthful "same"
            # claim forces both subjects to TRUE.
            stTrueGroup[args[0]-1].setState(TRUE)
            stTrueGroup[args[1]-1].setState(TRUE)
        else:
            # Speaker not involved: record a mutual "same" link that is
            # conditioned on origin being TRUE.
            stTrueGroup[args[0]-1].setSame(args[1], origin, TRUE)
            stTrueGroup[args[1]-1].setSame(args[0], origin, TRUE)
        sampleGroup[args[0]-1].getExpression().saysIsEqTo(origin, args[0], args[1])
        sampleGroup[args[1]-1].getExpression().saysIsEqTo(origin, args[0], args[1])
    else: # modifier == 'D'
        if args[0] == origin:
            # Truthful speaker differs from the other subject.
            stTrueGroup[args[0]-1].setState(TRUE)
            stTrueGroup[args[1]-1].setState(FALSE)
        elif args[1] == origin:
            stTrueGroup[args[0]-1].setState(FALSE)
            stTrueGroup[args[1]-1].setState(TRUE)
        else:
            # Speaker not involved: record a mutual "differ" link
            # conditioned on origin being TRUE.
            stTrueGroup[args[0]-1].setDiffer(args[1], origin, TRUE)
            stTrueGroup[args[1]-1].setDiffer(args[0], origin, TRUE)
        sampleGroup[args[0]-1].getExpression().saysIsDifFrom(origin, args[0], args[1])
        sampleGroup[args[1]-1].getExpression().saysIsDifFrom(origin, args[0], args[1])
    # Build "false premise" group
    if modifier == 'T':
        # A false "is truthful" claim: the speaker lies and the subject
        # is not truthful.
        stFalseGroup[origin-1].setState(FALSE) #Set origin first to be overwritten if equal
        stFalseGroup[args[0]-1].setState(FALSE)
    elif modifier == 'L':
        # A false "is a liar" claim: the speaker lies, the subject is TRUE.
        stFalseGroup[origin-1].setState(FALSE) #Set origin first to be overwritten if equal
        stFalseGroup[args[0]-1].setState(TRUE)
    elif modifier == 'S':
        if args[0] == origin:
            # Lying "same" claim involving the speaker: speaker FALSE,
            # the other subject therefore the opposite (TRUE).
            stFalseGroup[args[0]-1].setState(FALSE)
            stFalseGroup[args[1]-1].setState(TRUE)
        elif args[1] == origin:
            stFalseGroup[args[0]-1].setState(TRUE)
            stFalseGroup[args[1]-1].setState(FALSE)
        else:
            # Lying "same" means the subjects actually differ; the link
            # is conditioned on origin being FALSE.
            stFalseGroup[args[0]-1].setDiffer(args[1], origin, FALSE)
            stFalseGroup[args[1]-1].setDiffer(args[0], origin, FALSE)
    else: # modifier == 'D':
        if args[0] == origin or args[1] == origin:
            # Lying "differ" claim involving the speaker: both FALSE.
            stFalseGroup[args[0]-1].setState(FALSE)
            stFalseGroup[args[1]-1].setState(FALSE)
        else:
            # Lying "differ" means the subjects are actually the same;
            # conditioned on origin being FALSE.
            stFalseGroup[args[0]-1].setSame(args[1], origin, FALSE)
            stFalseGroup[args[1]-1].setSame(args[0], origin, FALSE)
    return stTrueGroup, stFalseGroup
def cloneGroup(group):
    """Return a new list containing a clone of every member of *group*."""
    return [member.clone() for member in group]
def getResultGroups(groupList, resultList, group, index):
    """Brute-force enumeration of consistent state assignments.

    Recurses over *group* by position: members mentioned in some
    statement (isOrigin) try NOT_SURE/TRUE/FALSE, the rest stay
    NOT_SURE. For each complete assignment, every member's expression
    is applied repeatedly until the group's repr stops changing (fixed
    point) or a contradiction is found; consistent assignments are
    appended to *groupList* as clones.

    Fix: removed debug ``print`` statements that polluted the program's
    stdout (the judged contest output).
    NOTE(review): *resultList* is threaded through but never used --
    possibly a leftover; confirm before removing it from the signature.
    """
    stateList = [NOT_SURE, TRUE, FALSE]
    if not group[index].isOrigin():
        # Unmentioned members contribute nothing; keep them NOT_SURE.
        stateList = [NOT_SURE]
    for state in stateList:
        group[index].setState(state)
        if index == len(group)-1:
            # Complete assignment: propagate expressions to a fixed point.
            error = False
            clone = cloneGroup(group)
            signature = str(clone)
            sigOut = False
            while not error and not sigOut:
                for member in clone:
                    error, clone = member.getExpression().applyToGroup(clone)
                    if error == True:
                        break
                newSig = str(clone)
                if signature == newSig:
                    sigOut = True  # nothing changed -> fixed point reached
                else:
                    signature = newSig
            if error == False:
                # Consistent assignment: keep a snapshot of the states.
                groupList.append(cloneGroup(group))
        else:
            getResultGroups(groupList, resultList, group, index+1)
for case in range(1,int(raw_input())+1): # For all test cases
group_len, statement_len = map(int, raw_input().split(" ")) # Get test case information
group = [Member() for x in xrange(group_len)]
statements = []
for i in range(statement_len):
statements.append(raw_input())
for statement in statements:
print "Statement: "+statement
getMemberListFromStatement(group, statement);
resultGroups = []
resultStates = []
# brute force com possibilidades para verificar dependencias.
getResultGroups(resultGroups, resultStates, group, 0)
print resultGroups
print "Case #%d: %s" % (case, " ".join([str(group[x]) for x in xrange(len(group))])) # Report results
| diogobohm/codejam | python/2010-africa/D.py | Python | gpl-3.0 | 9,239 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.