code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
import unittest
#from fusedwind.plant_flow.asym import AEPMultipleWindRoses
# from gclarsen.fused import FGCLarsen, FWindTurbine, FWindFarm, \
# generate_GenericWindTurbinePowerCurveVT, generate_GenericWindFarmTurbineLayout, \
# rosetta
import fusedwake.WindFarm as wf
import fusedwake.WindTurbine as wt
import fusedwake.gcl.python as gcl
import numpy as np
from fusedwake.py4we.wasp import WWH
import os
script_dir = os.path.dirname(os.path.realpath(__file__))
#from fusedwake.fusedwasp import PlantFromWWH
#
# class FGCLarsenTestCase(unittest.TestCase):
#
# def setUp(self):
# self.v80 = wt.WindTurbine('Vestas v80 2MW offshore','V80_2MW_offshore.dat',70,40)
# self.HR1 = wf.WindFarm('Horns Rev 1','HR_coordinates.dat',self.v80)
#
# def tearDown(self):
# pass
#
# def test_Init(self):
# gcl = FGCLarsen()
#
# def test_FWindTurbine(self):
# """Generate a GenericWindTurbinePowerCurveVT from a WindTurbine, and
# converte it back into a WindTurbine. Test that the conversion has not
# corrupted any data.
# """
# fwt = FWindTurbine(self.v80.name, generate_GenericWindTurbinePowerCurveVT(self.v80))
# # Test that the two turbines are identical
# for i in ['H', 'R', 'u_cutin', 'u_cutout', 'name']:
# self.assertEqual(getattr(fwt, i), getattr(self.v80, i))
# for i in ['ref_P', 'ref_u', 'ref_CT']:
# np.testing.assert_almost_equal(getattr(fwt, i), getattr(self.v80, i))
#
# def test_FWindFarm(self):
# fwf = FWindFarm(self.HR1.name, generate_GenericWindFarmTurbineLayout(self.HR1))
# # Test that the two wind farms are identical
# for i in ['name', 'nWT']:
# self.assertEqual(getattr(fwf, i), getattr(self.HR1, i))
# for i in ['name', 'nWT']:
# self.assertEqual(getattr(fwf, i), getattr(self.HR1, i))
#
#
# def test_Run(self):
# inputs = dict(
# WS=8.0,
# z0=0.0001,
# TI=0.05,
# WD=270,
# WF=self.HR1,
# NG=4,
# sup='lin',
# pars=[0.5,0.9,-0.124807893,0.136821858,15.6298,1.0])
# P_WT,U_WT, Ct = gcl.GCLarsen(**inputs)
# fgcl = FGCLarsen()
# # Setting the inputs
# for k,v in rosetta.iteritems():
# setattr(fgcl, v, inputs[k])
# fgcl.wt_layout = generate_GenericWindFarmTurbineLayout(inputs['WF'])
# fgcl.run()
# np.testing.assert_almost_equal(P_WT, fgcl.wt_power)
# np.testing.assert_almost_equal(U_WT, fgcl.wt_wind_speed)
#
class GCLarsen_v2_TestCase(unittest.TestCase):
    """Checks that the new gcl.GCLarsen implementation reproduces GCLarsen_v0."""

    def setUp(self):
        # Build the Horns Rev 1 farm of Vestas V80 turbines used by all tests.
        turbine = wt.WindTurbine('Vestas v80 2MW offshore',
                                 script_dir + '/V80_2MW_offshore.dat', 70, 40)
        farm = wf.WindFarm(name='Horns Rev 1',
                           coordFile=script_dir + '/HR_coordinates.dat',
                           WT=turbine)
        self.v80 = turbine
        self.HR1 = farm
        # Common wake-model inputs shared by the old and new implementations.
        self.inputs = {
            'WS': 8.0,
            'z0': 0.0001,
            'TI': 0.05,
            'WD': 270,
            'WF': farm,
            'NG': 4,
            'sup': 'lin',
            'pars': [0.5, 0.9, -0.124807893, 0.136821858, 15.6298, 1.0],
        }

    def tearDown(self):
        pass

    def test_GCLarsen_v2(self):
        """The new GCLarsen must match the reference GCLarsen_v0 outputs
        (power, wind speed and thrust coefficient per turbine)."""
        reference = gcl.GCLarsen_v0(**self.inputs)
        candidate = gcl.GCLarsen(**self.inputs)
        for expected, actual in zip(reference, candidate):
            np.testing.assert_almost_equal(expected, actual)
# class test_AEP(unittest.TestCase):
# def test_HR(self):
# ### Single wind rose type
# hrAEP = AEPMultipleWindRoses()
# hrAEP.add('wf', FGCLarsen())
# hrAEP.configure()
# hrAEP.connect('wt_layout', 'wf.wt_layout')
# hrAEP.wt_layout = PlantFromWWH(filename='wind_farms/horns_rev/hornsrev1_turbine_nodescription.wwh').wt_layout
# hrAEP.wind_directions = [0., 90., 180., 270.]#linspace(0.0, 360.0, 3)[:-1]
# hrAEP.wind_speeds = [8., 12., 24.]#linspace(4.0, 25.0, 3)
# hrAEP.run()
# print hrAEP.net_aep
# print hrAEP.wt_aep
# Allow running this module directly as a test script.
if __name__ == "__main__":
    unittest.main()
| rethore/FUSED-Wake | fusedwake/gcl/python/test/test_gclarsen.py | Python | agpl-3.0 | 4,252 |
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Sample test layers
$Id: samplelayers.py 30501 2005-05-25 17:40:56Z tim_one $
"""
# Module-level trackers for which sample layer is currently active.
layer = '0'  # Internal to samples. Not part of layer API
layerx = '0'


class Layer1:
    """Base sample layer.

    setUp/tearDown move the module-global ``layer`` between this layer's
    ``base`` ('0') and its own id ('1'), raising ValueError if entered or
    left from the wrong state.
    """
    # Internal to samples. Not part of layer API:
    layer = '1'
    base = '0'
    layerx = '0'

    # Modernized from the pre-decorator ``setUp = classmethod(setUp)`` form;
    # behavior is identical, Layer1.setUp()/tearDown() remain classmethods.
    @classmethod
    def setUp(cls):
        global layer
        # Entering this layer is only legal from its base layer.
        if layer != cls.base:
            raise ValueError("Bad layer, %s, for %s." % (layer, cls))
        layer = cls.layer

    @classmethod
    def tearDown(cls):
        global layer
        # Leaving is only legal while this layer is the active one.
        if layer != cls.layer:
            raise ValueError("Bad layer, %s, for %s." % (layer, cls))
        layer = cls.base
class Layerx:
    """Sample layer tracked by the separate module-global ``layerx``.

    Mirrors Layer1 but drives the ``layerx`` counter between ``basex`` ('0')
    and this layer's id ('1').
    """
    layerx = '1'  # Internal to samples. Not part of layer API
    basex = '0'

    # Modernized from ``setUp = classmethod(setUp)`` to the decorator form;
    # behavior is unchanged.
    @classmethod
    def setUp(cls):
        global layerx
        if layerx != cls.basex:
            raise ValueError("Bad layerx, %s, for %s." % (layerx, cls))
        layerx = cls.layerx

    @classmethod
    def tearDown(cls):
        global layerx
        if layerx != cls.layerx:
            raise ValueError("Bad layerx, %s, for %s." % (layerx, cls))
        layerx = cls.basex
class Layer11(Layer1):
    """Second-level layer stacked on Layer1: moves ``layer`` '1' -> '11'."""
    layer = '11' # Internal to samples. Not part of layer API
    base = '1' # Internal to samples. Not part of layer API
class Layer12(Layer1):
    """Second-level layer stacked on Layer1: moves ``layer`` '1' -> '12'."""
    layer = '12' # Internal to samples. Not part of layer API
    base = '1' # Internal to samples. Not part of layer API
class Layer111(Layerx, Layer11):
    """Third-level layer advancing both trackers: ``layer`` '11' -> '111'
    and ``layerx`` '1' -> '2'.

    Overrides setUp/tearDown to validate and update both globals in one step.
    """
    layer = '111'  # Internal to samples. Not part of layer API
    base = '11'  # Internal to samples. Not part of layer API
    layerx = '2'  # Internal to samples. Not part of layer API
    basex = '1'

    # Modernized from ``setUp = classmethod(setUp)`` to the decorator form;
    # behavior is unchanged.
    @classmethod
    def setUp(cls):
        global layer
        if layer != cls.base:
            raise ValueError("Bad layer, %s, for %s." % (layer, cls))
        layer = cls.layer
        global layerx
        if layerx != cls.basex:
            raise ValueError("Bad layerx, %s, for %s." % (layerx, cls))
        layerx = cls.layerx

    @classmethod
    def tearDown(cls):
        global layer
        if layer != cls.layer:
            raise ValueError("Bad layer, %s, for %s." % (layer, cls))
        layer = cls.base
        global layerx
        if layerx != cls.layerx:
            raise ValueError("Bad layerx, %s, for %s." % (layerx, cls))
        layerx = cls.basex
class Layer121(Layer12):
    """Third-level layer under Layer12: moves ``layer`` '12' -> '121'."""
    layer = '121' # Internal to samples. Not part of layer API
    base = '12' # Internal to samples. Not part of layer API
class Layer112(Layerx, Layer11):
    """Third-level layer advancing both trackers: ``layer`` '11' -> '112'
    and ``layerx`` '1' -> '2'.

    Same double-tracker behavior as Layer111, with its own layer id.
    """
    layer = '112'  # Internal to samples. Not part of layer API
    base = '11'  # Internal to samples. Not part of layer API
    layerx = '2'  # Internal to samples. Not part of layer API
    basex = '1'

    # Modernized from ``setUp = classmethod(setUp)`` to the decorator form;
    # behavior is unchanged.
    @classmethod
    def setUp(cls):
        global layer
        if layer != cls.base:
            raise ValueError("Bad layer, %s, for %s." % (layer, cls))
        layer = cls.layer
        global layerx
        if layerx != cls.basex:
            raise ValueError("Bad layerx, %s, for %s." % (layerx, cls))
        layerx = cls.layerx

    @classmethod
    def tearDown(cls):
        global layer
        if layer != cls.layer:
            raise ValueError("Bad layer, %s, for %s." % (layer, cls))
        layer = cls.base
        global layerx
        if layerx != cls.layerx:
            raise ValueError("Bad layerx, %s, for %s." % (layerx, cls))
        layerx = cls.basex
class Layer122(Layer12):
    """Third-level layer under Layer12: moves ``layer`` '12' -> '122'."""
    layer = '122' # Internal to samples. Not part of layer API
    base = '12' # Internal to samples. Not part of layer API
| Donkyhotay/MoonPy | zope/testing/testrunner-ex/samplelayers.py | Python | gpl-3.0 | 4,377 |
# -*- coding: utf-8 -*-
#
# Flask-Session documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 25 19:56:40 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Prepend the package root so autodoc imports the in-tree flask_session.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.append(os.path.abspath('_themes'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-Session'
copyright = u'2014, Shipeng Feng'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# Custom Flask theme loaded from the local _themes directory (see
# html_theme_path below).
html_theme = 'flask_small'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'index_logo': 'flask-session.png',
    'github_fork': 'fengsp/flask-session'
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-Sessiondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'Flask-Session.tex', u'Flask-Session Documentation',
     u'Shipeng Feng', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'flask-session', u'Flask-Session Documentation',
     [u'Shipeng Feng'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Flask-Session', u'Flask-Session Documentation',
     u'Shipeng Feng', 'Flask-Session', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Cross-reference targets for :mod:/:class: links into Python, Flask and
# Werkzeug documentation (plain-URL keys are the legacy intersphinx form).
intersphinx_mapping = {
    'http://docs.python.org/dev': None,
    'http://flask.pocoo.org/docs/': None,
    'http://werkzeug.pocoo.org/docs/': None,
    'https://pythonhosted.org/Flask-SQLAlchemy/': None
}
| mchiocca/flask-session | docs/conf.py | Python | bsd-3-clause | 8,648 |
# coding=utf-8
# Author: Dieter Blomme <dieterblomme@gmail.com>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
from sickbeard import logger
from sickrage.helper.exceptions import ex
from libtrakt import TraktAPI
from libtrakt.exceptions import traktException, traktServerBusy, traktAuthException
class TraktNotifier(object):
    """
    A "notifier" for trakt.tv which keeps track of what has and hasn't been added to your library.
    """

    # Trakt is a library/watchlist sync target, not a notification target,
    # so the standard notifier hooks below are intentionally no-ops.
    def notify_snatch(self, ep_name):
        pass

    def notify_download(self, ep_name):
        pass

    def notify_subtitle_download(self, ep_name, lang):
        pass

    def notify_git_update(self, new_version):
        pass

    def notify_login(self, ipaddress=""):
        pass

    def update_library(self, ep_obj):
        """
        Sends a request to trakt indicating that the given episode is part of our library.

        ep_obj: The TVEpisode object to add to trakt
        """
        trakt_id = sickbeard.indexerApi(ep_obj.show.indexer).config['trakt_id']
        trakt_api = TraktAPI(sickbeard.SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)

        if sickbeard.USE_TRAKT:
            try:
                # Show-level payload; the episode list is filled in below.
                data = {
                    'shows': [
                        {
                            'title': ep_obj.show.name,
                            'year': ep_obj.show.startyear,
                            'ids': {},
                        }
                    ]
                }

                if trakt_id == 'tvdb_id':
                    data['shows'][0]['ids']['tvdb'] = ep_obj.show.indexerid
                else:
                    data['shows'][0]['ids']['tvrage'] = ep_obj.show.indexerid

                # Optionally drop the whole series from the watchlist first.
                if sickbeard.TRAKT_SYNC_WATCHLIST:
                    if sickbeard.TRAKT_REMOVE_SERIESLIST:
                        trakt_api.traktRequest("sync/watchlist/remove", data, method='POST')

                # Add Season and Episode + Related Episodes
                data['shows'][0]['seasons'] = [{'number': ep_obj.season, 'episodes': []}]

                for relEp_Obj in [ep_obj] + ep_obj.relatedEps:
                    data['shows'][0]['seasons'][0]['episodes'].append({'number': relEp_Obj.episode})

                # Optionally drop the specific episodes from the watchlist.
                if sickbeard.TRAKT_SYNC_WATCHLIST:
                    if sickbeard.TRAKT_REMOVE_WATCHLIST:
                        trakt_api.traktRequest("sync/watchlist/remove", data, method='POST')

                # update library
                trakt_api.traktRequest("sync/collection", data, method='POST')

            except (traktException, traktAuthException, traktServerBusy) as e:
                logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING)

    def update_watchlist(self, show_obj=None, s=None, e=None, data_show=None, data_episode=None, update="add"):
        """
        Sends a request to trakt to add or remove items from the watchlist.

        show_obj: The TVShow object to add to trakt
        s: season number
        e: episode number
        data_show: structured object of shows trakt type
        data_episode: structured object of episodes trakt type
        update: type of action, "add" or "remove"

        Returns False on failure or bad arguments, True otherwise.
        """
        trakt_api = TraktAPI(sickbeard.SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)

        if sickbeard.USE_TRAKT:
            data = {}
            try:
                # Build the show payload from a TVShow object, or take a
                # pre-built trakt-style structure as-is.
                if show_obj is not None:
                    trakt_id = sickbeard.indexerApi(show_obj.indexer).config['trakt_id']
                    data = {
                        'shows': [
                            {
                                'title': show_obj.name,
                                'year': show_obj.startyear,
                                'ids': {},
                            }
                        ]
                    }
                    if trakt_id == 'tvdb_id':
                        data['shows'][0]['ids']['tvdb'] = show_obj.indexerid
                    else:
                        data['shows'][0]['ids']['tvrage'] = show_obj.indexerid
                elif data_show is not None:
                    data.update(data_show)
                else:
                    logger.log(u"there's a coding problem contact developer. It's needed to be provided at lest one of the two: data_show or show_obj", logger.WARNING)
                    return False

                if data_episode is not None:
                    data['shows'][0].update(data_episode)
                elif s is not None:
                    # FIX: the original used the key 'season' (singular), which
                    # the trakt sync API ignores; 'seasons' matches the payload
                    # built in update_library and the trakt API schema.
                    season = {
                        'seasons': [
                            {
                                'number': s,
                            }
                        ]
                    }
                    if e is not None:
                        episode = {
                            'episodes': [
                                {
                                    'number': e
                                }
                            ]
                        }
                        season['seasons'][0].update(episode)
                    data['shows'][0].update(season)

                trakt_url = "sync/watchlist"
                if update == "remove":
                    trakt_url += "/remove"

                trakt_api.traktRequest(trakt_url, data, method='POST')
            except (traktException, traktAuthException, traktServerBusy) as e:
                logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING)
                return False
        return True

    def trakt_show_data_generate(self, data):
        """Build a trakt 'shows' payload from (indexer, indexerid, title, year) tuples."""
        showList = []
        for indexer, indexerid, title, year in data:
            trakt_id = sickbeard.indexerApi(indexer).config['trakt_id']
            show = {'title': title, 'year': year, 'ids': {}}
            if trakt_id == 'tvdb_id':
                show['ids']['tvdb'] = indexerid
            else:
                show['ids']['tvrage'] = indexerid
            showList.append(show)
        post_data = {'shows': showList}
        return post_data

    def trakt_episode_data_generate(self, data):
        """Build a trakt 'seasons' payload from (season, episode) tuples.

        Seasons appear in first-occurrence order; each season's episodes keep
        their order from `data`.
        """
        # Find the unique seasons, preserving first-occurrence order.
        uniqueSeasons = []
        for season, episode in data:
            if season not in uniqueSeasons:
                uniqueSeasons.append(season)
        # Group the episode numbers under their season.
        seasonsList = []
        for searchedSeason in uniqueSeasons:
            episodesList = []
            for season, episode in data:
                if season == searchedSeason:
                    episodesList.append({'number': episode})
            seasonsList.append({'number': searchedSeason, 'episodes': episodesList})
        post_data = {'seasons': seasonsList}
        return post_data

    def test_notify(self, username, blacklist_name=None):
        """
        Sends a test notification to trakt with the given authentication info and returns a boolean
        representing success.

        username: The username to use
        blacklist_name: slug of trakt list used to hide not interested show

        Returns: a status message string describing success or failure.
        """
        try:
            trakt_api = TraktAPI(sickbeard.SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
            trakt_api.validateAccount()
            # FIX: the original kept a `found` flag that was never set to True,
            # making the post-loop check vacuous; return directly on a match.
            # Also `blacklist_name and blacklist_name is not None` was redundant.
            if blacklist_name:
                trakt_lists = trakt_api.traktRequest("users/" + username + "/lists")
                for trakt_list in trakt_lists:
                    if trakt_list['ids']['slug'] == blacklist_name:
                        return "Test notice sent successfully to Trakt"
                return "Trakt blacklist doesn't exists"
            else:
                return "Test notice sent successfully to Trakt"
        except (traktException, traktAuthException, traktServerBusy) as e:
            logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING)
            return "Test notice failed to Trakt: %s" % ex(e)
notifier = TraktNotifier
| pkoutsias/SickRage | sickbeard/notifiers/trakt.py | Python | gpl-3.0 | 8,881 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Train a residual BC policy on top of a learned agent.
"""
import os
import pickle
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v2 as tf
from rrlfd.bc import train_utils
from rrlfd.residual import setup
from tensorflow.io import gfile
# Flags selecting the task, dataset and evaluation setup.
flags.DEFINE_string('domain', None, 'Domain from which to load task.')
flags.DEFINE_string('demo_task', None,
                    'Task used to gather demos in dataset, if different from '
                    'eval_task.')
flags.DEFINE_string('eval_task', None,
                    'If set, evaluate trained policy on this task.')
flags.DEFINE_enum('input_type', 'depth', ['depth', 'rgb', 'rgbd', 'position'],
                  'Input modality.')
flags.DEFINE_integer('test_set_size', 0,
                     'Number of additional demonstrations on which to evaluate '
                     'final model.')
flags.DEFINE_integer('test_set_start', None,
                     'Where in the dataset to start test set.')
flags.DEFINE_integer('batch_size', 64, 'Batch size for training.')
flags.DEFINE_integer('seed', 2, 'Experiment seed.')
flags.DEFINE_integer('eval_seed', 1, 'Environtment seed for evaluation.')
flags.DEFINE_boolean('increment_eval_seed', False,
                     'If True, increment eval seed after each eval episode.')
flags.DEFINE_integer('num_eval_episodes', 100,
                     'Number of episodes to evaluate.')
flags.DEFINE_boolean('collapse_in_eval', True,
                     'If True, collapse RL policy to its mean in evaluation.')
flags.DEFINE_boolean('stop_if_stuck', False,
                     'If True, end episode if observations and actions are '
                     'stuck.')
flags.DEFINE_integer('eval_freq', 100_000,
                     'Frequency (in environment training steps) with which to '
                     'evaluate policy.')
flags.DEFINE_boolean('eval_only', False,
                     'If True, evaluate policy ckpts of trained policy.')
# Flags for BC agent.
flags.DEFINE_boolean('binary_grip_action', True,
                     'If True, use open/close action space for gripper. Else '
                     'use gripper velocity.')
flags.DEFINE_enum('action_norm', 'unit', ['unit', 'zeromean_unitvar'],
                  'Which normalization to apply to actions.')
flags.DEFINE_enum('residual_action_norm', 'unit',
                  ['none', 'unit', 'zeromean_unitvar', 'centered'],
                  'Which normalization to apply to residual actions.')
flags.DEFINE_float('residual_action_norm_scale', 1.0,
                   'Factor by which to scale residual actions. Applied to raw '
                   'predictions in none, unit and centered normalisation, and '
                   'to standard deviation in the case of zeromean_unitvar.')
flags.DEFINE_enum('signals_norm', 'none', ['none', 'unit', 'zeromean_unitvar'],
                  'Which normalization to apply to scalar observations.')
flags.DEFINE_string('original_demos_file', None,
                    'Dataset used to compute stats for action normalization.')
flags.DEFINE_integer('max_demos_to_load', None,
                     'Maximum number of demos from demos_file (in order) to '
                     'use to compute action stats.')
flags.DEFINE_integer('max_demo_length', None,
                     'If set, trim demonstrations to this length.')
flags.DEFINE_float('val_size', 0.05,
                   'Amount of data to exlude from action normalisation stats. '
                   'If < 1, the fraction of total loaded data points. Else the '
                   'number of data points.')
flags.DEFINE_boolean('val_full_episodes', True,
                     'If True, split data into train and validation on an '
                     'episode basis. Else split by individual time steps.')
# Flags for the residual training data split.
flags.DEFINE_integer('residual_max_demos_to_load', 100,
                     'Number of demonstrations (in order, starting after the '
                     'last demo used by the base agent) to use for residual '
                     'training.')
flags.DEFINE_float('residual_val_size', 0.05,
                   'Val size applies to residual training.')
flags.DEFINE_string('last_activation', None,
                    'Activation function to apply to network output, if any.')
flags.DEFINE_list('fc_layer_sizes', [],
                  'Sizes of fully connected layers to add on top of bottleneck '
                  'layer, if any.')
flags.DEFINE_enum('regression_loss', 'l2', ['l2', 'nll'],
                  'Loss function to minimize for continuous action dimensions.')
flags.DEFINE_float('l2_weight', 0.9,
                   'How much relative weight to give to linear velocity loss.')
flags.DEFINE_integer('num_input_frames', 3,
                     'Number of frames to condition base policy on.')
flags.DEFINE_boolean('crop_frames', True,
                     'If True, crop input frames by 16 pixels in H and W.')
flags.DEFINE_boolean('augment_frames', True,
                     'If True, augment images by scaling, cropping and '
                     'rotating.')
flags.DEFINE_list('target_offsets', [1, 10, 20, 30],
                  'Offsets in time for actions to predict in behavioral '
                  'cloning.')
flags.DEFINE_enum('network', None,
                  ['resnet18', 'resnet18_narrow32', 'resnet50', 'simple_cnn',
                   'hand_vil'],
                  'Policy network of base policy.')
flags.DEFINE_integer('num_epochs', 100, 'Number of epochs to train for.')
flags.DEFINE_list('epochs_to_eval', [],
                  'Epochs at which to evaluate checkpoint with best validation '
                  'error so far.')
flags.DEFINE_enum('optimizer', 'adam', ['adam', 'rmsprop'],
                  'Keras optimizer for training.')
flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate for training.')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight decay for training.')
# Flags for the residual RL policy and its initialization.
flags.DEFINE_boolean('predict_residual', True,
                     'If True, train a residual agent. Else train RL from '
                     'scratch without base agent.')
flags.DEFINE_enum('rl_observation_network', None,
                  ['resnet18', 'resnet18_narrow32', 'resnet50', 'simple_cnn',
                   'hand_vil'],
                  'Observation network of residual policy. If None, '
                  'observation network of base agent is reused.')
flags.DEFINE_boolean('late_fusion', False,
                     'If True, fuse stacked frames after convolutional layers. '
                     'If False, fuse at network input.')
flags.DEFINE_string('policy_init_path', None,
                    'If set, initialize network weights from a pickle file at '
                    'this path.')
flags.DEFINE_string('rl_observation_network_ckpt', None,
                    'If set, checkpoint from which to load observation network '
                    'weights.')
flags.DEFINE_string('base_controller', None,
                    'If set, a black-box controller to use for base actions.')
flags.DEFINE_string('bc_ckpt_to_load', None,
                    'If set, checkpoint from which to load base policy.')
flags.DEFINE_string('rl_ckpt_to_load', None,
                    'If set, checkpoint from which to load residual policy.')
flags.DEFINE_string('eval_id', '', 'ID to add to evaluation output path.')
flags.DEFINE_boolean('render_eval', False,
                     'If True, render environment during evaluation.')
# TODO(minttu): Consolidate flags with bc/train_utils and bc/train.
flags.DEFINE_integer('eval_episodes_to_save', 0,
                     'The number of eval episodes whose frames to write to '
                     'file.')
flags.DEFINE_boolean('init_from_bc', False,
                     'If True, use BC agent loaded from bc_ckpt_to_load as '
                     'initialization for RL observation and policy nets.')
flags.DEFINE_boolean('init_feats_from_bc', False,
                     'If True, initialize RL observation network with BC.')
flags.DEFINE_boolean('clip_actions', False,
                     'If True, clip actions to unit interval before '
                     'normalization.')
flags.DEFINE_string('logdir', None, 'Location to log results to.')
flags.DEFINE_boolean('load_saved', False,
                     'If True, load saved model from checkpoint. Else train '
                     'from scratch.')
flags.DEFINE_enum('base_visible_state', 'robot', ['image', 'robot', 'full'],
                  'State features on which to condition the base policy.')
flags.DEFINE_enum('residual_visible_state', 'robot', ['image', 'robot', 'full'],
                  'State features on which to condition the residual policy. '
                  'If using full state, the BC net features are replaced with '
                  'these true state features in input to RL policy.')
flags.DEFINE_float('bernoulli_rate', 0.,
                   'Fraction of time to use bernoulli exploration for gripper '
                   'action.')
flags.DEFINE_float('sticky_rate', 0.,
                   'Stickiness rate of bernoulli exploration for gripper '
                   'action.')
flags.DEFINE_string('exp_id', '', 'Experiment ID to add to output paths.')
flags.DEFINE_string('job_id', '', 'Job ID to add to output paths.')
# NOTE(review): main() below reads FLAGS.split_seed, which is not defined in
# this file — presumably defined by an imported module (e.g. bc train_utils);
# verify before removing or renaming flags here.
FLAGS = flags.FLAGS
def reset_dir(directory):
  """Recursively delete `directory` if it exists, so a fresh run starts clean."""
  if gfile.exists(directory):
    gfile.DeleteRecursively(directory)
def set_paths(ckpt_to_load, network):
  """Derive residual-training paths from the base agent's checkpoint path.

  Args:
    ckpt_to_load: Path to the base BC policy checkpoint.
    network: Network type string, used to locate the summary directory.

  Returns:
    A (demos_file, ckpt_dir, summary_dir) tuple for residual training. The
    checkpoint and summary directories are wiped unless FLAGS.load_saved.
  """
  # For now, assume the same dataset has unused demonstrations to use in
  # residual training.
  residual_demos_file = setup.get_original_demos_path(ckpt_to_load)
  base_ckpt_dir = os.path.dirname(ckpt_to_load)
  residual_ckpt_dir = os.path.join(
      base_ckpt_dir.replace('/bc_policy/', '/residual_bc_policy/'),
      FLAGS.exp_id)
  job_id = train_utils.set_job_id()
  if job_id is not None:
    residual_ckpt_dir = os.path.join(residual_ckpt_dir, job_id)
  # The replacement above must have produced a distinct directory.
  assert residual_ckpt_dir != base_ckpt_dir
  if not FLAGS.load_saved:
    reset_dir(residual_ckpt_dir)
  base_summary_dir = train_utils.get_summary_dir_for_ckpt_dir(
      base_ckpt_dir, network)
  residual_summary_dir = base_summary_dir.replace(
      '/bc_policy/', '/residual_bc_policy/')
  if job_id is not None:
    residual_summary_dir = os.path.join(residual_summary_dir, job_id)
  assert residual_summary_dir != base_summary_dir
  if not FLAGS.load_saved:
    reset_dir(residual_summary_dir)
  return residual_demos_file, residual_ckpt_dir, residual_summary_dir
def main(_):
  """Train and (optionally) evaluate a residual BC policy on a base agent."""
  np.random.seed(FLAGS.seed)
  tf.random.set_seed(FLAGS.seed)
  # Demos come from demo_task if set, otherwise from the eval task.
  demo_task = FLAGS.demo_task or FLAGS.eval_task
  if demo_task is None:
    raise ValueError('eval_task or demo_task must be set')
  base_state = setup.set_visible_features(
      FLAGS.domain, demo_task, FLAGS.base_visible_state)
  residual_state = setup.set_visible_features(
      FLAGS.domain, demo_task, FLAGS.residual_visible_state)
  print('Base policy state features', base_state)
  print('Residual policy state features', residual_state)
  env = train_utils.make_environment(FLAGS.domain, demo_task)
  demos_file, ckpt_dir, summary_dir = set_paths(
      FLAGS.bc_ckpt_to_load, FLAGS.network)
  # Create BC agent. In residual RL, it is used as the base agent, and in
  # standalone RL for action and observation space normalization.
  base_agent = setup.load_saved_bc_agent(
      ckpt_to_load=FLAGS.bc_ckpt_to_load,
      network_type=FLAGS.network,
      late_fusion=FLAGS.late_fusion,
      input_type=FLAGS.input_type,
      domain=FLAGS.domain,
      binary_grip_action=FLAGS.binary_grip_action,
      num_input_frames=FLAGS.num_input_frames,
      crop_frames=FLAGS.crop_frames,
      target_offsets=[int(t) for t in FLAGS.target_offsets],
      visible_state_features=base_state,
      action_norm=FLAGS.action_norm,
      signals_norm=FLAGS.signals_norm,
      last_activation=FLAGS.last_activation,
      fc_layer_sizes=FLAGS.fc_layer_sizes,
      weight_decay=FLAGS.weight_decay,
      max_demos_to_load=FLAGS.max_demos_to_load,
      max_demo_length=FLAGS.max_demo_length,
      val_size=FLAGS.val_size,
      val_full_episodes=FLAGS.val_full_episodes,
      split_seed=FLAGS.split_seed,
      env=env,
      task=demo_task,
      grip_action_from_state=False,
      zero_action_keeps_state=False,
      early_closing=False,
  )
  print('action normalization mean\n', base_agent.action_space.mean)
  print('action normalization std\n', base_agent.action_space.std)
  # Verify base agent is loaded correctly (repro success).
  # print('Evaluating standalone base agent')
  # success_rates = eval_loop.eval_policy(
  #     env, FLAGS.eval_seed, FLAGS.increment_eval_seed, base_agent,
  #     FLAGS.num_eval_episodes)
  # Decide whether the residual policy sees the base network's features:
  # skipped when there is no trained base, when initializing from BC, or
  # when the residual sees the full true state instead.
  include_base_feats = True
  if ((FLAGS.bc_ckpt_to_load is None and FLAGS.policy_init_path is None)
      or FLAGS.init_from_bc
      or FLAGS.init_feats_from_bc):
    include_base_feats = False
  if FLAGS.residual_visible_state == 'full':
    include_base_feats = False
  include_base_action = FLAGS.predict_residual
  # TODO(minttu): Scale residual spec minima and maxima according to residual
  # action normalization.
  residual_spec = setup.define_residual_spec(
      residual_state, env, base_agent,
      include_base_action=include_base_action,
      include_base_feats=include_base_feats,
      base_network=FLAGS.network)
  # TODO(minttu): Allow predicting continuous actions even if base agent uese
  # binary grip actions.
  # TODO(minttu): Pass in action target dimension or action pred dimemsion?
  binary_grip_action = FLAGS.binary_grip_action
  # Action normalization of residual BC:
  # -> centered (original demos std), with same scaling as residual exps
  agent = setup.make_residual_bc_agent(
      residual_spec=residual_spec,
      base_agent=base_agent,
      action_norm=FLAGS.residual_action_norm,
      action_norm_scale=FLAGS.residual_action_norm_scale,
      binary_grip_action=binary_grip_action,
      env=env,
      visible_state_features=residual_state)
  dataset = None
  if demos_file is not None:
    dataset = train_utils.prepare_demos(
        demos_file=demos_file,
        input_type=FLAGS.input_type,
        max_demos_to_load=FLAGS.residual_max_demos_to_load,
        max_demo_length=FLAGS.max_demo_length,
        augment_frames=FLAGS.augment_frames,
        agent=None,  # Do not reset action stats.
        split_dir=ckpt_dir,
        val_size=FLAGS.residual_val_size,
        val_full_episodes=FLAGS.val_full_episodes,
        skip=FLAGS.max_demos_to_load)
    dataset.agent = agent  # Use for demo normalization.
    # Assumes dataset fits in RAM.
    # Transform dataset once (to avoid transforming at each epoch).
    # Replace observations with concatenated visible state, base action and
    # base feats. Replace demo actions with residuals (given the base agent).
    agent.preprocess_dataset(dataset, FLAGS.batch_size)
  epochs_to_eval = [int(epoch) for epoch in FLAGS.epochs_to_eval]
  if not FLAGS.load_saved:
    # Fresh run: persist the train/val split so evaluation and resumed runs
    # use the same demonstrations, then train.
    if ckpt_dir is not None:
      gfile.makedirs(ckpt_dir)
      with gfile.GFile(os.path.join(ckpt_dir, 'train_split.pkl'), 'wb') as f:
        pickle.dump(dataset.train_split, f)
      with gfile.GFile(
          os.path.join(ckpt_dir, 'episode_train_split.pkl'), 'wb') as f:
        pickle.dump(dataset.episode_train_split, f)
    train_utils.train(
        dataset=dataset,
        agent=agent,
        ckpt_dir=ckpt_dir,
        optimizer_type=FLAGS.optimizer,
        learning_rate=FLAGS.learning_rate,
        batch_size=FLAGS.batch_size,
        num_epochs=FLAGS.num_epochs,
        loss_fn=FLAGS.regression_loss,
        l2_weight=FLAGS.l2_weight,
        summary_dir=summary_dir,
        epochs_to_eval=epochs_to_eval)
  # Optionally hold out a test set from the tail of the demo file.
  test_set_size = FLAGS.test_set_size
  test_dataset = None
  if test_set_size > 0:
    test_set_start = FLAGS.test_set_start or FLAGS.max_demos_to_load
    test_dataset = train_utils.prepare_demos(
        demos_file, FLAGS.input_type, test_set_start + test_set_size,
        FLAGS.max_demo_length, augment_frames=False, agent=agent,
        split_dir=None,
        val_size=test_set_size / (test_set_start + test_set_size),
        val_full_episodes=True)
  # Roll out each saved checkpoint in the eval environment.
  num_eval_episodes = FLAGS.num_eval_episodes
  if FLAGS.eval_task is not None and num_eval_episodes > 0:
    env = train_utils.make_environment(
        FLAGS.domain, FLAGS.eval_task, FLAGS.use_egl, FLAGS.render_eval)
    summary_writer = train_utils.make_summary_writer(summary_dir)
    ckpts_to_eval = train_utils.get_checkpoints_to_evaluate(
        ckpt_dir, epochs_to_eval)
    print('Evaluating ckpts', ckpts_to_eval)
    epoch_to_success_rates = {}
    for ckpt in ckpts_to_eval:
      train_utils.evaluate_checkpoint(
          ckpt=ckpt,
          ckpt_dir=ckpt_dir,
          agent=agent,
          env=env,
          num_eval_episodes=num_eval_episodes,
          epoch_to_success_rates=epoch_to_success_rates,
          summary_writer=summary_writer,
          test_dataset=test_dataset)
if __name__ == '__main__':
  app.run(main)
| google-research/google-research | rrlfd/residual/train_bc.py | Python | apache-2.0 | 17,630 |
#!/usr/bin/python
import os
import json
from pprint import pprint
from os import environ as env
import glanceclient.exc
from collections import Counter
import novaclient.v1_1.client as nvclient
import glanceclient.v2.client as glclient
import keystoneclient.v2_0.client as ksclient
def get_nova_credentials():
    """Build the nova client credential dict from the OS_* environment."""
    return {
        'username': os.environ['OS_USERNAME'],
        'api_key': os.environ['OS_PASSWORD'],
        'auth_url': os.environ['OS_AUTH_URL'],
        'project_id': os.environ['OS_TENANT_NAME'],
    }
def main():
    """Print per-image instance counts for the current tenant as CSV.

    Python 2 script: authenticates with keystone using the OS_* shell
    environment, lists servers via nova and resolves image names via glance.
    """
    keystone = ksclient.Client(auth_url=env['OS_AUTH_URL'],
                               username=env['OS_USERNAME'],
                               password=env['OS_PASSWORD'],
                               tenant_name=env['OS_TENANT_NAME'])
    credentials = get_nova_credentials()
    glance_endpoint = keystone.service_catalog.url_for(service_type='image')
    nc = nvclient.Client(**credentials)
    gc = glclient.Client(glance_endpoint, token=keystone.auth_token)
    # One image-name entry per running instance.
    L = []
    for server in nc.servers.list(detailed=True):
        imagedata = server.image
        if imagedata:
            try:
                # json.dumps + translate strips the surrounding quotes (py2 str API).
                jsondata = json.dumps(imagedata['id'])
                image_id = jsondata.translate(None, '"')
            except ValueError:
                print "Decoding JSON has failed"
            try:
                imageinfo = gc.images.get(image_id)
            except glanceclient.exc.HTTPException:
                # Image may have been deleted since the server booted; skip it.
                continue
            try:
                jsondata = json.dumps(imageinfo['name'])
                image_name = jsondata.translate(None, '"')
            except ValueError:
                print "Decoding JSON has failed"
            L.append(image_name)
    count = Counter(L)
    print "***** %s *****" % os.environ['OS_TENANT_NAME']
    for key, value in sorted(count.iteritems()):
        print "%s,%d" % (key, value)
if __name__ == '__main__':
    main()
| kionetworks/openstack-api-scripts | tenant_glance_images.py | Python | apache-2.0 | 1,996 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import hashlib
import json
import os
import traceback
import urlparse
from abc import ABCMeta, abstractmethod
from ..testrunner import Stop
here = os.path.split(__file__)[0]
def executor_kwargs(test_type, server_config, cache_manager, **kwargs):
    """Build the keyword arguments used to construct a TestExecutor."""
    multiplier = kwargs["timeout_multiplier"]
    rv = {"server_config": server_config,
          "timeout_multiplier": multiplier if multiplier is not None else 1,
          "debug_info": kwargs["debug_info"]}
    # Reftests share a screenshot cache between executor processes.
    if test_type == "reftest":
        rv["screenshot_cache"] = cache_manager.dict()
    return rv
def strip_server(url):
    """Remove the scheme and netloc from a url, leaving only the path and any query
    or fragment.

    url - the url to strip

    e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    return urlparse.urlunsplit(("", "", path, query, fragment))
class TestharnessResultConverter(object):
    """Translate raw testharness.js result tuples into wptrunner results."""

    # Numeric status codes reported by the harness / individual tests.
    harness_codes = {0: "OK",
                     1: "ERROR",
                     2: "TIMEOUT"}
    test_codes = {0: "PASS",
                  1: "FAIL",
                  2: "TIMEOUT",
                  3: "NOTRUN"}
    def __call__(self, test, result):
        """Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
        result_url, status, message, stack, subtest_results = result
        assert result_url == test.url, ("Got results from %s, expected %s" %
                                        (result_url, test.url))
        harness_result = test.result_cls(self.harness_codes[status], message)
        subtests = []
        for sub_name, sub_status, sub_message, sub_stack in subtest_results:
            subtests.append(test.subtest_result_cls(sub_name,
                                                    self.test_codes[sub_status],
                                                    sub_message,
                                                    sub_stack))
        return harness_result, subtests
testharness_result_converter = TestharnessResultConverter()
def reftest_result_converter(self, test, result):
    """Convert a reftest result dict into a (TestResult, []) tuple."""
    status = result["status"]
    message = result["message"]
    extra = result.get("extra")
    return test.result_cls(status, message, extra=extra), []
class ExecutorException(Exception):
    """Failure that maps directly to a test result status.

    :param status: Result status string (must be valid for the test type,
                   see TestExecutor.result_from_exception).
    :param message: Human readable description of the failure.
    """
    def __init__(self, status, message):
        # Forward the message to Exception so str(e), repr(e) and traceback
        # output show it (the original left the base class uninitialized,
        # giving an empty str()).
        Exception.__init__(self, message)
        self.status = status
        self.message = message
class TestExecutor(object):
    """Abstract base for objects that execute tests in a specific browser."""
    __metaclass__ = ABCMeta

    # Subclasses set test_type (e.g. "testharness", "reftest") and
    # convert_result (a callable turning raw results into wptrunner results).
    test_type = None
    convert_result = None
    def __init__(self, browser, server_config, timeout_multiplier=1,
                 debug_info=None):
        """Abstract Base class for object that actually executes the tests in a
        specific browser. Typically there will be a different TestExecutor
        subclass for each test type and method of executing tests.

        :param browser: ExecutorBrowser instance providing properties of the
                        browser that will be tested.
        :param server_config: Dictionary of wptserve server configuration of the
                              form stored in TestEnvironment.external_config
        :param timeout_multiplier: Multiplier relative to base timeout to use
                                   when setting test timeout.
        """
        self.runner = None
        self.browser = browser
        self.server_config = server_config
        self.timeout_multiplier = timeout_multiplier
        self.debug_info = debug_info
        self.last_environment = {"protocol": "http",
                                 "prefs": {}}
        self.protocol = None  # This must be set in subclasses
    @property
    def logger(self):
        """StructuredLogger for this executor"""
        if self.runner is not None:
            return self.runner.logger
    def setup(self, runner):
        """Run steps needed before tests can be started e.g. connecting to
        browser instance

        :param runner: TestRunner instance that is going to run the tests"""
        self.runner = runner
        self.protocol.setup(runner)
    def teardown(self):
        """Run cleanup steps after tests have finished"""
        self.protocol.teardown()
    def run_test(self, test):
        """Run a particular test.

        :param test: The test to run"""
        if test.environment != self.last_environment:
            self.on_environment_change(test.environment)
        try:
            result = self.do_test(test)
        except Exception as e:
            # Any uncaught exception becomes a result via its status
            # attribute, or ERROR (see result_from_exception).
            result = self.result_from_exception(test, e)
        if result is Stop:
            return result
        if result[0].status == "ERROR":
            self.logger.debug(result[0].message)
        self.last_environment = test.environment
        self.runner.send_message("test_ended", test, result)
    def server_url(self, protocol):
        # e.g. "http://host:port" using the first configured port for protocol.
        return "%s://%s:%s" % (protocol,
                               self.server_config["host"],
                               self.server_config["ports"][protocol][0])
    def test_url(self, test):
        """Absolute URL at which this test is served."""
        return urlparse.urljoin(self.server_url(test.environment["protocol"]), test.url)
    @abstractmethod
    def do_test(self, test):
        """Test-type and protocol specific implementation of running a
        specific test.

        :param test: The test to run."""
        pass
    def on_environment_change(self, new_environment):
        # Hook for subclasses; called before a test whose environment differs
        # from the previous test's.
        pass
    def result_from_exception(self, test, e):
        """Map an exception to a (TestResult, []) pair, honouring e.status
        when it is a valid status for this test type."""
        if hasattr(e, "status") and e.status in test.result_cls.statuses:
            status = e.status
        else:
            status = "ERROR"
        message = unicode(getattr(e, "message", ""))
        if message:
            message += "\n"
        message += traceback.format_exc(e)
        return test.result_cls(status, message), []
class TestharnessExecutor(TestExecutor):
    """Base class for executors that run testharness.js tests."""
    convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
    """Base class for executors that run reftests.

    Adds a screenshot cache shared between executors; see
    RefTestImplementation for how it is used.
    """
    convert_result = reftest_result_converter
    def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
                 debug_info=None):
        TestExecutor.__init__(self, browser, server_config,
                              timeout_multiplier=timeout_multiplier,
                              debug_info=debug_info)
        # url -> (hash, screenshot) mapping, typically a manager dict so it
        # can be shared across processes.
        self.screenshot_cache = screenshot_cache
class RefTestImplementation(object):
    """Reftest comparison logic, driven by an executor that takes screenshots."""
    def __init__(self, executor):
        self.timeout_multiplier = executor.timeout_multiplier
        self.executor = executor
        # Cache of url:(screenshot hash, screenshot). Typically the
        # screenshot is None, but we set this value if a test fails
        # and the screenshot was taken from the cache so that we may
        # retrieve the screenshot from the cache directly in the future
        self.screenshot_cache = self.executor.screenshot_cache
        # Accumulates per-comparison log lines during run_test.
        self.message = None
    @property
    def logger(self):
        return self.executor.logger
    def get_hash(self, test):
        """Return (success, (hash, screenshot-or-None)) for test's rendering,
        taking a screenshot only on cache miss."""
        # NOTE(review): timeout is computed but never used in this method.
        timeout = test.timeout * self.timeout_multiplier
        if test.url not in self.screenshot_cache:
            success, data = self.executor.screenshot(test)
            if not success:
                return False, data
            screenshot = data
            hash_value = hashlib.sha1(screenshot).hexdigest()
            # Cache the hash only; the screenshot is re-fetched on failure.
            self.screenshot_cache[test.url] = (hash_value, None)
            rv = True, (hash_value, screenshot)
        else:
            rv = True, self.screenshot_cache[test.url]
        self.message.append("%s %s" % (test.url, rv[1][0]))
        return rv
    def is_pass(self, lhs_hash, rhs_hash, relation):
        """True when the two hashes satisfy relation ("==" or "!=")."""
        assert relation in ("==", "!=")
        self.message.append("Testing %s %s %s" % (lhs_hash, relation, rhs_hash))
        return ((relation == "==" and lhs_hash == rhs_hash) or
                (relation == "!=" and lhs_hash != rhs_hash))
    def run_test(self, test):
        self.message = []
        # Depth-first search of reference tree, with the goal
        # of reachings a leaf node with only pass results
        stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
        while stack:
            hashes = [None, None]
            screenshots = [None, None]
            nodes, relation = stack.pop()
            for i, node in enumerate(nodes):
                success, data = self.get_hash(node)
                if success is False:
                    # Screenshotting failed; propagate (status, message).
                    return {"status": data[0], "message": data[1]}
                hashes[i], screenshots[i] = data
            if self.is_pass(hashes[0], hashes[1], relation):
                if nodes[1].references:
                    stack.extend(list(((nodes[1], item[0]), item[1]) for item in reversed(nodes[1].references)))
                else:
                    # We passed
                    return {"status":"PASS", "message": None}
        # We failed, so construct a failure message
        for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
            if screenshot is None:
                # Hash came from the cache without pixels; fetch them now.
                success, screenshot = self.retake_screenshot(node)
                if success:
                    screenshots[i] = screenshot
        log_data = [{"url": nodes[0].url, "screenshot": screenshots[0]}, relation,
                    {"url": nodes[1].url, "screenshot": screenshots[1]}]
        return {"status": "FAIL",
                "message": "\n".join(self.message),
                "extra": {"reftest_screenshots": log_data}}
    def retake_screenshot(self, node):
        """Re-capture node's screenshot and store it alongside its cached hash."""
        success, data = self.executor.screenshot(node)
        if not success:
            return False, data
        hash_val, _ = self.screenshot_cache[node.url]
        self.screenshot_cache[node.url] = hash_val, data
        return True, data
class Protocol(object):
    """Base class for the mechanism an executor uses to talk to its browser."""
    def __init__(self, executor, browser):
        self.executor = executor
        self.browser = browser
    @property
    def logger(self):
        # Delegate logging to the owning executor.
        return self.executor.logger
    def setup(self, runner):
        """Hook run before any test; override to e.g. connect to the browser."""
        pass
    def teardown(self):
        """Hook run after all tests have finished; override to disconnect."""
        pass
    def wait(self):
        """Hook to block until the browser signals; default is a no-op."""
        pass
| akumar21NCSU/servo | tests/wpt/harness/wptrunner/executors/base.py | Python | mpl-2.0 | 10,114 |
#######################################################################
# This file is part of Pyblosxom.
#
# Copyright (C) 2010-2011 by the Pyblosxom team. See AUTHORS.
#
# Pyblosxom is distributed under the MIT license. See the file
# LICENSE for distribution details.
#######################################################################
from Pyblosxom.blosxom import blosxom_process_path_info
from Pyblosxom.tests import UnitTestBase
from Pyblosxom import tools
class Testpathinfo(UnitTestBase):
    """pyblosxom.blosxom_process_path_info

    This tests default parsing of the path.

    Each helper builds a fake request for a PATH_INFO value and checks
    that blosxom_process_path_info fills req.data with the expected
    bl_type / pi_yr / pi_mo / pi_da / flavour values.
    """
    def _basic_test(self, pathinfo, expected, cfg=None, http=None, data=None):
        # Build a request whose PATH_INFO is pathinfo (plus any extra http
        # vars), run path-info processing, and compare req.data to expected.
        _http = {"PATH_INFO": pathinfo}
        if http:
            _http.update(http)
        req = self.build_request(cfg=cfg, http=_http, data=data)
        blosxom_process_path_info(args={"request": req})
        # print repr(expected), repr(req.data)
        self.cmpdict(expected, req.data)
    def test_root(self):
        # An empty blog root: /, /index and /index.xml are all directory views.
        entries = self.build_file_set([])
        self.setup_files(entries)
        try:
            # /
            self._basic_test("/",
                             {"bl_type": "dir",
                              "pi_yr": "", "pi_mo": "", "pi_da": "",
                              "flavour": "html"})
            # /index
            self._basic_test("/index",
                             {"bl_type": "dir",
                              "pi_yr": "", "pi_mo": "", "pi_da": "",
                              "flavour": "html"})
            # /index.xml
            self._basic_test("/index.xml",
                             {"bl_type": "dir",
                              "pi_yr": "", "pi_mo": "", "pi_da": "",
                              "flavour": "xml"})
        finally:
            self.tearDown()
    def test_files(self):
        # Individual entries resolve to bl_type "file".
        entries = self.build_file_set(["file1.txt",
                                       "cata/file2.txt",
                                       "catb/file3.txt"])
        self.setup_files(entries)
        try:
            # /file1
            self._basic_test("/file1",
                             {"bl_type": "file",
                              "pi_yr": "", "pi_mo": "", "pi_da": "",
                              "flavour": "html"})
            # /cata/file2
            self._basic_test("/cata/file2",
                             {"bl_type": "file",
                              "pi_yr": "", "pi_mo": "", "pi_da": "",
                              "flavour": "html"})
        finally:
            self.tearDown()
    def test_categories(self):
        # Category paths (with or without trailing slash) are directory views.
        entries = self.build_file_set(["cata/entry1.txt",
                                       "cata/suba/entry1.txt",
                                       "catb/entry1.txt"])
        self.setup_files(entries)
        try:
            # /cata
            self._basic_test("/cata",
                             {"bl_type": "dir",
                              "pi_yr": "", "pi_mo": "", "pi_da": "",
                              "flavour": "html"})
            # /cata/
            self._basic_test("/cata/",
                             {"bl_type": "dir",
                              "pi_yr": "", "pi_mo": "", "pi_da": "",
                              "flavour": "html"})
            # /cata/suba
            self._basic_test("/cata/suba",
                             {"bl_type": "dir",
                              "pi_yr": "", "pi_mo": "", "pi_da": "",
                              "flavour": "html"})
            # /cata/suba
            self._basic_test("/cata/suba/entry1.html",
                             {"bl_type": "file",
                              "pi_yr": "", "pi_mo": "", "pi_da": "",
                              "flavour": "html"})
        finally:
            self.tearDown()
    def test_dates(self):
        # Date components in the path populate pi_yr / pi_mo / pi_da.
        tools.initialize({})
        self._basic_test("/2002",
                         {"bl_type": "dir",
                          "pi_yr": "2002", "pi_mo": "", "pi_da": "",
                          "flavour": "html"})
        self._basic_test("/2002/02",
                         {"bl_type": "dir",
                          "pi_yr": "2002", "pi_mo": "02", "pi_da": "",
                          "flavour": "html"})
        self._basic_test("/2002/02/04",
                         {"bl_type": "dir",
                          "pi_yr": "2002", "pi_mo": "02", "pi_da": "04",
                          "flavour": "html"})
    def test_categories_and_dates(self):
        # Date prefixes combine with category suffixes in the same path.
        tools.initialize({})
        entries = self.build_file_set(["cata/entry1.txt",
                                       "cata/suba/entry1.txt",
                                       "catb/entry1.txt"])
        self.setup_files(entries)
        try:
            # /2006/cata/
            self._basic_test("/2006/cata/",
                             {"bl_type": "dir",
                              "pi_yr": "2006", "pi_mo": "", "pi_da": "",
                              "flavour": "html"})
            # /2006/04/cata/
            self._basic_test("/2006/04/cata/",
                             {"bl_type": "dir",
                              "pi_yr": "2006", "pi_mo": "04", "pi_da": "",
                              "flavour": "html"})
            # /2006/04/02/cata/
            self._basic_test("/2006/04/02/cata/",
                             {"bl_type": "dir",
                              "pi_yr": "2006", "pi_mo": "04", "pi_da": "02",
                              "flavour": "html"})
            # /2006/04/02/cata/suba/
            self._basic_test("/2006/04/02/cata/suba/",
                             {"bl_type": "dir",
                              "pi_yr": "2006", "pi_mo": "04", "pi_da": "02",
                              "flavour": "html"})
        finally:
            self.tearDown()
    def test_date_categories(self):
        # A category that LOOKS like a date (e.g. "2007/") must be treated as
        # a category, leaving pi_yr/pi_mo/pi_da empty.
        tools.initialize({})
        entries = self.build_file_set(["2007/entry1.txt",
                                       "2007/05/entry3.txt",
                                       "cata/entry2.txt"])
        self.setup_files(entries)
        try:
            # /2007/ 2007 here is a category
            self._basic_test("/2007/",
                             {"bl_type": "dir",
                              "pi_yr": "", "pi_mo": "", "pi_da": "",
                              "flavour": "html"})
            # /2007/05 2007/05 here is a category
            self._basic_test("/2007/05",
                             {"bl_type": "dir",
                              "pi_yr": "", "pi_mo": "", "pi_da": "",
                              "flavour": "html"})
            # /2007/05/entry3 2007/05/entry3 is a file
            self._basic_test("/2007/05/entry3.html",
                             {"bl_type": "file",
                              "pi_yr": "", "pi_mo": "", "pi_da": "",
                              "flavour": "html"})
        finally:
            self.tearDown()
    def test_flavour(self):
        # flavour var tests
        # The flavour is the default flavour, the extension of the request,
        # or the flav= querystring.
        # NOTE(review): root is unused in this test.
        root = self.get_temp_dir()
        tools.initialize({})
        entries = self.build_file_set(["2007/entry1.txt",
                                       "2007/05/entry3.txt",
                                       "cata/entry2.txt"])
        self.setup_files(entries)
        try:
            self._basic_test("/", {"flavour": "html"})
            self._basic_test("/index.xml", {"flavour": "xml"})
            self._basic_test("/cata/index.foo", {"flavour": "foo"})
            # FIXME - need a test for querystring
            # self._basic_test( "/cata/index.foo", http={ "QUERY_STRING": "flav=bar" },
            #                   expected={ "flavour": "bar" } )
            # test that we pick up the default_flavour config variable
            self._basic_test("/", cfg={"default_flavour": "foo"},
                             expected={"flavour": "foo"})
            # FIXME - need tests for precedence of flavour indicators
        finally:
            self.tearDown()
    def test_url(self):
        # url var tests
        # The url is the HTTP PATH_INFO env variable.
        tools.initialize({})
        entries = self.build_file_set(["2007/entry1.txt",
                                       "2007/05/entry3.txt",
                                       "cata/entry2.txt"])
        self.setup_files(entries)
        try:
            self._basic_test("/", {"url": "http://www.example.com/"})
            self._basic_test("/index.xml", {"url": "http://www.example.com/index.xml"})
            self._basic_test("/cata/index.foo", {"url": "http://www.example.com/cata/index.foo"})
        finally:
            self.tearDown()
    def test_pi_bl(self):
        # pi_bl var tests
        # pi_bl is the entry the user requested to see if the request indicated
        # a specific entry. It's the empty string otherwise.
        tools.initialize({})
        entries = self.build_file_set(["2007/entry1.txt",
                                       "2007/05/entry3.txt",
                                       "cata/entry2.txt"])
        self.setup_files(entries)
        try:
            self._basic_test("", {"pi_bl": ""})
            self._basic_test("/", {"pi_bl": "/"})
            self._basic_test("/index.xml", {"pi_bl": "/index.xml"})
            self._basic_test("/2007/index.xml", {"pi_bl": "/2007/index.xml"})
            self._basic_test("/cata/entry2", {"pi_bl": "/cata/entry2"})
        finally:
            self.tearDown()
| maru-sama/pyblosxom | Pyblosxom/tests/test_pathinfo.py | Python | mit | 9,629 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: installp
author:
- Kairo Araujo (@kairoaraujo)
short_description: Manage packages on AIX
description:
- Manage packages using 'installp' on AIX
version_added: '2.8'
options:
  accept_license:
    description:
    - Whether to accept the license for the package(s).
    type: bool
    default: no
  name:
    description:
    - One or more packages to install or remove.
    - Use C(all) to install all packages available on informed C(repository_path).
    type: list
    required: true
    aliases: [ pkg ]
  repository_path:
    description:
    - Path with AIX packages (required to install).
    type: path
  state:
    description:
    - Whether the package needs to be present on or absent from the system.
    type: str
    choices: [ absent, present ]
    default: present
notes:
- If the package is already installed, even the package/fileset is new, the module will not install it.
'''
# Fixed: the examples previously used `package_license`, which is not a
# parameter of this module; the documented option is `accept_license`.
EXAMPLES = r'''
- name: Install package foo
  installp:
    name: foo
    repository_path: /repository/AIX71/installp/base
    accept_license: yes
    state: present
- name: Install bos.sysmgt that includes bos.sysmgt.nim.master, bos.sysmgt.nim.spot
  installp:
    name: bos.sysmgt
    repository_path: /repository/AIX71/installp/base
    accept_license: yes
    state: present
- name: Install bos.sysmgt.nim.master only
  installp:
    name: bos.sysmgt.nim.master
    repository_path: /repository/AIX71/installp/base
    accept_license: yes
    state: present
- name: Install bos.sysmgt.nim.master and bos.sysmgt.nim.spot
  installp:
    name: bos.sysmgt.nim.master, bos.sysmgt.nim.spot
    repository_path: /repository/AIX71/installp/base
    accept_license: yes
    state: present
- name: Remove packages bos.sysmgt.nim.master
  installp:
    name: bos.sysmgt.nim.master
    state: absent
'''
RETURN = r''' # '''
import os
import re
from ansible.module_utils.basic import AnsibleModule
def _check_new_pkg(module, package, repository_path):
    """
    Check if the package or fileset exists under the repository path.

    Runs ``installp -l -MR -d <repository_path>`` and collects every listing
    line matching *package* into a {name: version} dict.

    :param module: Ansible module arguments spec.
    :param package: Package/fileset name, or 'all'.
    :param repository_path: Repository package path.
    :return: Bool, package information.
    """
    if os.path.isdir(repository_path):
        installp_cmd = module.get_bin_path('installp', True)
        rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path))
        if rc != 0:
            module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
        if package == 'all':
            pkg_info = "All packages on dir"
            return True, pkg_info
        else:
            pkg_info = {}
            for line in package_result.splitlines():
                # NOTE(review): *package* is used as a regular expression
                # here, so regex metacharacters in a name change matching.
                if re.findall(package, line):
                    pkg_name = line.split()[0].strip()
                    pkg_version = line.split()[1].strip()
                    pkg_info[pkg_name] = pkg_version
                    # Returns on the first matching line.
                    return True, pkg_info
            return False, None
    else:
        module.fail_json(msg="Repository path %s is not valid." % repository_path)
def _check_installed_pkg(module, package, repository_path):
    """
    Check the package on AIX.
    It verifies whether the package is installed and collects its data.

    Uses ``lslpp -lcq <package>*``; rc == 1 with a trailing 'not installed.'
    means the package is absent, any other failure aborts the module.

    :param module: Ansible module parameters spec.
    :param package: Package/fileset name.
    :param repository_path: Repository package path (unused by this check).
    :return: Bool, package data.
    """
    lslpp_cmd = module.get_bin_path('lslpp', True)
    rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package))
    if rc == 1:
        package_state = ' '.join(err.split()[-2:])
        if package_state == 'not installed.':
            return False, None
        else:
            module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
    if rc != 0:
        module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
    pkg_data = {}
    full_pkg_data = lslpp_result.splitlines()
    for line in full_pkg_data:
        # lslpp -c output is colon-separated: package:fileset:level:...
        pkg_name, fileset, level = line.split(':')[0:3]
        pkg_data[pkg_name] = fileset, level
    return True, pkg_data
def remove(module, installp_cmd, packages):
    """Uninstall the given packages with ``installp -u``.

    :param module: AnsibleModule instance (honours check mode).
    :param installp_cmd: Path to the installp binary.
    :param packages: List of package/fileset names to remove.
    :return: (changed, message) tuple.
    """
    repository_path = None
    remove_count = 0
    removed_pkgs = []
    not_found_pkg = []
    for package in packages:
        pkg_check, dummy = _check_installed_pkg(module, package, repository_path)
        if pkg_check:
            if not module.check_mode:
                rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package))
                if rc != 0:
                    module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
            remove_count += 1
            removed_pkgs.append(package)
        else:
            not_found_pkg.append(package)
    if remove_count > 0:
        # Label the not-found list whenever it is non-empty. (Previously the
        # label was added only for two or more entries, so a single missing
        # package appeared in the message without any explanation.)
        if len(not_found_pkg) > 0:
            not_found_pkg.insert(0, "Package(s) not found: ")
        changed = True
        msg = "Packages removed: %s. %s " % (' '.join(removed_pkgs), ' '.join(not_found_pkg))
    else:
        changed = False
        msg = ("No packages removed, all packages not found: %s" % ' '.join(not_found_pkg))
    return changed, msg
def install(module, installp_cmd, packages, repository_path, accept_license):
    """Install packages/filesets from repository_path with ``installp``.

    Skips anything already installed (package or fileset) and anything not
    present in the repository; honours check mode.

    :param module: AnsibleModule instance.
    :param installp_cmd: Path to the installp binary.
    :param packages: List of package/fileset names (or 'all').
    :param repository_path: Directory containing the installp packages.
    :param accept_license: Whether to pass -Y (accept licenses).
    :return: (changed, message) tuple.
    """
    installed_pkgs = []
    not_found_pkgs = []
    already_installed_pkgs = {}
    # Maps accept_license to the corresponding installp flag.
    accept_license_param = {
        True: '-Y',
        False: '',
    }
    # Validate if package exists on repository path.
    for package in packages:
        pkg_check, pkg_data = _check_new_pkg(module, package, repository_path)
        # If package exists on repository path, check if package is installed.
        if pkg_check:
            pkg_check_current, pkg_info = _check_installed_pkg(module, package, repository_path)
            # If package is already installed.
            if pkg_check_current:
                # Check if package is a package and not a fileset, get version
                # and add the package into already installed list
                if package in pkg_info.keys():
                    already_installed_pkgs[package] = pkg_info[package][1]
                else:
                    # If the package is not a package but a fileset, confirm
                    # and add the fileset/package into already installed list
                    for key in pkg_info.keys():
                        if package in pkg_info[key]:
                            already_installed_pkgs[package] = pkg_info[key][1]
            else:
                if not module.check_mode:
                    rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package))
                    if rc != 0:
                        module.fail_json(msg="Failed to run installp", rc=rc, err=err)
                installed_pkgs.append(package)
        else:
            not_found_pkgs.append(package)
    # Assemble the result message from the three outcome buckets.
    if len(installed_pkgs) > 0:
        installed_msg = (" Installed: %s." % ' '.join(installed_pkgs))
    else:
        installed_msg = ''
    if len(not_found_pkgs) > 0:
        not_found_msg = (" Not found: %s." % ' '.join(not_found_pkgs))
    else:
        not_found_msg = ''
    if len(already_installed_pkgs) > 0:
        already_installed_msg = (" Already installed: %s." % already_installed_pkgs)
    else:
        already_installed_msg = ''
    if len(installed_pkgs) > 0:
        changed = True
        msg = ("%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
    else:
        changed = False
        msg = ("No packages installed.%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
    return changed, msg
def main():
    """Module entry point: parse parameters and dispatch to install/remove."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='list', required=True, aliases=['pkg']),
            repository_path=dict(type='path'),
            accept_license=dict(type='bool', default=False),
            state=dict(type='str', default='present', choices=['absent', 'present']),
        ),
        supports_check_mode=True,
    )
    name = module.params['name']
    repository_path = module.params['repository_path']
    accept_license = module.params['accept_license']
    state = module.params['state']
    installp_cmd = module.get_bin_path('installp', True)
    if state == 'present':
        # A repository is only needed for installation, not removal.
        if repository_path is None:
            module.fail_json(msg="repository_path is required to install package")
        changed, msg = install(module, installp_cmd, name, repository_path, accept_license)
    elif state == 'absent':
        changed, msg = remove(module, installp_cmd, name)
    else:
        # Unreachable given the argument_spec choices; defensive only.
        module.fail_json(changed=False, msg="Unexpected state.")
    module.exit_json(changed=changed, msg=msg)
if __name__ == '__main__':
    main()
| andmos/ansible | lib/ansible/modules/packaging/os/installp.py | Python | gpl-3.0 | 9,242 |
'''
Created on June 6, 2018
Filer Guidelines: esma32-60-254_esef_reporting_manual.pdf
@author: Workiva
(c) Copyright 2022 Workiva, All rights reserved.
'''
try:
import regex as re
except ImportError:
import re
from arelle.ModelValue import qname
from arelle.XbrlConst import all, notAll, hypercubeDimension, dimensionDomain, domainMember, dimensionDefault, widerNarrower
# Upper bound (bytes) for base64-encoded images embedded in the inline
# XBRL document: 5MB.
browserMaxBase64ImageLength = 5242880 # 5MB
# Namespace URI prefixes identifying ESEF/IFRS core taxonomies (compared
# by prefix, hence the truncated ".../20" year component).
# NOTE(review): both members are the identical string, so this set holds a
# single element; one entry was probably intended to be a different URI --
# verify against upstream Arelle.
esefTaxonomyNamespaceURIs = {
    "http://xbrl.ifrs.org/taxonomy/20",
    "http://xbrl.ifrs.org/taxonomy/20",
    }
# Filings must not reference these standard taxonomy entry points directly.
disallowedURIsPattern = re.compile(
    "http://xbrl.ifrs.org/taxonomy/[0-9-]{10}/full_ifrs/full_ifrs-cor_[0-9-]{10}[.]xsd|"
    "http://www.esma.europa.eu/taxonomy/[0-9-]{10}/esef_all.xsd"
)
# Linkrole(s) in which dimension-default arcs must be declared.
DefaultDimensionLinkroles = ("http://www.esma.europa.eu/xbrl/role/cor/ifrs-dim_role-990000",)
# Linkrole reserved for line items not qualified by any hypercube.
LineItemsNotQualifiedLinkrole = "http://www.esma.europa.eu/xbrl/role/cor/esef_role-999999"
# QNames of the domain-item type in two vintages of the XBRL Data Types
# Registry (non-numeric and 2020-01-21).
qnDomainItemTypes = {qname("{http://www.xbrl.org/dtr/type/non-numeric}nonnum:domainItemType"),
                     qname("{http://www.xbrl.org/dtr/type/2020-01-21}nonnum:domainItemType")}
# Maps linkbaseRef role URI -> short linkbase code used in file naming below.
linkbaseRefTypes = {
    "http://www.xbrl.org/2003/role/calculationLinkbaseRef": "cal",
    "http://www.xbrl.org/2003/role/definitionLinkbaseRef": "def",
    "http://www.xbrl.org/2003/role/labelLinkbaseRef": "lab",
    "http://www.xbrl.org/2003/role/presentationLinkbaseRef": "pre",
    "http://www.xbrl.org/2003/role/referenceLinkbaseRef": "ref"
}
# Expected linkbase file naming templates (ESEF Reporting Manual naming).
filenamePatterns = {
    "cal": "{base}-{date}_cal.xml",
    "def": "{base}-{date}_def.xml",
    "lab": "{base}-{date}_lab-{lang}.xml",
    "pre": "{base}-{date}_pre.xml",
    "ref": "{base}-{date}_ref.xml"
}
# Validation regexes corresponding to the templates above.
filenameRegexes = {
    "cal": r"(.{1,})-[0-9]{4}-[0-9]{2}-[0-9]{2}_cal[.]xml$",
    "def": r"(.{1,})-[0-9]{4}-[0-9]{2}-[0-9]{2}_def[.]xml$",
    "lab": r"(.{1,})-[0-9]{4}-[0-9]{2}-[0-9]{2}_lab-[a-zA-Z]{1,8}(-[a-zA-Z0-9]{1,8})*[.]xml$",
    "pre": r"(.{1,})-[0-9]{4}-[0-9]{2}-[0-9]{2}_pre[.]xml$",
    "ref": r"(.{1,})-[0-9]{4}-[0-9]{2}-[0-9]{2}_ref[.]xml$"
}
mandatory = set() # mandatory element qnames
# hidden references
# Base XSD types that no ix transformation can represent; such facts must
# be reported in the hidden section.
untransformableTypes = {"anyURI", "base64Binary", "hexBinary", "NOTATION", "QName", "time",
                        "token", "language"}
# Arcroles permitted in ESEF definition linkbases.
esefDefinitionArcroles = {
    all, notAll, hypercubeDimension, dimensionDomain, domainMember, dimensionDefault,
    widerNarrower
}
# Abstract placeholder concepts that anchor primary financial statements
# in the presentation linkbase.
esefPrimaryStatementPlaceholderNames = (
    # to be augmented with future IFRS releases as they come known, as well as further PFS placeholders
    "StatementOfFinancialPositionAbstract",
    "IncomeStatementAbstract",
    "StatementOfComprehensiveIncomeAbstract",
    "StatementOfCashFlowsAbstract",
    "StatementOfChangesInEquityAbstract",
    "StatementOfChangesInNetAssetsAvailableForBenefitsAbstract",
    "StatementOfProfitOrLossAndOtherComprehensiveIncomeAbstract"
)
# Statement placeholder local names requiring a monetary unit declaration.
# Fix: the original was missing the comma after
# "StatementOfProfitOrLossAndOtherComprehensiveIncomeAbstract", so Python's
# implicit string-literal concatenation silently merged it with the next
# entry, producing one bogus name and dropping both intended ones.
esefStatementsOfMonetaryDeclarationNames = {
    # from Annex II para 1
    "StatementOfFinancialPositionAbstract",
    "StatementOfProfitOrLossAndOtherComprehensiveIncomeAbstract",
    "StatementOfChangesInEquityAbstract",
    "StatementOfCashFlowsAbstract",
    }
# Concept local names whose tagging is mandatory under the 2020 ESEF
# requirements (entity-identification disclosures).
esefMandatoryElementNames2020 = (
    "NameOfReportingEntityOrOtherMeansOfIdentification",
    "ExplanationOfChangeInNameOfReportingEntityOrOtherMeansOfIdentificationFromEndOfPrecedingReportingPeriod",
    "DomicileOfEntity",
    "LegalFormOfEntity",
    "CountryOfIncorporation",
    "AddressOfRegisteredOfficeOfEntity",
    "PrincipalPlaceOfBusiness",
    "DescriptionOfNatureOfEntitysOperationsAndPrincipalActivities",
    "NameOfParentEntity",
    "NameOfUltimateParentOfGroup"
)
# Concept local names whose tagging is mandatory under the 2022 ESEF
# requirements.  Fix: the original was missing the comma after
# "DisclosureOfRecognisedRevenueFromConstructionContractsExplanatory", so
# Python's implicit string-literal concatenation silently merged it with
# "DisclosureOfReinsuranceExplanatory", producing one bogus concatenated
# name and dropping both intended entries.  (Trailing extraction residue
# after the closing parenthesis has also been removed.)
esefMandatoryElementNames2022 = (
    "AddressOfRegisteredOfficeOfEntity",
    "CountryOfIncorporation",
    "DescriptionOfAccountingPolicyForAvailableforsaleFinancialAssetsExplanatory",
    "DescriptionOfAccountingPolicyForBiologicalAssetsExplanatory",
    "DescriptionOfAccountingPolicyForBorrowingCostsExplanatory",
    "DescriptionOfAccountingPolicyForBorrowingsExplanatory",
    "DescriptionOfAccountingPolicyForBusinessCombinationsExplanatory",
    "DescriptionOfAccountingPolicyForBusinessCombinationsAndGoodwillExplanatory",
    "DescriptionOfAccountingPolicyForCashFlowsExplanatory",
    "DescriptionOfAccountingPolicyForCollateralExplanatory",
    "DescriptionOfAccountingPolicyForConstructionInProgressExplanatory",
    "DescriptionOfAccountingPolicyForContingentLiabilitiesAndContingentAssetsExplanatory",
    "DescriptionOfAccountingPolicyForCustomerAcquisitionCostsExplanatory",
    "DescriptionOfAccountingPolicyForCustomerLoyaltyProgrammesExplanatory",
    "DescriptionOfAccountingPolicyForDecommissioningRestorationAndRehabilitationProvisionsExplanatory",
    "DescriptionOfAccountingPolicyForDeferredAcquisitionCostsArisingFromInsuranceContractsExplanatory",
    "DescriptionOfAccountingPolicyForDeferredIncomeTaxExplanatory",
    "DescriptionOfAccountingPolicyForDepreciationExpenseExplanatory",
    "DescriptionOfAccountingPolicyForDerecognitionOfFinancialInstrumentsExplanatory",
    "DescriptionOfAccountingPolicyForDerivativeFinancialInstrumentsExplanatory",
    "DescriptionOfAccountingPolicyForDerivativeFinancialInstrumentsAndHedgingExplanatory",
    "DescriptionOfAccountingPolicyToDetermineComponentsOfCashAndCashEquivalents",
    "DescriptionOfAccountingPolicyForDiscontinuedOperationsExplanatory",
    "DescriptionOfAccountingPolicyForDiscountsAndRebatesExplanatory",
    "DescriptionOfAccountingPolicyForDividendsExplanatory",
    "DescriptionOfAccountingPolicyForEarningsPerShareExplanatory",
    "DescriptionOfAccountingPolicyForEmissionRightsExplanatory",
    "DescriptionOfAccountingPolicyForEmployeeBenefitsExplanatory",
    "DescriptionOfAccountingPolicyForEnvironmentRelatedExpenseExplanatory",
    "DescriptionOfAccountingPolicyForExceptionalItemsExplanatory",
    "DescriptionOfAccountingPolicyForExpensesExplanatory",
    "DescriptionOfAccountingPolicyForExplorationAndEvaluationExpenditures",
    "DescriptionOfAccountingPolicyForFairValueMeasurementExplanatory",
    "DescriptionOfAccountingPolicyForFeeAndCommissionIncomeAndExpenseExplanatory",
    "DescriptionOfAccountingPolicyForFinanceCostsExplanatory",
    "DescriptionOfAccountingPolicyForFinanceIncomeAndCostsExplanatory",
    "DescriptionOfAccountingPolicyForFinancialAssetsExplanatory",
    "DescriptionOfAccountingPolicyForFinancialGuaranteesExplanatory",
    "DescriptionOfAccountingPolicyForFinancialInstrumentsExplanatory",
    "DescriptionOfAccountingPolicyForFinancialInstrumentsAtFairValueThroughProfitOrLossExplanatory",
    "DescriptionOfAccountingPolicyForFinancialLiabilitiesExplanatory",
    "DescriptionOfAccountingPolicyForForeignCurrencyTranslationExplanatory",
    "DescriptionOfAccountingPolicyForFranchiseFeesExplanatory",
    "DescriptionOfAccountingPolicyForFunctionalCurrencyExplanatory",
    "DescriptionOfAccountingPolicyForGoodwillExplanatory",
    "DescriptionOfAccountingPolicyForGovernmentGrants",
    "DescriptionOfAccountingPolicyForHedgingExplanatory",
    "DescriptionOfAccountingPolicyForHeldtomaturityInvestmentsExplanatory",
    "DescriptionOfAccountingPolicyForImpairmentOfAssetsExplanatory",
    "DescriptionOfAccountingPolicyForImpairmentOfFinancialAssetsExplanatory",
    "DescriptionOfAccountingPolicyForImpairmentOfNonfinancialAssetsExplanatory",
    "DescriptionOfAccountingPolicyForIncomeTaxExplanatory",
    "DescriptionOfAccountingPolicyForInsuranceContracts",
    "DescriptionOfAccountingPolicyForIntangibleAssetsAndGoodwillExplanatory",
    "DescriptionOfAccountingPolicyForIntangibleAssetsOtherThanGoodwillExplanatory",
    "DescriptionOfAccountingPolicyForInterestIncomeAndExpenseExplanatory",
    "DescriptionOfAccountingPolicyForInvestmentInAssociates",
    "DescriptionOfAccountingPolicyForInvestmentInAssociatesAndJointVenturesExplanatory",
    "DescriptionOfAccountingPolicyForInvestmentPropertyExplanatory",
    "DescriptionOfAccountingPolicyForInvestmentsInJointVentures",
    "DescriptionOfAccountingPolicyForInvestmentsOtherThanInvestmentsAccountedForUsingEquityMethodExplanatory",
    "DescriptionOfAccountingPolicyForIssuedCapitalExplanatory",
    "DescriptionOfAccountingPolicyForLeasesExplanatory",
    "DescriptionOfAccountingPolicyForLoansAndReceivablesExplanatory",
    "DescriptionOfAccountingPolicyForMeasuringInventories",
    "DescriptionOfAccountingPolicyForMiningAssetsExplanatory",
    "DescriptionOfAccountingPolicyForMiningRightsExplanatory",
    "DescriptionOfAccountingPolicyForNoncurrentAssetsOrDisposalGroupsClassifiedAsHeldForSaleExplanatory",
    "DescriptionOfAccountingPolicyForNoncurrentAssetsOrDisposalGroupsClassifiedAsHeldForSaleAndDiscontinuedOperationsExplanatory",
    "DescriptionOfAccountingPolicyForOffsettingOfFinancialInstrumentsExplanatory",
    "DescriptionOfAccountingPolicyForOilAndGasAssetsExplanatory",
    "DescriptionOfAccountingPolicyForProgrammingAssetsExplanatory",
    "DescriptionOfAccountingPolicyForPropertyPlantAndEquipmentExplanatory",
    "DescriptionOfAccountingPolicyForProvisionsExplanatory",
    "DescriptionOfAccountingPolicyForReclassificationOfFinancialInstrumentsExplanatory",
    "DescriptionOfAccountingPolicyForRecognisingDifferenceBetweenFairValueAtInitialRecognitionAndAmountDeterminedUsingValuationTechniqueExplanatory",
    "DescriptionOfAccountingPolicyForRecognitionOfRevenue",
    "DescriptionOfAccountingPolicyForRegulatoryDeferralAccountsExplanatory",
    "DescriptionOfAccountingPolicyForReinsuranceExplanatory",
    "DescriptionOfAccountingPolicyForRepairsAndMaintenanceExplanatory",
    "DescriptionOfAccountingPolicyForRepurchaseAndReverseRepurchaseAgreementsExplanatory",
    "DescriptionOfAccountingPolicyForResearchAndDevelopmentExpenseExplanatory",
    "DescriptionOfAccountingPolicyForRestrictedCashAndCashEquivalentsExplanatory",
    "DescriptionOfAccountingPolicyForSegmentReportingExplanatory",
    "DescriptionOfAccountingPolicyForServiceConcessionArrangementsExplanatory",
    "DescriptionOfAccountingPolicyForSharebasedPaymentTransactionsExplanatory",
    "DescriptionOfAccountingPolicyForStrippingCostsExplanatory",
    "DescriptionOfAccountingPolicyForSubsidiariesExplanatory",
    "DescriptionOfAccountingPolicyForTaxesOtherThanIncomeTaxExplanatory",
    "DescriptionOfAccountingPolicyForTerminationBenefits",
    "DescriptionOfAccountingPolicyForTradeAndOtherPayablesExplanatory",
    "DescriptionOfAccountingPolicyForTradeAndOtherReceivablesExplanatory",
    "DescriptionOfAccountingPolicyForTradingIncomeAndExpenseExplanatory",
    "DescriptionOfAccountingPolicyForTransactionsWithNoncontrollingInterestsExplanatory",
    "DescriptionOfAccountingPolicyForTransactionsWithRelatedPartiesExplanatory",
    "DescriptionOfAccountingPolicyForTreasurySharesExplanatory",
    "DescriptionOfAccountingPolicyForWarrantsExplanatory",
    "DescriptionOfReasonWhyFinancialStatementsAreNotEntirelyComparable",
    "DescriptionOfNatureOfEntitysOperationsAndPrincipalActivities",
    "DescriptionOfOtherAccountingPoliciesRelevantToUnderstandingOfFinancialStatements",
    "DescriptionOfReasonForUsingLongerOrShorterReportingPeriod",
    "DisclosureOfAccountingJudgementsAndEstimatesExplanatory",
    "DisclosureOfAccruedExpensesAndOtherLiabilitiesExplanatory",
    "DisclosureOfAllowanceForCreditLossesExplanatory",
    "DisclosureOfAssetsAndLiabilitiesWithSignificantRiskOfMaterialAdjustmentExplanatory",
    "DisclosureOfSignificantInvestmentsInAssociatesExplanatory",
    "DisclosureOfAuditorsRemunerationExplanatory",
    "DisclosureOfAuthorisationOfFinancialStatementsExplanatory",
    "DisclosureOfAvailableforsaleAssetsExplanatory",
    "DisclosureOfBasisOfConsolidationExplanatory",
    "DisclosureOfBasisOfPreparationOfFinancialStatementsExplanatory",
    "DisclosureOfBiologicalAssetsAndGovernmentGrantsForAgriculturalActivityExplanatory",
    "DisclosureOfBorrowingsExplanatory",
    "DisclosureOfBusinessCombinationsExplanatory",
    "DisclosureOfCashAndBankBalancesAtCentralBanksExplanatory",
    "DisclosureOfCashAndCashEquivalentsExplanatory",
    "DisclosureOfCashFlowStatementExplanatory",
    "DisclosureOfChangesInAccountingPoliciesExplanatory",
    "DisclosureOfChangesInAccountingPoliciesAccountingEstimatesAndErrorsExplanatory",
    "DisclosureOfClaimsAndBenefitsPaidExplanatory",
    "DisclosureOfCollateralExplanatory",
    "DisclosureOfCommitmentsExplanatory",
    "DisclosureOfCommitmentsAndContingentLiabilitiesExplanatory",
    "DisclosureOfContingentLiabilitiesExplanatory",
    "DisclosureOfCostOfSalesExplanatory",
    "DisclosureOfCreditRiskExplanatory",
    "DisclosureOfDebtSecuritiesExplanatory",
    "DisclosureOfDeferredAcquisitionCostsArisingFromInsuranceContractsExplanatory",
    "DisclosureOfDeferredIncomeExplanatory",
    "DisclosureOfDeferredTaxesExplanatory",
    "DisclosureOfDepositsFromBanksExplanatory",
    "DisclosureOfDepositsFromCustomersExplanatory",
    "DisclosureOfDepreciationAndAmortisationExpenseExplanatory",
    "DisclosureOfDerivativeFinancialInstrumentsExplanatory",
    "DisclosureOfDiscontinuedOperationsExplanatory",
    "DisclosureOfDividendsExplanatory",
    "DisclosureOfEarningsPerShareExplanatory",
    "DisclosureOfEffectOfChangesInForeignExchangeRatesExplanatory",
    "DisclosureOfEmployeeBenefitsExplanatory",
    "DisclosureOfEntitysReportableSegmentsExplanatory",
    "DisclosureOfEventsAfterReportingPeriodExplanatory",
    "DisclosureOfExpensesExplanatory",
    "DisclosureOfExpensesByNatureExplanatory",
    "DisclosureOfExplorationAndEvaluationAssetsExplanatory",
    "DisclosureOfFairValueMeasurementExplanatory",
    "DisclosureOfFairValueOfFinancialInstrumentsExplanatory",
    "DisclosureOfFeeAndCommissionIncomeExpenseExplanatory",
    "DisclosureOfFinanceCostExplanatory",
    "DisclosureOfFinanceIncomeExpenseExplanatory",
    "DisclosureOfFinanceIncomeExplanatory",
    "DisclosureOfFinancialAssetsHeldForTradingExplanatory",
    "DisclosureOfFinancialInstrumentsExplanatory",
    "DisclosureOfFinancialInstrumentsAtFairValueThroughProfitOrLossExplanatory",
    "DisclosureOfFinancialInstrumentsDesignatedAtFairValueThroughProfitOrLossExplanatory",
    "DisclosureOfFinancialInstrumentsHeldForTradingExplanatory",
    "DisclosureOfFinancialLiabilitiesHeldForTradingExplanatory",
    "DisclosureOfFinancialRiskManagementExplanatory",
    "DisclosureOfFirstTimeAdoptionExplanatory",
    "DisclosureOfGeneralAndAdministrativeExpenseExplanatory",
    "DisclosureOfGeneralInformationAboutFinancialStatementsExplanatory",
    "DisclosureOfGoingConcernExplanatory",
    "DisclosureOfGoodwillExplanatory",
    "DisclosureOfGovernmentGrantsExplanatory",
    "DisclosureOfImpairmentOfAssetsExplanatory",
    "DisclosureOfIncomeTaxExplanatory",
    "DisclosureOfInformationAboutEmployeesExplanatory",
    "DisclosureOfInformationAboutKeyManagementPersonnelExplanatory",
    "DisclosureOfInsuranceContractsExplanatory",
    "DisclosureOfInsurancePremiumRevenueExplanatory",
    "DisclosureOfIntangibleAssetsExplanatory",
    "DisclosureOfIntangibleAssetsAndGoodwillExplanatory",
    "DisclosureOfInterestExpenseExplanatory",
    "DisclosureOfInterestIncomeExpenseExplanatory",
    "DisclosureOfInterestIncomeExplanatory",
    "DisclosureOfInventoriesExplanatory",
    "DisclosureOfInvestmentContractsLiabilitiesExplanatory",
    "DisclosureOfInvestmentPropertyExplanatory",
    "DisclosureOfInvestmentsAccountedForUsingEquityMethodExplanatory",
    "DisclosureOfInvestmentsOtherThanInvestmentsAccountedForUsingEquityMethodExplanatory",
    "DisclosureOfIssuedCapitalExplanatory",
    "DisclosureOfJointVenturesExplanatory",
    "DisclosureOfLeasePrepaymentsExplanatory",
    "DisclosureOfLeasesExplanatory",
    "DisclosureOfLiquidityRiskExplanatory",
    "DisclosureOfLoansAndAdvancesToBanksExplanatory",
    "DisclosureOfLoansAndAdvancesToCustomersExplanatory",
    "DisclosureOfMarketRiskExplanatory",
    "DisclosureOfNetAssetValueAttributableToUnitholdersExplanatory",
    "DisclosureOfNoncontrollingInterestsExplanatory",
    "DisclosureOfNoncurrentAssetsHeldForSaleAndDiscontinuedOperationsExplanatory",
    "DisclosureOfNoncurrentAssetsOrDisposalGroupsClassifiedAsHeldForSaleExplanatory",
    "DisclosureOfObjectivesPoliciesAndProcessesForManagingCapitalExplanatory",
    "DisclosureOfOtherAssetsExplanatory",
    "DisclosureOfOtherCurrentAssetsExplanatory",
    "DisclosureOfOtherCurrentLiabilitiesExplanatory",
    "DisclosureOfOtherLiabilitiesExplanatory",
    "DisclosureOfOtherNoncurrentAssetsExplanatory",
    "DisclosureOfOtherNoncurrentLiabilitiesExplanatory",
    "DisclosureOfOtherOperatingExpenseExplanatory",
    "DisclosureOfOtherOperatingIncomeExpenseExplanatory",
    "DisclosureOfOtherOperatingIncomeExplanatory",
    "DisclosureOfPrepaymentsAndOtherAssetsExplanatory",
    "DisclosureOfProfitLossFromOperatingActivitiesExplanatory",
    "DisclosureOfPropertyPlantAndEquipmentExplanatory",
    "DisclosureOfOtherProvisionsExplanatory",
    "DisclosureOfReclassificationOfFinancialInstrumentsExplanatory",
    "DisclosureOfReclassificationsOrChangesInPresentationExplanatory",
    "DisclosureOfRecognisedRevenueFromConstructionContractsExplanatory",
    "DisclosureOfReinsuranceExplanatory",
    "DisclosureOfRelatedPartyExplanatory",
    "DisclosureOfRepurchaseAndReverseRepurchaseAgreementsExplanatory",
    "DisclosureOfResearchAndDevelopmentExpenseExplanatory",
    "DisclosureOfReservesAndOtherEquityInterestExplanatory",
    "DisclosureOfRestrictedCashAndCashEquivalentsExplanatory",
    "DisclosureOfRevenueExplanatory",
    "DisclosureOfServiceConcessionArrangementsExplanatory",
    "DisclosureOfShareCapitalReservesAndOtherEquityInterestExplanatory",
    "DisclosureOfSharebasedPaymentArrangementsExplanatory",
    "DisclosureOfSummaryOfSignificantAccountingPoliciesExplanatory",
    "DisclosureOfSubordinatedLiabilitiesExplanatory",
    "DisclosureOfSignificantInvestmentsInSubsidiariesExplanatory",
    "DisclosureOfTaxReceivablesAndPayablesExplanatory",
    "DisclosureOfTradeAndOtherPayablesExplanatory",
    "DisclosureOfTradeAndOtherReceivablesExplanatory",
    "DisclosureOfTradingIncomeExpenseExplanatory",
    "DisclosureOfTreasurySharesExplanatory",
    "DescriptionOfUncertaintiesOfEntitysAbilityToContinueAsGoingConcern",
    "DividendsProposedOrDeclaredBeforeFinancialStatementsAuthorisedForIssueButNotRecognisedAsDistributionToOwners",
    "DividendsProposedOrDeclaredBeforeFinancialStatementsAuthorisedForIssueButNotRecognisedAsDistributionToOwnersPerShare",
    "DividendsRecognisedAsDistributionsToOwnersPerShare",
    "DomicileOfEntity",
    "ExplanationOfDepartureFromIFRS",
    "ExplanationOfFactAndBasisForPreparationOfFinancialStatementsWhenNotGoingConcernBasis",
    "ExplanationOfFinancialEffectOfDepartureFromIFRS",
    "ExplanationOfAssumptionAboutFutureWithSignificantRiskOfResultingInMaterialAdjustments",
    "ExplanationWhyFinancialStatementsNotPreparedOnGoingConcernBasis",
    "LegalFormOfEntity",
    "LengthOfLifeOfLimitedLifeEntity",
    "NameOfParentEntity",
    "NameOfReportingEntityOrOtherMeansOfIdentification",
    "NameOfUltimateParentOfGroup",
    "PrincipalPlaceOfBusiness",
    "StatementOfIFRSCompliance",
    )
# Recursively walk a directory tree and write a listing file into every
# visited directory.
# Fix: the first line was garbled extraction residue
# ("media_file_scrubber.pyimport os"), a SyntaxError; restored to a plain
# import.  Trailing residue fused onto the last line was also removed.
import os
import sys

# Directory to walk is the first command-line argument.
walk_dir = sys.argv[1]
print('walk_dir = ' + walk_dir)
# If your current working directory may change during script execution, it's recommended to
# immediately convert program arguments to an absolute path. Then the variable root below will
# be an absolute path as well. Example:
# walk_dir = os.path.abspath(walk_dir)
print('walk_dir (absolute) = ' + os.path.abspath(walk_dir))
for root, subdirs, files in os.walk(walk_dir):
    print('--\nroot = ' + root)
    # One listing file is (re)created in each visited directory.
    list_file_path = os.path.join(root, 'my-directory-list.txt')
    print('list_file_path = ' + list_file_path)
    with open(list_file_path, 'wb') as list_file:
        for subdir in subdirs:
            print('\t- subdirectory ' + subdir)
        for filename in files:
            file_path = os.path.join(root, filename)
            print('\t- file %s (full path: %s)' % (filename, file_path))
            # Read each file as bytes and append its content to the listing.
            with open(file_path, 'rb') as f:
                f_content = f.read()
            list_file.write(('The file %s contains:\n' % filename).encode('utf-8'))
            list_file.write(f_content)
            list_file.write(b'\n')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""\
linecnt.py -- shows line count in all files (specified by file mask) in the direcory.
It is useful for counting source code lines in a software project.
Usage: linecnt.py <base directory> <file mask>
"""
__author__ = "Andrey Nordin <http://claimid.com/anrienord>"
import sys, os
from glob import glob
def subdirs(d):
    """Return the immediate subdirectories of *d* as joined paths."""
    candidates = (os.path.join(d, entry) for entry in os.listdir(d))
    return [path for path in candidates if os.path.isdir(path)]
def alldirs(d):
    """Return *d* plus every directory below it, in depth-first order."""
    result = [d]
    for child in subdirs(d):
        result.extend(alldirs(child))
    return result
def allfiles(d, m):
    """Return all files matching glob mask *m* in *d* and its subdirectories.

    Bug fix: the original ignored its *d* argument and read the module-level
    global ``directory`` instead (the comprehension variable shadowed ``d``),
    so the function only worked when called from the __main__ block.
    """
    return sum([glob(os.path.join(sub, m)) for sub in alldirs(d)], [])
def strcnt(f):
    """Return the number of lines in text file *f*.

    Bug fix: the original left the file handle open (no close), leaking a
    descriptor per call; a ``with`` block now guarantees closure.
    """
    with open(f) as fh:
        return len(fh.read().splitlines())
if __name__ == "__main__":
    # Use the function form of print so the script runs under both
    # Python 2 and Python 3 (the original used Python 2-only statements).
    if len(sys.argv) < 3:
        print(__doc__)
        sys.exit(1)
    directory, mask = sys.argv[1:3]
    print(sum([strcnt(f) for f in allfiles(directory, mask)]))
# --- end of linecnt.py (corpus metadata removed: pascalin/pycoon, test/linecnt.py, GPL-2.0) ---
# Copyright 2011 Citrix System.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from neutron_lib.api import attributes
from neutron_lib.db import model_base
from neutron_lib import exceptions
from oslo_config import cfg
import oslo_i18n
from oslo_log import log as logging
from oslo_serialization import jsonutils
from six.moves.urllib import parse
from webob import exc
from neutron._i18n import _
from neutron.api import extensions
from neutron.common import constants
from neutron import wsgi
LOG = logging.getLogger(__name__)
def ensure_if_match_supported():
    """Raises exception if 'if-match' revision matching unsupported."""
    ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
    if 'revision-if-match' in ext_mgr.extensions:
        return
    msg = _("This server does not support constraining operations based on "
            "revision numbers")
    raise exceptions.BadRequest(resource='if-match', msg=msg)
def check_request_for_revision_constraint(request):
    """Parses, verifies, and returns a constraint from a request."""
    constraint = None
    for etag in getattr(request.if_match, 'etags', []):
        if not etag.startswith('revision_number='):
            continue
        if constraint is not None:
            msg = _("Multiple revision_number etags are not supported.")
            raise exceptions.BadRequest(resource='if-match', msg=msg)
        ensure_if_match_supported()
        try:
            constraint = int(etag.split('revision_number=')[1])
        except ValueError:
            msg = _("Revision number etag must be in the format of "
                    "revision_number=<int>")
            raise exceptions.BadRequest(resource='if-match', msg=msg)
    return constraint
def is_filter_validation_enabled():
    """Return True when the 'filter-validation' extension is loaded."""
    ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
    return 'filter-validation' in ext_mgr.extensions
def get_filters(request, attr_info, skips=None,
                is_filter_validation_supported=False):
    """Extract filters from the request query string (delegates to
    get_filters_from_dict)."""
    return get_filters_from_dict(
        request.GET.dict_of_lists(),
        attr_info,
        skips=skips,
        is_filter_validation_supported=is_filter_validation_supported)
def get_filters_from_dict(data, attr_info, skips=None,
                          is_filter_validation_supported=False):
    """Extracts the filters from a dict of query parameters.
    Returns a dict of lists for the filters:
    check=a&check=b&name=Bob&
    becomes:
    {'check': [u'a', u'b'], 'name': [u'Bob']}
    Keys in *skips* or matching model-base attributes are ignored; when
    filter validation is both supported by the caller and enabled via the
    extension, keys not marked 'is_filter' in *attr_info* raise HTTP 400.
    """
    attributes.populate_project_info(attr_info)
    is_empty_string_supported = is_empty_string_filtering_supported()
    skips = skips or []
    res = {}
    invalid_keys = []
    check_is_filter = False
    # Validate keys only when both the caller supports it and the API
    # extension is enabled.
    if is_filter_validation_supported and is_filter_validation_enabled():
        check_is_filter = True
    for key, values in data.items():
        # Skip caller-excluded keys and common model columns.
        if key in skips or hasattr(model_base.BASEV2, key):
            continue
        # Drop empty values unless empty-string filtering is supported.
        values = [v for v in values
                  if v or (v == "" and is_empty_string_supported)]
        key_attr_info = attr_info.get(key, {})
        if check_is_filter and not key_attr_info.get('is_filter'):
            invalid_keys.append(key)
            continue
        # Apply per-attribute converters when declared.
        if 'convert_list_to' in key_attr_info:
            values = key_attr_info['convert_list_to'](values)
        elif 'convert_to' in key_attr_info:
            convert_to = key_attr_info['convert_to']
            values = [convert_to(v) for v in values]
        if values:
            res[key] = values
    if invalid_keys:
        msg = _("%s is invalid attribute for filtering") % invalid_keys
        raise exc.HTTPBadRequest(explanation=msg)
    return res
def is_empty_string_filtering_supported():
    """Return True when the 'empty-string-filtering' extension is loaded."""
    ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
    return 'empty-string-filtering' in ext_mgr.extensions
def get_previous_link(request, items, id_key):
    """Build the 'previous' pagination URL for the given result page."""
    params = request.GET.copy()
    params.pop('marker', None)
    if items:
        # The marker for the previous page is the first item shown.
        params['marker'] = items[0][id_key]
    params['page_reverse'] = True
    return "%s?%s" % (prepare_url(request.path_url), parse.urlencode(params))
def get_next_link(request, items, id_key):
    """Build the 'next' pagination URL for the given result page."""
    params = request.GET.copy()
    params.pop('marker', None)
    if items:
        # The marker for the next page is the last item shown.
        params['marker'] = items[-1][id_key]
    params.pop('page_reverse', None)
    return "%s?%s" % (prepare_url(request.path_url), parse.urlencode(params))
def prepare_url(orig_url):
    """Takes a link and swaps in network_link_prefix if set."""
    prefix = cfg.CONF.network_link_prefix
    if not prefix:
        return orig_url
    # Copied directly from nova/api/openstack/common.py
    url_parts = list(parse.urlsplit(orig_url))
    prefix_parts = list(parse.urlsplit(prefix))
    # Take scheme and netloc from the prefix; prepend its path.
    url_parts[:2] = prefix_parts[:2]
    url_parts[2] = prefix_parts[2] + url_parts[2]
    return parse.urlunsplit(url_parts).rstrip('/')
def get_limit_and_marker(request):
    """Return (limit, marker) parsed from *request*.

    'marker' is the id of the last element the client has seen and 'limit'
    is the maximum number of items to return.  When the effective limit is
    0 (pagination not wanted), (None, None) is returned.
    """
    max_limit = _get_pagination_max_limit()
    limit = _get_limit_param(request)
    if max_limit > 0:
        # Cap the requested limit; an unset limit falls back to the cap.
        limit = min(max_limit, limit) or max_limit
    if not limit:
        return None, None
    return limit, request.GET.get('marker', None)
def _get_pagination_max_limit():
    """Return the configured maximum page size; -1 means 'infinite'.

    Non-integer values log a warning and leave the -1 default.  Note the
    subtlety for a configured value of 0: max_limit has already been
    assigned 0 before the ValueError is raised, so 0 (not -1) is returned.
    """
    max_limit = -1
    if (cfg.CONF.pagination_max_limit.lower() !=
        constants.PAGINATION_INFINITE):
        try:
            max_limit = int(cfg.CONF.pagination_max_limit)
            if max_limit == 0:
                raise ValueError()
        except ValueError:
            LOG.warning("Invalid value for pagination_max_limit: %s. It "
                        "should be an integer greater to 0",
                        cfg.CONF.pagination_max_limit)
    return max_limit
def _get_limit_param(request):
"""Extract integer limit from request or fail."""
limit = request.GET.get('limit', 0)
try:
limit = int(limit)
if limit >= 0:
return limit
except ValueError:
pass
msg = _("Limit must be an integer 0 or greater and not '%s'") % limit
raise exceptions.BadRequest(resource='limit', msg=msg)
def list_args(request, arg):
    """Extracts the list of arg from request, dropping empty values."""
    return [value for value in request.GET.getall(arg) if value]
def get_sorts(request, attr_info):
    """Extract sort_key and sort_dir from request.
    Return as: [(key1, value1), (key2, value2)]
    where each value is True for ascending order and False for descending.
    Raises HTTP 400 if keys/dirs counts differ, a key is unknown, or a
    direction is not 'asc'/'desc'.
    """
    attributes.populate_project_info(attr_info)
    sort_keys = list_args(request, "sort_key")
    sort_dirs = list_args(request, "sort_dir")
    # Every sort key must come with exactly one direction.
    if len(sort_keys) != len(sort_dirs):
        msg = _("The number of sort_keys and sort_dirs must be same")
        raise exc.HTTPBadRequest(explanation=msg)
    valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC]
    # Reject keys not declared in the resource's attribute map.
    absent_keys = [x for x in sort_keys if x not in attr_info]
    if absent_keys:
        msg = _("%s is invalid attribute for sort_keys") % absent_keys
        raise exc.HTTPBadRequest(explanation=msg)
    invalid_dirs = [x for x in sort_dirs if x not in valid_dirs]
    if invalid_dirs:
        msg = (_("%(invalid_dirs)s is invalid value for sort_dirs, "
                 "valid value is '%(asc)s' and '%(desc)s'") %
               {'invalid_dirs': invalid_dirs,
                'asc': constants.SORT_DIRECTION_ASC,
                'desc': constants.SORT_DIRECTION_DESC})
        raise exc.HTTPBadRequest(explanation=msg)
    # Map directions to booleans: True == ascending.
    return list(zip(sort_keys,
                    [x == constants.SORT_DIRECTION_ASC for x in sort_dirs]))
def get_page_reverse(request):
    """Return True when the request asks for reversed page order."""
    return request.GET.get('page_reverse', 'False').lower() == "true"
def get_pagination_links(request, items, limit,
                         marker, page_reverse, key="id"):
    """Build the next/previous pagination link dicts where applicable."""
    key = key or 'id'
    links = []
    if not limit:
        return links
    # A full page (or a reverse walk) implies there may be a next page.
    if len(items) >= limit or page_reverse:
        links.append({"rel": "next",
                      "href": get_next_link(request, items, key)})
    # A full page (or a forward walk) implies there may be a previous page.
    if len(items) >= limit or not page_reverse:
        links.append({"rel": "previous",
                      "href": get_previous_link(request, items, key)})
    return links
def is_native_pagination_supported(plugin):
    """Return True if *plugin* declares native pagination support."""
    attr_name = "_%s__native_pagination_support" % plugin.__class__.__name__
    return getattr(plugin, attr_name, False)
def is_native_sorting_supported(plugin):
    """Return True if *plugin* declares native sorting support."""
    attr_name = "_%s__native_sorting_support" % plugin.__class__.__name__
    return getattr(plugin, attr_name, False)
def is_filter_validation_supported(plugin):
    """Return True if *plugin* declares filter-validation support."""
    attr_name = "_%s__filter_validation_support" % plugin.__class__.__name__
    return getattr(plugin, attr_name, False)
class PaginationHelper(object):
    """Base pagination strategy: the default hooks do nothing."""
    def __init__(self, request, primary_key='id'):
        # primary_key names the field used as the pagination marker.
        self.request = request
        self.primary_key = primary_key
    def update_fields(self, original_fields, fields_to_add):
        """Hook: ensure fields required for pagination are fetched."""
        pass
    def update_args(self, args):
        """Hook: inject pagination parameters into plugin call args."""
        pass
    def paginate(self, items):
        """Hook: return the selected page; default is no pagination."""
        return items
    def get_links(self, items):
        """Hook: return pagination links; default is none."""
        return {}
class PaginationEmulatedHelper(PaginationHelper):
    """Emulates pagination in Python for plugins without native support."""
    def __init__(self, request, primary_key='id'):
        super(PaginationEmulatedHelper, self).__init__(request, primary_key)
        # Parse limit/marker/page_reverse once from the request.
        self.limit, self.marker = get_limit_and_marker(request)
        self.page_reverse = get_page_reverse(request)
    def update_fields(self, original_fields, fields_to_add):
        # The primary key must be fetched so markers can be computed.
        if not original_fields:
            return
        if self.primary_key not in original_fields:
            original_fields.append(self.primary_key)
            fields_to_add.append(self.primary_key)
    def paginate(self, items):
        """Return the slice of *items* selected by limit/marker/reverse."""
        if not self.limit:
            return items
        if not items:
            return []
        # first, calculate the base index for pagination
        if self.marker:
            i = 0
            for item in items:
                if item[self.primary_key] == self.marker:
                    break
                i += 1
            else:
                # if marker is not found, return nothing
                return []
        else:
            # No marker: start from the end when reversed, else the start.
            i = len(items) if self.page_reverse else 0
        if self.page_reverse:
            # don't wrap
            return items[max(i - self.limit, 0):i]
        else:
            if self.marker:
                # skip the matched marker
                i += 1
            return items[i:i + self.limit]
    def get_links(self, items):
        # Delegate link construction to the module-level helper.
        return get_pagination_links(
            self.request, items, self.limit, self.marker,
            self.page_reverse, self.primary_key)
class PaginationNativeHelper(PaginationEmulatedHelper):
    """Passes pagination parameters through to a natively-paginating plugin."""
    def update_args(self, args):
        # Guarantee a deterministic order by always sorting on the
        # primary key (ascending) if the caller did not already.
        existing_sort_keys = dict(args.get('sorts', []))
        if self.primary_key not in existing_sort_keys:
            args.setdefault('sorts', []).append((self.primary_key, True))
        args['limit'] = self.limit
        args['marker'] = self.marker
        args['page_reverse'] = self.page_reverse
    def paginate(self, items):
        # The plugin already paginated; pass results through unchanged.
        return items
class NoPaginationHelper(PaginationHelper):
    """Used when pagination is not requested: inherits the no-op hooks."""
    pass
class SortingHelper(object):
    """Base sorting strategy: the default hooks do nothing."""
    def __init__(self, request, attr_info):
        pass
    def update_args(self, args):
        """Hook: inject sorting parameters into plugin call args."""
        pass
    def update_fields(self, original_fields, fields_to_add):
        """Hook: ensure fields required for sorting are fetched."""
        pass
    def sort(self, items):
        """Hook: return sorted items; default leaves order unchanged."""
        return items
class SortingEmulatedHelper(SortingHelper):
    """Sorts results in Python for plugins without native sorting support."""
    def __init__(self, request, attr_info):
        super(SortingEmulatedHelper, self).__init__(request, attr_info)
        # List of (key, ascending_bool) tuples parsed from the request.
        self.sort_dict = get_sorts(request, attr_info)
    def update_fields(self, original_fields, fields_to_add):
        # Every sort key must be fetched so cmp_func can read it.
        if not original_fields:
            return
        for key in dict(self.sort_dict).keys():
            if key not in original_fields:
                original_fields.append(key)
                fields_to_add.append(key)
    def sort(self, items):
        """Return *items* ordered by the parsed multi-key sort spec."""
        def cmp_func(obj1, obj2):
            # Compare field by field; None sorts before any other value.
            for key, direction in self.sort_dict:
                o1 = obj1[key]
                o2 = obj2[key]
                if o1 is None and o2 is None:
                    ret = 0
                elif o1 is None and o2 is not None:
                    ret = -1
                elif o1 is not None and o2 is None:
                    ret = 1
                else:
                    ret = (o1 > o2) - (o1 < o2)
                if ret:
                    # Flip the result for descending keys.
                    return ret * (1 if direction else -1)
            return 0
        return sorted(items, key=functools.cmp_to_key(cmp_func))
class SortingNativeHelper(SortingHelper):
    """Passes the parsed sort spec through to a natively-sorting plugin."""
    def __init__(self, request, attr_info):
        # (key, ascending_bool) tuples validated against attr_info.
        self.sort_dict = get_sorts(request, attr_info)
    def update_args(self, args):
        args['sorts'] = self.sort_dict
class NoSortingHelper(SortingHelper):
    """Used when no sorting is requested: inherits the no-op behavior."""
    pass
def convert_exception_to_http_exc(e, faults, language):
    """Map exception *e* onto a webob HTTP exception with a JSON body.

    :param e: the exception to convert
    :param faults: mapping of exception classes to webob HTTP exception
        classes, used to pick the HTTP status for known faults
    :param language: locale to translate the error message into
    :returns: a webob HTTP exception instance whose body is a serialized
        JSON dict (``NeutronError`` or ``NotImplementedError``)
    """
    serializer = wsgi.JSONDictSerializer()
    if isinstance(e, exceptions.MultipleExceptions):
        # recursively convert each wrapped exception first
        converted_exceptions = [
            convert_exception_to_http_exc(inner, faults, language)
            for inner in e.inner_exceptions]
        # if no internal exceptions, will be handled as single exception
        if converted_exceptions:
            codes = {c.code for c in converted_exceptions}
            if len(codes) == 1:
                # all error codes are the same so we can maintain the code
                # and just concatenate the bodies
                joined_msg = "\n".join(
                    (jsonutils.loads(c.body)['NeutronError']['message']
                     for c in converted_exceptions))
                new_body = jsonutils.loads(converted_exceptions[0].body)
                new_body['NeutronError']['message'] = joined_msg
                converted_exceptions[0].body = serializer.serialize(new_body)
                return converted_exceptions[0]
            else:
                # multiple error types so we turn it into a Conflict with the
                # inner codes and bodies packed in
                new_exception = exceptions.Conflict()
                inner_error_strings = []
                for c in converted_exceptions:
                    c_body = jsonutils.loads(c.body)
                    err = ('HTTP %s %s: %s' % (
                        c.code, c_body['NeutronError']['type'],
                        c_body['NeutronError']['message']))
                    inner_error_strings.append(err)
                new_exception.msg = "\n".join(inner_error_strings)
                # re-enter with the aggregated Conflict so it gets a body
                return convert_exception_to_http_exc(
                    new_exception, faults, language)
    # single exception: translate it, then build the JSON error body
    e = translate(e, language)
    body = serializer.serialize(
        {'NeutronError': get_exception_data(e)})
    kwargs = {'body': body, 'content_type': 'application/json'}
    if isinstance(e, exc.HTTPException):
        # already an HTTP error, just update with content type and body
        e.body = body
        e.content_type = kwargs['content_type']
        return e
    faults_tuple = tuple(faults.keys()) + (exceptions.NeutronException,)
    if isinstance(e, faults_tuple):
        # known fault: use the HTTP error class configured for it
        for fault in faults:
            if isinstance(e, fault):
                mapped_exc = faults[fault]
                break
        else:
            # a NeutronException with no explicit mapping falls back to 500
            mapped_exc = exc.HTTPInternalServerError
        return mapped_exc(**kwargs)
    if isinstance(e, NotImplementedError):
        # NOTE(armando-migliaccio): from a client standpoint
        # it makes sense to receive these errors, because
        # extensions may or may not be implemented by
        # the underlying plugin. So if something goes south,
        # because a plugin does not implement a feature,
        # returning 500 is definitely confusing.
        kwargs['body'] = serializer.serialize(
            {'NotImplementedError': get_exception_data(e)})
        return exc.HTTPNotImplemented(**kwargs)
    # NOTE(jkoelker) Everything else is 500
    # Do not expose details of 500 error to clients.
    msg = _('Request Failed: internal server error while '
            'processing your request.')
    msg = translate(msg, language)
    kwargs['body'] = serializer.serialize(
        {'NeutronError': get_exception_data(exc.HTTPInternalServerError(msg))})
    return exc.HTTPInternalServerError(**kwargs)
def get_exception_data(e):
    """Extract the information about an exception.

    Neutron client for the v2 API expects exceptions to have 'type',
    'message' and 'detail' attributes. This information is extracted and
    converted into a dictionary.

    :param e: the exception to be reraised
    :returns: a structured dict with the exception data
    """
    return {
        'type': e.__class__.__name__,
        'message': e,
        'detail': '',
    }
def translate(translatable, locale):
    """Translate *translatable* to the given locale.

    If the object is an exception its translatable elements are translated
    in place; if the object is a translatable string it is translated and
    returned. Otherwise, the object is returned as-is.

    :param translatable: the object to be translated
    :param locale: the locale to translate to
    :returns: the translated object, or the object as-is if it
        was not translated
    """
    localize = oslo_i18n.translate
    if isinstance(translatable, exceptions.NeutronException):
        translatable.msg = localize(translatable.msg, locale)
        return translatable
    if isinstance(translatable, exc.HTTPError):
        translatable.detail = localize(translatable.detail, locale)
        return translatable
    if isinstance(translatable, Exception):
        translatable.message = localize(translatable, locale)
        return translatable
    # plain translatable string: return the translated value itself
    return localize(translatable, locale)
| huntxu/neutron | neutron/api/api_common.py | Python | apache-2.0 | 19,029 |
"""Exports and imports mesh data"""
import bpy
import nose.tools
import math
import mathutils
from pyffi.formats.nif import NifFormat
from test import Base
from test import SingleNif
from test.geometry.trishape import gen_geometry
class TestBaseGeometry(SingleNif):
    """Test base geometry, single blender object."""

    n_name = 'geometry/base_geometry'
    # (documented in base class)

    b_name = 'Cube'
    """Name of the blender object."""

    b_verts = {
        (-7.5, 7.5, 3.5),
        (7.5, 3.75, 1.75),
        (7.5, -3.75, -1.75),
        (7.5, 3.75, -1.75),
        (-7.5, 7.5, -3.5),
        (-7.5, -7.5, 3.5),
        (7.5, -3.75, 1.75),
        (-7.5, -7.5, -3.5),
    }
    """Vertex locations, for testing."""

    def b_create_objects(self):
        # (documented in base class)
        self.b_create_base_geometry()

    def b_create_base_geometry(self):
        """Create and return a single polyhedron blender object."""
        # create a base mesh, and set its name
        bpy.ops.mesh.primitive_cube_add()
        b_obj = bpy.data.objects[bpy.context.active_object.name]
        b_obj.name = self.b_name
        # transform it into something less trivial
        self.b_scale_object(b_obj)
        self.b_scale_single_face(b_obj)
        b_obj.matrix_local = self.b_get_transform_matrix()
        # primitive_cube_add sets double sided flag, fix this
        b_obj.data.show_double_sided = False
        return b_obj

    def b_scale_object(self, b_obj):
        """Scale the object differently along each axis."""
        bpy.ops.transform.resize(value=(7.5, 1, 1), constraint_axis=(True, False, False))
        bpy.ops.transform.resize(value=(1, 7.5, 1), constraint_axis=(False, True, False))
        bpy.ops.transform.resize(value=(1, 1, 3.5), constraint_axis=(False, False, True))
        bpy.ops.object.transform_apply(scale=True)

    def b_scale_single_face(self, b_obj):
        """Scale a single face of the object."""
        # deselect all faces, then select only face 2
        for b_face in b_obj.data.faces:
            b_face.select = False
        b_obj.data.faces[2].select = True
        # halve the y and z coordinates of the selected face's vertices
        for b_vert_index in b_obj.data.faces[2].vertices:
            b_obj.data.vertices[b_vert_index].co[1] = b_obj.data.vertices[b_vert_index].co[1] * 0.5
            b_obj.data.vertices[b_vert_index].co[2] = b_obj.data.vertices[b_vert_index].co[2] * 0.5

    def b_get_transform_matrix(self):
        """Return a non-trivial transform matrix."""
        b_trans_mat = mathutils.Matrix.Translation((20.0, 20.0, 20.0))
        b_rot_mat_x = mathutils.Matrix.Rotation(math.radians(30.0), 4, 'X')
        b_rot_mat_y = mathutils.Matrix.Rotation(math.radians(60.0), 4, 'Y')
        b_rot_mat_z = mathutils.Matrix.Rotation(math.radians(90.0), 4, 'Z')
        b_rot_mat = b_rot_mat_x * b_rot_mat_y * b_rot_mat_z
        b_scale_mat = mathutils.Matrix.Scale(0.75, 4)
        b_transform_mat = b_trans_mat * b_rot_mat * b_scale_mat
        return b_transform_mat

    def b_check_data(self):
        """Check the blender object after import."""
        b_obj = bpy.data.objects[self.b_name]
        self.b_check_geom_obj(b_obj)

    def b_check_geom_obj(self, b_obj):
        """Check mesh geometry and object transform."""
        b_mesh = b_obj.data
        self.b_check_geom(b_mesh)
        self.b_check_transform(b_obj)

    def b_check_transform(self, b_obj):
        """Check the object's location, rotation and uniform scale."""
        b_loc_vec, b_rot_quat, b_scale_vec = b_obj.matrix_local.decompose()  # transforms
        nose.tools.assert_equal(b_obj.location, mathutils.Vector((20.0, 20.0, 20.0)))  # location
        # BUGFIX: to_euler() returns a new Euler; its result was previously
        # discarded and the quaternion's own x/y/z components were compared
        # against euler angles, so the rotation checks tested wrong values.
        b_rot_eul = b_rot_quat.to_euler()
        # use abs() so the tolerance check is symmetric (a large negative
        # deviation must not pass)
        nose.tools.assert_true(abs(b_rot_eul.x - math.radians(30.0)) < self.EPSILON)  # x rotation
        nose.tools.assert_true(abs(b_rot_eul.y - math.radians(60.0)) < self.EPSILON)  # y rotation
        nose.tools.assert_true(abs(b_rot_eul.z - math.radians(90.0)) < self.EPSILON)  # z rotation
        # uniform scale: every component must be close to 0.75
        for b_scale_comp in b_obj.scale:
            nose.tools.assert_true(abs(b_scale_comp - 0.75) < self.EPSILON)

    def b_check_geom(self, b_mesh):
        """Check vertex and triangle counts and vertex locations."""
        num_triangles = len([face for face in b_mesh.faces if len(face.vertices) == 3])  # check for tri
        num_triangles += 2 * len([face for face in b_mesh.faces if len(face.vertices) == 4])  # face = 2 tris
        nose.tools.assert_equal(len(b_mesh.vertices), 8)
        nose.tools.assert_equal(num_triangles, 12)
        verts = {
            tuple(round(co, 4) for co in vert.co)
            for vert in b_mesh.vertices
        }
        nose.tools.assert_set_equal(verts, self.b_verts)

    def n_create_data(self):
        # (documented in base class)
        return gen_geometry.n_create_data()

    def n_check_data(self, n_data):
        """Check the nif data tree: shape node, transform and geometry."""
        n_trishape = n_data.roots[0].children[0]
        self.n_check_trishape(n_trishape)
        self.n_check_transform(n_trishape)
        self.n_check_trishape_data(n_trishape.data)

    def n_check_trishape(self, n_geom):
        """Check that the exported node is a NiTriShape."""
        nose.tools.assert_is_instance(n_geom, NifFormat.NiTriShape)

    def n_check_transform(self, n_geom):
        """Check the nif node's translation, rotation and scale."""
        nose.tools.assert_equal(n_geom.translation.as_tuple(), (20.0, 20.0, 20.0))  # location
        n_rot_eul = mathutils.Matrix(n_geom.rotation.as_tuple()).to_euler()
        # symmetric tolerance checks (see b_check_transform)
        nose.tools.assert_true(abs(n_rot_eul.x - math.radians(30.0)) < self.EPSILON)  # x rotation
        nose.tools.assert_true(abs(n_rot_eul.y - math.radians(60.0)) < self.EPSILON)  # y rotation
        nose.tools.assert_true(abs(n_rot_eul.z - math.radians(90.0)) < self.EPSILON)  # z rotation
        nose.tools.assert_true(abs(n_geom.scale - 0.75) < self.EPSILON)  # scale

    def n_check_trishape_data(self, n_trishape_data):
        """Check vertex/triangle counts and vertex locations in the nif."""
        nose.tools.assert_equal(n_trishape_data.num_vertices, 8)
        nose.tools.assert_equal(n_trishape_data.num_triangles, 12)
        verts = {
            tuple(round(co, 4) for co in vert.as_list())
            for vert in n_trishape_data.vertices
        }
        nose.tools.assert_set_equal(verts, self.b_verts)

    #TODO: Additional checks needed.
    #TriData
    #   Flags: blender exports| Continue, Maya| Triangles, Pyffi| Bound.
    #   Consistency:
    #   radius:
class TestNonUniformlyScaled(Base):
    """Check that exporting a non-uniformly scaled object raises."""
    def setup(self):
        # create a non-uniformly scaled cube
        bpy.ops.mesh.primitive_cube_add()
        b_obj = bpy.data.objects["Cube"]
        b_obj.scale = (1, 2, 3)
    @nose.tools.raises(Exception)
    def test_export(self):
        # exporting an object with per-axis scale is expected to fail
        # (the raises decorator makes the raised Exception the pass case)
        bpy.ops.export_scene.nif(
            filepath="test/export/non_uniformly_scaled_cube.nif",
            log_level='DEBUG',
        )
| amorilia/blender_nif_plugin | testframework/test/geometry/trishape/test_geometry.py | Python | bsd-3-clause | 6,720 |
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# serve the notebook under a URL prefix (e.g. when running behind a proxy)
c.NotebookApp.base_url = '/jupyter/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Deprecated: Use minified JS file or not, mainly use during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which messages can be sent on iopub before they
# are limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
#c.NotebookApp.ip = 'localhost'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions.Entry values can
# be used to enable and disable the loading ofthe extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# pre-hashed login password (sha1 salted hash; see passwd() note above)
c.NotebookApp.password = 'sha1:6c1a5cca33dc:30f31ede1973570aa5e471d9d5537852a5f9386b'
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine though ssh.
#
# In such a case, server the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
c.NotebookApp.port = 8000  # listen on port 8000
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'username'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved on disk to a temporary file and then, if
# successfully written, it replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked fs). If set to False, the new notebook is
# written directly on the old one, which could fail (eg: full filesystem or quota
# )
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
## https://github.com/jbwhit/til/blob/master/jupyter/autosave_html_py.md
import logging
import os
from queue import Queue
from subprocess import check_call
from tempfile import TemporaryFile
from threading import Thread

import nbformat
class PostSave:
    """Asynchronous Jupyter post-save hook that converts notebooks to HTML.

    Conversion is handed off to a background worker thread so that saving a
    notebook in the UI is never blocked by `jupyter nbconvert`.
    """

    # Class-level queue: all PostSave instances feed the same work queue.
    __queue = Queue()

    def __init__(self):
        # BUGFIX: the worker must be a daemon thread.  A non-daemon thread
        # blocks forever on queue.get() and prevents the notebook server
        # process from ever exiting.
        t = Thread(target=self.__worker)
        t.daemon = True
        t.start()

    def __worker(self):
        """Consume queued save events forever, converting each in turn."""
        while True:
            args, kwargs = self.__queue.get()
            try:
                self.__convert(*args, **kwargs)
            except Exception:
                # BUGFIX: a failed conversion (e.g. nbconvert not installed)
                # must not kill the worker thread; otherwise every later save
                # would silently never be converted.
                logging.getLogger(__name__).exception('post-save convert failed')
            finally:
                self.__queue.task_done()

    @staticmethod
    def __convert(model, os_path, contents_manager):
        """Convert the just-saved file to HTML, but only if it is a notebook."""
        d, fname = os.path.split(os_path)
        if model['type'] == 'notebook':
            check_call(['jupyter', 'nbconvert', '--to', 'html', fname], cwd=d)

    def __call__(self, *args, **kwargs):
        # Signature matches ContentsManager.post_save_hook; just enqueue the
        # event and return immediately so the save is not delayed.
        self.__queue.put((args, kwargs))
# Convert .ipynb files into .html after each save (done asynchronously on a
# background thread; see PostSave above).
c.FileContentsManager.post_save_hook = PostSave()
| hombit/scientific_python | misc/share_jupyter/jupyter/jupyter_notebook_config.py | Python | mit | 23,610 |
"""Parallel workflow execution via Condor DAGMan
"""
import os
import sys
from .base import (GraphPluginBase, logger)
from ...interfaces.base import CommandLine
class CondorDAGManPlugin(GraphPluginBase):
    """Execute using Condor DAGMan

    The plugin_args input to run can be used to control the DAGMan execution.
    Currently supported options are:

    - template : submit spec template to use for job submission. The template
                 all generated submit specs are appended to this template. This
                 can be a str or a filename.
    - submit_specs : additional submit specs that are appended to the generated
                 submit specs to allow for overriding or extending the defaults.
                 This can be a str or a filename.
    - dagman_args : arguments to be prepended to the job execution script in the
                 dagman call
    """
    # XXX feature wishlist
    # - infer data file dependencies from jobs
    # - infer CPU requirements from jobs
    # - infer memory requirements from jobs
    # - looks like right now all jobs come in here, regardless of whether they
    #   actually have to run. would be good to be able to decide whether they
    #   actually have to be scheduled (i.e. output already exist).
    def __init__(self, **kwargs):
        """Initialize defaults, then apply overrides from plugin_args.

        'template' and 'submit_specs' may each be given inline or as a path
        to a file whose contents are read and used instead.
        """
        self._template = "universe = vanilla\nnotification = Never"
        self._submit_specs = ""
        self._dagman_args = ""
        if 'plugin_args' in kwargs:
            plugin_args = kwargs['plugin_args']
            if 'template' in plugin_args:
                self._template = plugin_args['template']
                if os.path.isfile(self._template):
                    self._template = open(self._template).read()
            if 'submit_specs' in plugin_args:
                self._submit_specs = plugin_args['submit_specs']
                if os.path.isfile(self._submit_specs):
                    self._submit_specs = open(self._submit_specs).read()
            if 'dagman_args' in plugin_args:
                self._dagman_args = plugin_args['dagman_args']
        super(CondorDAGManPlugin, self).__init__(**kwargs)
    def _submit_graph(self, pyfiles, dependencies):
        """Write one Condor submit file per node script plus a DAG description,
        then hand the DAG over to condor_submit_dag.

        :param pyfiles: list of node script paths (all in the same batch dir)
        :param dependencies: dict mapping a child job index to the list of
            parent job indices it depends on
        """
        # location of all scripts, place dagman output in here too
        batch_dir, _ = os.path.split(pyfiles[0])
        # DAG description filename
        dagfilename = os.path.join(batch_dir, 'workflow.dag')
        with open(dagfilename, 'wt') as dagfileptr:
            # loop over all scripts, create submit files, and define them
            # as jobs in the DAG
            for idx, pyscript in enumerate(pyfiles):
                # XXX redundant with previous value? or could it change between
                # scripts?
                batch_dir, name = os.path.split(pyscript)
                # strip the file extension to get the job name
                name = '.'.join(name.split('.')[:-1])
                submitspec = '\n'.join(
                    (self._template,
                     'executable = %s' % sys.executable,
                     'arguments = %s' % pyscript,
                     'output = %s' % os.path.join(batch_dir,
                                                  '%s.out' % name),
                     'error = %s' % os.path.join(batch_dir,
                                                 '%s.err' % name),
                     'log = %s' % os.path.join(batch_dir,
                                               '%s.log' % name),
                     'getenv = True',
                     self._submit_specs,
                     'queue'
                     ))
                # write submit spec for this job
                submitfile = os.path.join(batch_dir,
                                          '%s.submit' % name)
                # BUGFIX: use write() for the whole string -- writelines(str)
                # iterated the string and wrote it one character at a time,
                # and the explicit close() inside the with-block was redundant.
                with open(submitfile, 'wt') as submitfileprt:
                    submitfileprt.write(submitspec)
                # define job in DAG
                dagfileptr.write('JOB %i %s\n' % (idx, submitfile))
            # define dependencies in DAG
            for child in dependencies:
                parents = dependencies[child]
                if len(parents):
                    dagfileptr.write('PARENT %s CHILD %i\n'
                                     % (' '.join([str(i) for i in parents]),
                                        child))
        # hand over DAG to condor_dagman; pass a plain dict copy of the
        # environment instead of the CPython-private os.environ.data attribute
        cmd = CommandLine('condor_submit_dag', environ=dict(os.environ))
        # needs -update_submit or re-running a workflow will fail
        cmd.inputs.args = '-update_submit %s %s' % (dagfilename,
                                                    self._dagman_args)
        cmd.run()
        logger.info('submitted all jobs to Condor DAGMan')
| christianbrodbeck/nipype | nipype/pipeline/plugins/dagman.py | Python | bsd-3-clause | 4,865 |
"""Conditional module is the xmodule, which you can use for disabling
some xmodules by conditions.
"""
import json
import logging
from lazy import lazy
from lxml import etree
from pkg_resources import resource_string
from xblock.fields import ReferenceList, Scope, String
from xblock.fragment import Fragment
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.seq_module import SequenceDescriptor
from xmodule.studio_editable import StudioEditableDescriptor, StudioEditableModule
from xmodule.validation import StudioValidation, StudioValidationMessage
from xmodule.x_module import STUDENT_VIEW, XModule
log = logging.getLogger('edx.' + __name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class ConditionalFields(object):
    # XBlock field definitions shared (mixed in) by both ConditionalModule and
    # ConditionalDescriptor below.
    has_children = True
    display_name = String(
        display_name=_("Display Name"),
        help=_("The display name for this component."),
        scope=Scope.settings,
        default=_('Conditional')
    )
    # Children declared via <show sources="..."/> tags (references to modules
    # defined elsewhere), as opposed to inline children.
    show_tag_list = ReferenceList(
        help=_("List of urls of children that are references to external modules"),
        scope=Scope.content
    )
    sources_list = ReferenceList(
        display_name=_("Source Components"),
        help=_("The component location IDs of all source components that are used to determine whether a learner is "
               "shown the content of this conditional module. Copy the component location ID of a component from its "
               "Settings dialog in Studio."),
        scope=Scope.content
    )
    conditional_attr = String(
        display_name=_("Conditional Attribute"),
        help=_("The attribute of the source components that determines whether a learner is shown the content of this "
               "conditional module."),
        scope=Scope.content,
        default='correct',
        # Valid values are the keys of ConditionalModule.conditions_map; lazy
        # lambda avoids referencing ConditionalModule before it is defined.
        values=lambda: [{'display_name': xml_attr, 'value': xml_attr}
                        for xml_attr in ConditionalModule.conditions_map.keys()]
    )
    conditional_value = String(
        display_name=_("Conditional Value"),
        help=_("The value that the conditional attribute of the source components must match before a learner is shown "
               "the content of this conditional module."),
        scope=Scope.content,
        default='True'
    )
    conditional_message = String(
        display_name=_("Blocked Content Message"),
        help=_("The message that is shown to learners when not all conditions are met to show the content of this "
               "conditional module. Include {link} in the text of your message to give learners a direct link to "
               "required units. For example, 'You must complete {link} before you can access this unit'."),
        scope=Scope.content,
        default=_('You must complete {link} before you can access this unit.')
    )
class ConditionalModule(ConditionalFields, XModule, StudioEditableModule):
    """
    Blocks child module from showing unless certain conditions are met.
    Example:
         <conditional sources="i4x://.../problem_1; i4x://.../problem_2" completed="True">
            <show sources="i4x://.../test_6; i4x://.../Avi_resources"/>
            <video url_name="secret_video" />
        </conditional>
    <conditional> tag attributes:
        sources - location id of required modules, separated by ';'
        submitted - map to `is_submitted` module method.
        (pressing RESET button makes this function to return False.)
        attempted - map to `is_attempted` module method
        correct - map to `is_correct` module method
        poll_answer - map to `poll_answer` module attribute
        voted - map to `voted` module attribute
    <show> tag attributes:
        sources - location id of required modules, separated by ';'
    You can add your own rules for <conditional> tag, like
    "completed", "attempted" etc. To do that you must extend
    `ConditionalModule.conditions_map` variable and add pair:
        my_attr: my_property/my_method
    After that you can use it:
        <conditional my_attr="some value" ...>
            ...
        </conditional>
    And my_property/my_method will be called for required modules.
    """
    js = {
        'coffee': [
            resource_string(__name__, 'js/src/conditional/display.coffee'),
        ],
        'js': [
            resource_string(__name__, 'js/src/javascript_loader.js'),
            resource_string(__name__, 'js/src/collapsible.js'),
        ]
    }
    js_module_name = "Conditional"
    css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]}
    # Map
    # key: <tag attribute in xml>
    # value: <name of module attribute>
    conditions_map = {
        'poll_answer': 'poll_answer',  # poll_question attr
        # problem was submitted (it can be wrong)
        # if student will press reset button after that,
        # state will be reverted
        'submitted': 'is_submitted',  # capa_problem attr
        # if student attempted problem
        'attempted': 'is_attempted',  # capa_problem attr
        # if problem is full points
        'correct': 'is_correct',
        'voted': 'voted'  # poll_question attr
    }
    @lazy
    def required_modules(self):
        # Instantiated modules for every source descriptor; cached (lazy) so
        # the runtime lookup happens at most once per request.
        return [self.system.get_module(descriptor) for
                descriptor in self.descriptor.get_required_module_descriptors()]
    def is_condition_satisfied(self):
        """Return True iff every required module's conditional attribute
        matches `conditional_value` (string comparison)."""
        attr_name = self.conditions_map[self.conditional_attr]
        if self.conditional_value and self.required_modules:
            for module in self.required_modules:
                if not hasattr(module, attr_name):
                    # We don't throw an exception here because it is possible for
                    # the descriptor of a required module to have a property but
                    # for the resulting module to be a (flavor of) ErrorModule.
                    # So just log and return false.
                    if module is not None:
                        # We do not want to log when module is None, and it is when requester
                        # does not have access to the requested required module.
                        log.warn('Error in conditional module: \
required module {module} has no {module_attr}'.format(module=module, module_attr=attr_name))
                    return False
                attr = getattr(module, attr_name)
                if callable(attr):
                    attr = attr()
                if self.conditional_value != str(attr):
                    break
            # for/else: only reached when no break occurred, i.e. every
            # required module matched the conditional value.
            else:
                return True
        return False
    def get_html(self):
        # Calculate html ids of dependencies
        self.required_html_ids = [descriptor.location.html_id() for
                                  descriptor in self.descriptor.get_required_module_descriptors()]
        return self.system.render_template('conditional_ajax.html', {
            'element_id': self.location.html_id(),
            'ajax_url': self.system.ajax_url,
            'depends': ';'.join(self.required_html_ids)
        })
    def author_view(self, context):
        """
        Renders the Studio preview by rendering each child so that they can all be seen and edited.
        """
        fragment = Fragment()
        root_xblock = context.get('root_xblock')
        is_root = root_xblock and root_xblock.location == self.location
        if is_root:
            # User has clicked the "View" link. Show a preview of all possible children:
            self.render_children(context, fragment, can_reorder=True, can_add=True)
        # else: When shown on a unit page, don't show any sort of preview -
        # just the status of this block in the validation area.
        return fragment
    def handle_ajax(self, _dispatch, _data):
        """This is called by courseware.module_render, to handle
        an AJAX call.
        """
        if not self.is_condition_satisfied():
            context = {'module': self,
                       'message': self.conditional_message}
            html = self.system.render_template('conditional_module.html',
                                               context)
            return json.dumps({'html': [html], 'message': bool(self.conditional_message)})
        html = [child.render(STUDENT_VIEW).content for child in self.get_display_items()]
        return json.dumps({'html': html})
    def get_icon_class(self):
        """Pick an icon based on child content, preferring video over problem."""
        new_class = 'other'
        # HACK: This shouldn't be hard-coded to two types
        # OBSOLETE: This obsoletes 'type'
        class_priority = ['video', 'problem']
        child_classes = [self.system.get_module(child_descriptor).get_icon_class()
                         for child_descriptor in self.descriptor.get_children()]
        for c in class_priority:
            if c in child_classes:
                new_class = c
        return new_class
    def validate(self):
        """
        Message for either error or warning validation message/s.
        Returns message and type. Priority given to error type message.
        """
        # Delegates entirely to the descriptor's validation (defined below).
        return self.descriptor.validate()
class ConditionalDescriptor(ConditionalFields, SequenceDescriptor, StudioEditableDescriptor):
    """Descriptor for conditional xmodule."""
    _tag_name = 'conditional'
    module_class = ConditionalModule
    resources_dir = None
    filename_extension = "xml"
    has_score = False
    show_in_read_only_mode = True
    def __init__(self, *args, **kwargs):
        """
        Create an instance of the conditional module.
        """
        super(ConditionalDescriptor, self).__init__(*args, **kwargs)
        # Convert sources xml_attribute to a ReferenceList field type so Location/Locator
        # substitution can be done.
        if not self.sources_list:
            # NOTE: basestring is Python-2-only; this module targets py2.
            if 'sources' in self.xml_attributes and isinstance(self.xml_attributes['sources'], basestring):
                self.sources_list = [
                    self.location.course_key.make_usage_key_from_deprecated_string(item)
                    for item in ConditionalDescriptor.parse_sources(self.xml_attributes)
                ]
    @staticmethod
    def parse_sources(xml_element):
        """ Parse xml_element 'sources' attr and return a list of location strings.

        Returns None (implicitly) when the 'sources' attribute is absent/empty.
        """
        sources = xml_element.get('sources')
        if sources:
            return [location.strip() for location in sources.split(';')]
    def get_required_module_descriptors(self):
        """Returns a list of XModuleDescriptor instances upon
        which this module depends.
        """
        descriptors = []
        for location in self.sources_list:
            try:
                descriptor = self.system.load_item(location)
                descriptors.append(descriptor)
            except ItemNotFoundError:
                # A missing source is reported but does not abort the lookup
                # of the remaining sources.
                msg = "Invalid module by location."
                log.exception(msg)
                self.system.error_tracker(msg)
        return descriptors
    @classmethod
    def definition_from_xml(cls, xml_object, system):
        """Build (definition, children) from the <conditional> XML element."""
        children = []
        show_tag_list = []
        definition = {}
        # NOTE: iterkeys is Python-2-only.
        for conditional_attr in ConditionalModule.conditions_map.iterkeys():
            conditional_value = xml_object.get(conditional_attr)
            if conditional_value is not None:
                definition.update({
                    'conditional_attr': conditional_attr,
                    'conditional_value': str(conditional_value),
                })
        for child in xml_object:
            if child.tag == 'show':
                locations = ConditionalDescriptor.parse_sources(child)
                for location in locations:
                    children.append(location)
                    show_tag_list.append(location)
            else:
                try:
                    descriptor = system.process_xml(etree.tostring(child))
                    children.append(descriptor.scope_ids.usage_id)
                # NOTE(review): bare except deliberately swallows any parse
                # error so one broken child does not break the whole block.
                except:
                    msg = "Unable to load child when parsing Conditional."
                    log.exception(msg)
                    system.error_tracker(msg)
        definition.update({
            'show_tag_list': show_tag_list,
            'conditional_message': xml_object.get('message', '')
        })
        return definition, children
    def definition_to_xml(self, resource_fs):
        """Serialize this block (inline children + <show> tag) back to XML."""
        xml_object = etree.Element(self._tag_name)
        for child in self.get_children():
            if child.location not in self.show_tag_list:
                self.runtime.add_block_as_child_node(child, xml_object)
        if self.show_tag_list:
            show_str = u'<{tag_name} sources="{sources}" />'.format(
                tag_name='show', sources=';'.join(location.to_deprecated_string() for location in self.show_tag_list))
            xml_object.append(etree.fromstring(show_str))
        # Overwrite the original sources attribute with the value from sources_list, as
        # Locations may have been changed to Locators.
        stringified_sources_list = map(lambda loc: loc.to_deprecated_string(), self.sources_list)
        self.xml_attributes['sources'] = ';'.join(stringified_sources_list)
        self.xml_attributes[self.conditional_attr] = self.conditional_value
        self.xml_attributes['message'] = self.conditional_message
        return xml_object
    def validate(self):
        """Add a Studio "not configured" message when no sources are set."""
        validation = super(ConditionalDescriptor, self).validate()
        if not self.sources_list:
            conditional_validation = StudioValidation(self.location)
            conditional_validation.add(
                StudioValidationMessage(
                    StudioValidationMessage.NOT_CONFIGURED,
                    _(u"This component has no source components configured yet."),
                    action_class='edit-button',
                    action_label=_(u"Configure list of sources")
                )
            )
            validation = StudioValidation.copy(validation)
            validation.summary = conditional_validation.messages[0]
        return validation
    @property
    def non_editable_metadata_fields(self):
        # NOTE(review): the due/proctoring/time-limit fields referenced here
        # are presumably inherited from an ancestor class -- they are not
        # defined in this file; confirm against the base descriptor.
        non_editable_fields = super(ConditionalDescriptor, self).non_editable_metadata_fields
        non_editable_fields.extend([
            ConditionalDescriptor.due,
            ConditionalDescriptor.is_practice_exam,
            ConditionalDescriptor.is_proctored_enabled,
            ConditionalDescriptor.is_time_limited,
            ConditionalDescriptor.default_time_limit_minutes,
            ConditionalDescriptor.show_tag_list,
            ConditionalDescriptor.exam_review_rules,
        ])
        return non_editable_fields
| pepeportela/edx-platform | common/lib/xmodule/xmodule/conditional_module.py | Python | agpl-3.0 | 14,768 |
import abc
from default_metrics import DefaultMetrics
class DefaultEnvironment(object):
    """
    Abstract class for environments. All environments must implement these
    methods to be able to work with SBB.
    """
    __metaclass__ = abc.ABCMeta  # Python-2-style ABC declaration
    def __init__(self):
        # Every environment owns a metrics helper bound to itself.
        self.metrics_ = DefaultMetrics(self)
    @abc.abstractmethod
    def reset(self):
        """
        Method that is called at the beginning of each run by SBB, to reset the
        variables that will be used by the generations.
        """
    @abc.abstractmethod
    def setup(self, teams_population):
        """
        Method that is called at the beginning of each generation by SBB, to set the
        variables that will be used by the generation and remove the ones that are no
        longer being used.
        """
    @abc.abstractmethod
    def evaluate_point_population(self, teams_population):
        """
        Evaluate the fitness of the point population, to define which points will be removed
        or added in the next generation, when setup_point_population() is executed.
        """
    @abc.abstractmethod
    def evaluate_teams_population_for_training(self, teams_population):
        """
        Evaluate all the teams using the evaluate_team() method, and sets metrics. Used only
        for training.
        """
    @abc.abstractmethod
    def evaluate_team(self, team, mode):
        """
        Evaluate the team using the environment inputs. May be executed in the training
        or the test mode.
        This method must set the attribute results_per_points of the team, if you intend to
        use pareto.
        """
    @abc.abstractmethod
    def validate(self, current_generation, teams_population):
        """
        For classification:
            - Return the best team for the teams_population using the champion set.
        For reinforcement:
            - All teams go against the validation set, and then the best one go against the champion set
        """
    def hall_of_fame(self):
        # Concrete default: no hall of fame unless a subclass overrides this.
        return []
"""Miscellaneous utility functions and classes specific to ansible cli tools."""
from __future__ import absolute_import, print_function
import os
from lib.util import common_environment
def ansible_environment(args):
    """
    Build the environment dict used to run ansible commands for testing.

    :type args: CommonConfig
    :rtype: dict[str, str]
    """
    env = common_environment()
    path = env['PATH']

    ansible_path = os.path.join(os.getcwd(), 'bin')

    # Make sure the local checkout's bin/ directory takes precedence, but
    # only prepend it once.
    if not path.startswith(ansible_path + os.pathsep):
        path = ansible_path + os.pathsep + path

    ansible = dict(
        # Simplified from "'%s' % 'true' if args.color else 'false'": the
        # conditional expression already yields the whole value, so the
        # %-formatting of a literal was a no-op.
        ANSIBLE_FORCE_COLOR='true' if args.color else 'false',
        ANSIBLE_DEPRECATION_WARNINGS='false',
        # Neutralize any user/system ansible.cfg during tests.
        ANSIBLE_CONFIG='/dev/null',
        ANSIBLE_HOST_KEY_CHECKING='false',
        PYTHONPATH=os.path.abspath('lib'),
        PAGER='/bin/cat',
        PATH=path,
    )

    env.update(ansible)

    return env
| camradal/ansible | test/runner/lib/ansible_util.py | Python | gpl-3.0 | 856 |
#!/usr/bin/env python3
# BUGFIX: "import tvcheck.pyfs" only binds the name "tvcheck", so the
# following "pyfs.main()" raised NameError; import the submodule by name.
from tvcheck import pyfs

pyfs.main()
| 35359595/pyfs | tvcheck/__init__.py | Python | apache-2.0 | 55 |
# pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
from tensorflow.python.tools import optimize_for_inference_lib
class OptimizeForInferenceTest(test.TestCase):
  """Tests for the optimize_for_inference graph-rewriting library."""

  def create_node_def(self, op, name, inputs):
    """Build a NodeDef with the given op type, name and input names."""
    new_node = node_def_pb2.NodeDef()
    new_node.op = op
    new_node.name = name
    for input_name in inputs:
      new_node.input.extend([input_name])
    return new_node

  def create_constant_node_def(self, name, value, dtype, shape=None):
    """Build a Const NodeDef holding `value` with the given dtype/shape."""
    node = self.create_node_def("Const", name, [])
    self.set_attr_dtype(node, "dtype", dtype)
    self.set_attr_tensor(node, "value", value, dtype, shape)
    return node

  def set_attr_dtype(self, node, key, value):
    """Set a DataType-valued attr on `node`."""
    node.attr[key].CopyFrom(
        attr_value_pb2.AttrValue(type=value.as_datatype_enum))

  def set_attr_tensor(self, node, key, value, dtype, shape=None):
    """Set a TensorProto-valued attr on `node`."""
    node.attr[key].CopyFrom(
        attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
            value, dtype=dtype, shape=shape)))

  def testOptimizeForInference(self):
    """Dead nodes and CheckNumerics/Identity indirection should be stripped."""
    self.maxDiff = 1000
    unused_constant_name = "unused_constant"
    unconnected_add_name = "unconnected_add"
    a_constant_name = "a_constant"
    b_constant_name = "b_constant"
    a_check_name = "a_check"
    b_check_name = "b_check"
    a_identity_name = "a_identity"
    b_identity_name = "b_identity"
    add_name = "add"
    unused_output_add_name = "unused_output_add"
    graph_def = graph_pb2.GraphDef()
    unused_constant = self.create_constant_node_def(
        unused_constant_name, value=0, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([unused_constant])
    unconnected_add_node = self.create_node_def(
        "Add", unconnected_add_name,
        [unused_constant_name, unused_constant_name])
    self.set_attr_dtype(unconnected_add_node, "T", dtypes.float32)
    graph_def.node.extend([unconnected_add_node])
    a_constant = self.create_constant_node_def(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([a_constant])
    a_check_node = self.create_node_def("CheckNumerics", a_check_name,
                                        [a_constant_name])
    graph_def.node.extend([a_check_node])
    a_identity_node = self.create_node_def(
        "Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
    graph_def.node.extend([a_identity_node])
    b_constant = self.create_constant_node_def(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([b_constant])
    b_check_node = self.create_node_def("CheckNumerics", b_check_name,
                                        [b_constant_name])
    graph_def.node.extend([b_check_node])
    b_identity_node = self.create_node_def(
        "Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
    graph_def.node.extend([b_identity_node])
    add_node = self.create_node_def("Add", add_name,
                                    [a_identity_name, b_identity_name])
    self.set_attr_dtype(add_node, "T", dtypes.float32)
    graph_def.node.extend([add_node])
    unused_output_add_node = self.create_node_def("Add", unused_output_add_name,
                                                  [add_name, b_constant_name])
    self.set_attr_dtype(unused_output_add_node, "T", dtypes.float32)
    graph_def.node.extend([unused_output_add_node])
    # Expected result: just the two constants feeding the surviving Add.
    expected_output = graph_pb2.GraphDef()
    a_constant = self.create_constant_node_def(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([a_constant])
    b_constant = self.create_constant_node_def(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([b_constant])
    add_node = self.create_node_def("Add", add_name,
                                    [a_constant_name, b_constant_name])
    self.set_attr_dtype(add_node, "T", dtypes.float32)
    expected_output.node.extend([add_node])
    output = optimize_for_inference_lib.optimize_for_inference(
        graph_def, [], [add_name], dtypes.float32.as_datatype_enum)
    self.assertProtoEquals(expected_output, output)

  def testFoldBatchNorms(self):
    """Folding BatchNormWithGlobalNormalization must not change the output."""
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32)
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      conv_op = nn_ops.conv2d(
          input_op, weights_op, [1, 1, 1, 1], padding="SAME", name="conv_op")
      mean_op = constant_op.constant(
          np.array([10, 20]), shape=[2], dtype=dtypes.float32)
      variance_op = constant_op.constant(
          np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
      beta_op = constant_op.constant(
          np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
      gamma_op = constant_op.constant(
          np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
      # The deprecated op requires an old producer version to be accepted.
      test_util.set_producer_version(ops.get_default_graph(), 8)
      gen_nn_ops._batch_norm_with_global_normalization(
          conv_op,
          mean_op,
          variance_op,
          beta_op,
          gamma_op,
          0.00001,
          False,
          name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
        original_graph_def)
    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])
      self.assertAllClose(original_result, optimized_result)
      for node in optimized_graph_def.node:
        self.assertNotEqual("BatchNormWithGlobalNormalization", node.op)

  def testFoldFusedBatchNorms(self):
    """Folding FusedBatchNorm must not change the output (NHWC and NCHW)."""
    for data_format, use_gpu in [("NHWC", False), ("NCHW", True)]:
      with self.test_session(use_gpu=use_gpu) as sess:
        inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
        input_op = constant_op.constant(
            np.array(inputs),
            shape=[1, 1, 6, 2] if data_format == "NHWC" else [1, 2, 1, 6],
            dtype=dtypes.float32)
        weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
        weights_op = constant_op.constant(
            np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
        conv_op = nn_ops.conv2d(
            input_op,
            weights_op, [1, 1, 1, 1],
            padding="SAME",
            data_format=data_format,
            name="conv_op")
        mean_op = constant_op.constant(
            np.array([10, 20]), shape=[2], dtype=dtypes.float32)
        variance_op = constant_op.constant(
            np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
        beta_op = constant_op.constant(
            np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
        gamma_op = constant_op.constant(
            np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
        ops.get_default_graph().graph_def_versions.producer = 9
        gen_nn_ops._fused_batch_norm(
            conv_op,
            gamma_op,
            beta_op,
            mean_op,
            variance_op,
            0.00001,
            is_training=False,
            data_format=data_format,
            name="output")
        original_graph_def = sess.graph_def
        original_result = sess.run(["output:0"])
      optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
          original_graph_def)
      with self.test_session(use_gpu=use_gpu) as sess:
        _ = importer.import_graph_def(
            optimized_graph_def, input_map={}, name="optimized")
        optimized_result = sess.run(["optimized/output:0"])
        # Looser tolerances than testFoldBatchNorms: fused kernels may
        # differ slightly in rounding.
        self.assertAllClose(
            original_result, optimized_result, rtol=1e-04, atol=1e-06)
        for node in optimized_graph_def.node:
          self.assertNotEqual("FusedBatchNorm", node.op)

  def testFuseResizePadAndConv(self):
    """ResizeBilinear + MirrorPad + Conv2D should fuse into one node."""
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      resize_op = image_ops.resize_bilinear(
          input_op, [12, 4], align_corners=False)
      pad_op = array_ops.pad(resize_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                             mode="REFLECT")
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])
    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])
      self.assertAllClose(original_result, optimized_result)
      for node in optimized_graph_def.node:
        self.assertNotEqual("Conv2D", node.op)
        self.assertNotEqual("MirrorPad", node.op)
        self.assertNotEqual("ResizeBilinear", node.op)

  def testFuseResizeAndConv(self):
    """ResizeBilinear + Conv2D (no pad) should fuse into one node."""
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      resize_op = image_ops.resize_bilinear(
          input_op, [12, 4], align_corners=False)
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          resize_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])
    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])
      self.assertAllClose(original_result, optimized_result)
      for node in optimized_graph_def.node:
        self.assertNotEqual("Conv2D", node.op)
        self.assertNotEqual("MirrorPad", node.op)

  def testFusePadAndConv(self):
    """MirrorPad + Conv2D (no resize) should fuse into one node."""
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      pad_op = array_ops.pad(input_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                             mode="REFLECT")
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])
    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])
      self.assertAllClose(original_result, optimized_result)
      for node in optimized_graph_def.node:
        self.assertNotEqual("Conv2D", node.op)
        self.assertNotEqual("ResizeBilinear", node.op)
# Allow running this test file directly via the TensorFlow test runner.
if __name__ == "__main__":
  test.main()
| nburn42/tensorflow | tensorflow/python/tools/optimize_for_inference_test.py | Python | apache-2.0 | 13,432 |
from django.contrib import admin
from models import *
class LayerAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist for Layer.
    list_display = ('name', 'layerName', 'provider', 'public', 'minzoom', 'maxzoom')
    # Sidebar filters.
    list_filter = ('public', 'provider')
    # 'public' can be toggled directly from the changelist.
    list_editable = ('public',)
# Register Layer with its customized admin options.
admin.site.register(Layer, LayerAdmin)
| trailbehind/EasyTileServer | webApp/layers/admin.py | Python | bsd-3-clause | 289 |
# -*- coding: utf-8 -*-
from fabric.api import *
@task
def bootstrap(puppetmaster=''):
    """Bootstrap an Ubuntu 14.04 host on Upcloud.

    Fixes /etc/hosts so the FQDN resolves to the real IP, then installs and
    runs the puppet agent against the given puppetmaster.
    """
    from . import util, puppet
    util.install_sudo()
    # Gather identity facts from the remote host.
    fqdn = run('hostname -f')
    hostname = run('hostname -s')
    # NOTE(review): parses the IPv4 address of eth0 from ifconfig output;
    # assumes the primary interface is named eth0 -- confirm on newer images.
    ipaddress = run("/sbin/ifconfig eth0 | grep 'inet ' | awk -F'[: ]+' '{ print $4 }'")
    hosts_line = "%s %s %s" % (ipaddress, fqdn, hostname)
    # Remove the Debian-style 127.0.1.1 alias so the FQDN maps to the real IP.
    sudo("sed -i '/127.0.1.1/d' /etc/hosts")
    with settings(warn_only=True):
        # warn_only: a failing grep just means the line is not present yet.
        grep = run("grep \"%s\" /etc/hosts" % hosts_line)
        if grep.failed:
            sudo("echo %s >> /etc/hosts" % hosts_line)
    puppet.install(puppetmaster)
    puppet.run_agent()
| mattock/fabric | upcloud.py | Python | bsd-2-clause | 676 |
import logging
logger = logging.getLogger(__name__)
from redbean.logs import setup_logging
from pathlib import Path
setup_logging(config=Path(__file__).parent / 'conf/logging.yaml')
import aiohttp
import redbean
rest = redbean.Routes()
def create_app():
    """Create and configure the aiohttp application with REST routes mounted."""
    app = aiohttp.web.Application()
    # NOTE(review): hard-coded secret checked into source -- move to
    # configuration/environment before production use.
    app['secure_key'] = 'DjwennlKciQiTlxKmYtWqH8N'
    app['etcd_endpoint'] = "127.0.0.1:2379"
    # Mount routes declared on the module-level `rest` object under /api.
    rest.setup(app)
    rest.add_module('serv2', prefix='/api')
    return app
# python -m redbean.run -p 8500 test/security/app.py
| lcgong/alchemy | busiserv/app.py | Python | gpl-3.0 | 535 |
from JumpScale import j
class ActorsInfo():
    """Render debugging/info pages about portal actors and their methods.

    Used while showing errors in the browser to expose callable method
    URLs together with their expected parameters.
    """

    def getActorMethodCall(self, appname, actor, method):
        """Return the REST URL template to invoke appname/actor/method.

        Each expected parameter is listed as an empty query argument
        (plus ``authkey`` when the route requires authentication) and
        text output is forced via ``format=text``.
        """
        url = "/rest/%s/%s/%s?" % (appname, actor, method)
        auth = j.core.portal.active.ws.routes["%s_%s_%s" % (appname, actor, method)][5]
        if auth:
            params = ["authkey"]
        else:
            params = []
        params.extend(j.core.portal.active.ws.routes["%s_%s_%s" % (appname, actor, method)][1].keys())
        for param in params:
            url += "%s=&" % param
        url += "format=text"
        # Strip a dangling '&' or '?' left over when there were no params.
        if url[-1] == "&":
            url = url[:-1]
        if url[-1] == "?":
            url = url[:-1]
        # url="<a href=\"%s\">%s</a> " % (url,url)
        return url

    def getActorInfoPage(self, appname, actorname, methodname, page=None):
        """Append documentation for one actor method to *page* and return it.

        Activates the actor on demand when its route is not loaded yet.
        Returns an error string when any of the three names is empty.
        """
        if appname == "" or actorname == "" or methodname == "":
            txt = "getActorInfo need 3 params: appname, actorname, methoname, got: %s, %s,%s" % (appname, actorname, methodname)
            return txt
        if page is None:
            page = j.core.portal.active.getpage()
        page.addHeading("%s.%s.%s" % (appname, actorname, methodname), 5)
        # BUGFIX: was a bare ``getActorMethodCall(...)`` (NameError at
        # runtime); it is a method of this class.
        url = self.getActorMethodCall(appname, actorname, methodname)
        routekey = "%s_%s_%s" % (appname, actorname, methodname)
        # ``in`` instead of the Python2-only dict.has_key().
        if routekey not in j.core.portal.active.routes:
            j.core.portal.active.activateActor(appname, actorname)
        routeData = j.core.portal.active.routes[routekey]
        # routedata: function, paramvalidation, paramdescription, paramoptional, description
        description = routeData[4]
        if description.strip() != "":
            page.addMessage(description)
        # param info
        params = routeData[1]
        descriptions = routeData[2]
        # optional = routeData[3]
        page.addLink("%s" % (methodname), url)
        if len(params.keys()) > 0:
            page.addBullet("Params:\n", 1)
            for key in params.keys():
                if key in descriptions:
                    descr = descriptions[key].strip()
                else:
                    descr = ""
                page.addBullet("- %s : %s \n" % (key, descr), 2)
        return page
def getActorsInfoPage(appname="", actor="", page=None, extraParams={}):
    """Render an overview page: all apps, one app's actors, or one actor's methods.

    NOTE(review): this function takes no ``self`` although its siblings are
    methods, and it calls ``getActorInfoUrl``/``getActorMethodCall``/
    ``getActorInfoPage`` as bare names that are not defined at module level
    in this file — confirm how it is actually bound and invoked.
    """
    actorsloader = j.core.portal.active.actorsloader
    if appname != "" and actor != "":
        result = j.core.portal.active.activateActor(appname, actor)
        if result == False:
            # actor was not there
            page = j.core.portal.active.getpage()
            page.addHeading("Could not find actor %s %s." % (appname, actor), 4)
            return page
    if page == None:
        page = j.core.portal.active.getpage()
    if appname == "":
        # No application selected: list every known application.
        page.addHeading("Applications in appserver.", 4)
        appnames = {}
        for appname, actorname in actorsloader.getAppActors():  # [item.split("_", 1) for item in self.app_actor_dict.keys()]:
            appnames[appname] = 1
        appnames = sorted(appnames.keys())
        for appname in appnames:
            link = page.getLink("%s" % (appname), getActorInfoUrl(appname, ""))
            page.addBullet(link)
        return page
    if actor == "":
        # Application selected but no actor: list this application's actors.
        page.addHeading("Actors for application %s" % (appname), 4)
        actornames = []
        for appname2, actorname2 in actorsloader.getAppActors():  # [item.split("_", 1) for item in self.app_actor_dict.keys()]:
            if appname2 == appname:
                actornames.append(actorname2)
        actornames.sort()
        for actorname in actornames:
            link = page.getLink("%s" % (actorname), getActorInfoUrl(appname, actorname))
            page.addBullet(link)
        return page
    # Both app and actor selected: list and then document each method.
    keys = sorted(j.core.portal.active.routes.keys())
    page.addHeading("list", 2)
    for item in keys:
        app2, actor2, method = item.split("_")
        if app2 == appname and actor2 == actor:
            url = getActorMethodCall(appname, actor, method)
            link = page.getLink(item, url)
            page.addBullet(link)
    page.addHeading("details", 2)
    for item in keys:
        app2, actor2, method = item.split("_")
        if app2 == appname and actor2 == actor:
            page = getActorInfoPage(appname, actor, method, page=page)
    # NOTE(review): the assembled page is never returned on this path —
    # callers receive None; a trailing ``return page`` was probably intended.
| Jumpscale/jumpscale6_core | lib/JumpScale/portal/portalloaders/ActorsInfo.py | Python | bsd-2-clause | 4,625 |
from django.conf import settings
from django.contrib.messages.storage import default_storage
from django.utils.deprecation import MiddlewareMixin
class MessageMiddleware(MiddlewareMixin):
    """
    Middleware that handles temporary messages.
    """

    def process_request(self, request):
        # Attach the configured message storage backend to the request.
        request._messages = default_storage(request)

    def process_response(self, request, response):
        """
        Updates the storage backend (i.e., saves the messages).

        If not all messages could be stored and ``DEBUG`` is ``True``, a
        ``ValueError`` is raised.
        """
        # A higher middleware layer may return a request which does not contain
        # messages storage, so make no assumption that it will be there.
        if hasattr(request, '_messages'):
            unstored_messages = request._messages.update(response)
            if unstored_messages and settings.DEBUG:
                raise ValueError('Not all temporary messages could be stored.')
        return response
| KrzysztofStachanczyk/Sensors-WWW-website | www/env/lib/python2.7/site-packages/django/contrib/messages/middleware.py | Python | gpl-3.0 | 1,019 |
import logging
import emission.core.wrapper.wrapperbase as ecwb
class Location(ecwb.WrapperBase):
    """Read-only wrapper around a single sensed location point.

    All properties are read-only; the wrapper only declares the schema
    (no dependent entries are populated).
    """
    props = {"latitude": ecwb.WrapperBase.Access.RO,  # latitude of the point
             "longitude": ecwb.WrapperBase.Access.RO,  # longitude of the point
             "loc": ecwb.WrapperBase.Access.RO,  # location of the point in geojson.
             "ts": ecwb.WrapperBase.Access.RO,  # timestamp (in seconds)
             "local_dt": ecwb.WrapperBase.Access.RO,  # searchable datetime in local time
             "fmt_time": ecwb.WrapperBase.Access.RO,  # formatted time
             "altitude": ecwb.WrapperBase.Access.RO,  # altitude of the point
             "accuracy": ecwb.WrapperBase.Access.RO,  # horizontal accuracy of the point in meters.
             # This is the radius of the 68% confidence, so a lower
             # number means better accuracy
             "sensed_speed": ecwb.WrapperBase.Access.RO,  # the speed reported by the phone in m/s
             "speed": ecwb.WrapperBase.Access.RO,  # the speed calculated by us
             "distance": ecwb.WrapperBase.Access.RO,  # distance calculated by us
             "heading": ecwb.WrapperBase.Access.RO,  # heading reported by the phone
             "vaccuracy": ecwb.WrapperBase.Access.RO,  # vertical accuracy of the point (only iOS)
             "floor": ecwb.WrapperBase.Access.RO}  # floor in a building that point is in (only iOS)
    enums = {}
    # 'loc' is serialized/deserialized as geojson.
    geojson = ["loc"]
    nullable = []

    def _populateDependencies(self):
        # Location has no dependent entries to populate.
        pass
| joshzarrabi/e-mission-server | emission/core/wrapper/location.py | Python | bsd-3-clause | 1,527 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from reno import loader
LOG = logging.getLogger(__name__)
def list_cmd(args, conf):
    "List notes files based on query arguments"
    LOG.debug('starting list')
    reporoot = conf.reporoot
    ldr = loader.Loader(conf)
    # Fall back to every known version when none was requested explicitly.
    versions = args.version if args.version else ldr.versions
    for version in versions:
        notefiles = ldr[version]
        print(version)
        for filename, sha in notefiles:
            # Show paths relative to the repository root.
            if filename.startswith(reporoot):
                filename = filename[len(reporoot):]
            print('\t%s (%s)' % (filename, sha))
    return
| openstack/reno | reno/lister.py | Python | apache-2.0 | 1,130 |
# -*- encoding: utf-8 -*-
import abc
from abjad.tools.datastructuretools.TreeContainer import TreeContainer
class ReSTDirective(TreeContainer):
    r'''A ReST directive.

    A tree container that renders as a ``.. directive:: argument`` line,
    followed by its ``:option:`` fields and its indented children.
    '''

    ### INITIALIZER ###

    def __init__(
        self,
        argument=None,
        children=None,
        directive=None,
        name=None,
        options=None,
        ):
        TreeContainer.__init__(self, children=children, name=name)
        assert isinstance(options, (dict, type(None)))
        self._argument = argument
        # Copy the caller's options so later mutation of the argument
        # does not affect this node.
        self._options = {}
        if options is not None:
            self._options.update(options)
        self._directive = directive

    ### PRIVATE PROPERTIES ###

    @property
    def _children_rest_format_contributions(self):
        # Children are emitted after a blank line and indented under the
        # directive; empty lines are passed through unindented.
        result = []
        for child in self.children:
            result.append('')
            contribution = child._rest_format_contributions
            for x in contribution:
                if x:
                    result.append(' ' + x)
                else:
                    result.append(x)
        return result

    @property
    def _rest_format_contributions(self):
        if self.argument:
            result = ['.. {}:: {}'.format(self.directive, self.argument)]
        else:
            result = ['.. {}::'.format(self.directive)]
        # Options render as ':key:' lines; True means a bare flag,
        # None/False suppress the option entirely.
        for key, value in sorted(self.options.items()):
            option = ' :{}:'.format(key)
            if value is True:
                pass
            elif value is None or value is False:
                continue
            elif isinstance(value, (list, tuple)):
                option += ' ' + ', '.join(str(x) for x in value)
            elif isinstance(value, (int, float, str)):
                option += ' ' + str(value)
            result.append(option)
        result.extend(self._children_rest_format_contributions)
        return result

    @property
    def _storage_format_specification(self):
        from abjad.tools import systemtools
        return systemtools.StorageFormatSpecification(
            self,
            keywords_ignored_when_false=(
                'children',
                'name',
                'options',
                ),
            )

    ### PUBLIC PROPERTIES ###

    @property
    def argument(self):
        r'''Gets and sets argument of ReST directive.
        '''
        return self._argument

    @argument.setter
    def argument(self, arg):
        assert isinstance(arg, (str, type(None)))
        self._argument = arg

    @property
    def directive(self):
        r'''Gets and sets directive of ReST directive.
        '''
        return self._directive

    @directive.setter
    def directive(self, expr):
        self._directive = str(expr)

    @property
    def node_class(self):
        r'''Node class of ReST directive.
        '''
        from abjad.tools import documentationtools
        return (
            documentationtools.ReSTDirective,
            documentationtools.ReSTHeading,
            documentationtools.ReSTHorizontalRule,
            documentationtools.ReSTParagraph,
            )

    @property
    def options(self):
        r'''Options of ReST directive.
        '''
        return self._options

    @property
    def rest_format(self):
        r'''ReST format of ReST directive.
        '''
        return '\n'.join(self._rest_format_contributions)
import platform
import socket
import sys
import os
from mule_local.JobGeneration import *
from mule.JobPlatformResources import *
from . import JobPlatformAutodetect
# Underscore defines symbols to be private
_job_id = None
def get_platform_autodetect():
    """Return True when the current host is detected to be this platform."""
    return JobPlatformAutodetect.autodetect()
def get_platform_id():
    """Return the unique identifier string of this platform."""
    return "linuxcluster_intel"
def get_platform_resources():
    """Describe this cluster's hardware as a JobPlatformResources instance."""
    res = JobPlatformResources()
    # CooLMUC-2 nodes: 2 sockets x 14 cores.
    res.num_cores_per_node = 28
    # Number of nodes per job are limited
    res.num_nodes = 384
    res.num_cores_per_socket = 14
    # 48 hour wallclock limit.
    res.max_wallclock_seconds = 48*60*60
    return res
def jobscript_setup(jg : JobGeneration):
    """Prepare module state (the unique job id) before generating scripts."""
    global _job_id
    _job_id = jg.runtime.getUniqueID(jg.compile, jg.unique_id_filter)
def jobscript_get_header(jg : JobGeneration):
    """
    These headers typically contain the information on e.g. Job exection, number of compute nodes, etc.

    Returns
    -------
    string
        multiline text for scripts
    """
    global _job_id
    p = jg.parallelization
    time_str = p.get_max_wallclock_seconds_hh_mm_ss()
    #
    # See https://www.lrz.de/services/compute/linux-cluster/batch_parallel/example_jobs/
    #
    content = """#! /bin/bash
#SBATCH -o """+jg.p_job_stdout_filepath+"""
#SBATCH -e """+jg.p_job_stderr_filepath+"""
#SBATCH -D """+jg.p_job_dirpath+"""
#SBATCH -J """+_job_id+"""
"""
    # Tiny jobs (<= 2 nodes) go to the cm2_tiny cluster, larger ones to cm2.
    if p.num_nodes <= 2:
        content += "#SBATCH --clusters=cm2_tiny\n"
    else:
        content += "#SBATCH --clusters=cm2\n"
    content += """#SBATCH --get-user-env
#SBATCH --nodes="""+str(p.num_nodes)+"""
#SBATCH --ntasks-per-node="""+str(p.num_ranks_per_node)+"""
# the above is a good match for the
# CooLMUC2 architecture.
#SBATCH --mail-type=end
#SBATCH --mail-user=schreiberx@gmail.com
#SBATCH --export=NONE
#SBATCH --time="""+time_str+"""
"""
    # Partition/QOS selection scales with the node count.
    if p.num_nodes <= 2:
        content += "#SBATCH --partition=cm2_tiny\n"
    elif p.num_nodes <= 24:
        content += "#SBATCH --partition=cm2_std\n"
        content += "#SBATCH --qos=cm2_std\n"
    else:
        content += "#SBATCH --partition=cm2_large\n"
        content += "#SBATCH --qos=cm2_large\n"
    content += "\n"
    content += "module load slurm_setup\n"
    # NOTE(review): dead code — the branch below is disabled by ``if False``
    # and, even if enabled, would emit an #SBATCH directive after regular
    # commands; confirm before removing or re-enabling.
    if False:
        if p.force_turbo_off:
            content += """# Try to avoid slowing down CPUs
#SBATCH --cpu-freq=Performance
"""
    content += """
source /etc/profile.d/modules.sh
"""
    # Thread count is only exported when threading is compiled in.
    if jg.compile.threading != 'off':
        content += """
export OMP_NUM_THREADS="""+str(p.num_threads_per_rank)+"""
"""
    if p.core_oversubscription:
        raise Exception("Not supported with this script!")
    # Map the requested affinity onto OMP_PROC_BIND.
    if p.core_affinity != None:
        content += "\necho \"Affnity: "+str(p.core_affinity)+"\"\n"
        if p.core_affinity == 'compact':
            content += "\nexport OMP_PROC_BIND=close\n"
        elif p.core_affinity == 'scatter':
            content += "\nexport OMP_PROC_BIND=spread\n"
        else:
            raise Exception("Affinity '"+str(p.core_affinity)+"' not supported")
    return content
def jobscript_get_exec_prefix(jg : JobGeneration):
    """Return the script text placed immediately before the executable call."""
    # The runtime plan supplies the whole prefix for this platform.
    return jg.runtime.get_jobscript_plan_exec_prefix(jg.compile, jg.runtime)
def jobscript_get_exec_command(jg : JobGeneration):
    """
    Prefix to executable command

    Returns
    -------
    string
        multiline text for scripts
    """
    p = jg.parallelization
    mpiexec = ''
    #
    # Only use MPI exec if we are allowed to do so
    # We shouldn't use mpiexec for validation scripts
    #
    if not p.mpiexec_disabled:
        mpiexec = "mpiexec -n "+str(p.num_ranks)+" --perhost "+str(p.num_ranks_per_node)
    # The runtime library path must be propagated into the job environment.
    sweet_ld_library_path = os.getenv('MULE_LD_LIBRARY_PATH')
    if sweet_ld_library_path == None:
        raise Exception("Environment variable MULE_LD_LIBRARY_PATH not found!")
    content = """
# Output MPI version
echo "**************************************************"
echo "MPI Information"
echo "**************************************************"
echo "mpiexec --version"
mpiexec --version 2>&1
echo "**************************************************"
# List loaded modules
echo "**************************************************"
echo "Loaded modules"
echo "**************************************************"
echo "module list"
module list 2>&1
echo "**************************************************"
# Make sure that MULE library path is really known
export LD_LIBRARY_PATH=\""""+sweet_ld_library_path+""":$LD_LIBRARY_PATH\"
# mpiexec ... would be here without a line break
EXEC=\""""+jg.compile.getProgramPath()+"""\"
PARAMS=\""""+jg.runtime.getRuntimeOptions()+"""\"
echo \"${EXEC} ${PARAMS}\"
"""+mpiexec+""" $EXEC $PARAMS || exit 1
"""
    return content
def jobscript_get_exec_suffix(jg : JobGeneration):
    """Return the script text placed immediately after the executable call."""
    # The runtime plan supplies the whole suffix for this platform.
    return jg.runtime.get_jobscript_plan_exec_suffix(jg.compile, jg.runtime)
def jobscript_get_footer(jg : JobGeneration):
    """Return the job script footer; this platform adds nothing."""
    return ""
def jobscript_get_compile_command(jg : JobGeneration):
    """
    Compile command(s)

    This is separated here to put it either
    * into the job script (handy for workstations)
    or
    * into a separate compile file (handy for clusters)

    Returns
    -------
    string
        multiline text with compile command to generate executable
    """
    # Run scons with the job's compile parameters; abort the script on failure.
    content = """
SCONS="scons """+jg.compile.getSConsParams()+' -j 4"'+"""
echo "$SCONS"
$SCONS || exit 1
"""
    return content
| schreiberx/sweet | mule/platforms/50_linuxcluster_intel/JobPlatform.py | Python | mit | 6,146 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009—2010 Andrey Mikhailenko and contributors
#
# This file is part of Dark.
#
# Dark is free software under terms of the GNU Lesser
# General Public License version 3 (LGPLv3) as published by the Free
# Software Foundation. See the file README for copying conditions.
#
# Usage:
# items = dataset.items(dataset.find(name='John'))
# a = Avg('age') # created an aggregation manager: "average age"
# calc = a.aggregate(items) # created lazy calculation for given list of dictionaries
# int(calc) # here the calculation is actually done
# int(calc) # cached result returned, no recalc
"""
Aggregates
==========
"""
from decimal import Decimal
__all__ = ['Aggregate', 'Avg', 'Count', 'Max', 'Median', 'Min', 'Sum', 'Qu1', 'Qu3', 'NA']
DECIMAL_EXPONENT = Decimal('.01') # XXX let user change this
class AggregationError(Exception):
    """Raised when an aggregate cannot be computed over the given values."""
    pass
class LazyCalculation(object):
    """Deferred aggregate calculation over a list of values.

    The result is computed on first conversion (int/float/str) and cached.
    NOTE(review): the cache test is ``self.result or ...`` so a falsy
    result (e.g. 0) is recomputed on every access — confirm intended.
    """
    def __init__(self, agg, values):
        self.agg = agg
        self.values = values
        self.result = None
    def get_result(self):
        # No values at all -> no result (callers see None).
        if len(self.values) == 0:
            return None
        try:
            self.result = self.result or self.agg.calc(self.values)
        except TypeError, e:
            raise AggregationError('Could not perform %s aggregation on key '
                                   '"%s" data contains a non-numeric value. '
                                   'Original message: %s' % (self.agg.name(),
                                   self.agg.key, e.message))
        #if isinstance(self.result, Decimal):
        #    # we don't want tens of zeroes, do we
        #    self.result = Decimal(self.result).quantize(DECIMAL_EXPONENT)
        # Normalize to a two-decimal Decimal regardless of the calc's type.
        self.result = Decimal(self.result).quantize(DECIMAL_EXPONENT)
        return self.result
    def __int__(self):
        return int(self.get_result())
    def __float__(self):
        return float(self.get_result())
    def __str__(self):
        return str(self.get_result())
    def __repr__(self):
        return '<lazy {name} by {values}>'.format(
            name = self.agg.name,
            values = '{0} values'.format(
                len(self.values) if len(self.values) > 3 else self.values
            )
        )
class NA(object):
    """Marker and policy holder for "not available" values.

    The class attributes are policies passed to Aggregate constructors,
    e.g. ``Min('key', NA.skip)``; an instance serves as the N/A
    placeholder result itself.
    """
    skip = 1
    reject = 2

    def __str__(self):
        return 'N/A'

    def __repr__(self):
        return '<N/A>'
class Aggregate(object):
    """Base class for aggregates; with no key set it applies to 'all'."""

    def __init__(self):
        self.key = None

    def __str__(self):
        label = self.key or 'all'
        return '%s(%s)' % (self.__class__.__name__, label)

    def __repr__(self):
        return '<%s>' % str(self)

    def name(self):
        """Return the aggregate's class name (used in error messages)."""
        return self.__class__.__name__
class AggregateManager(Aggregate):
    "TODO factory?"

    def __init__(self, key, na_policy=NA.skip):
        self.key = key
        self.na_policy = na_policy

    def count_for(self, dictionaries):
        """Collect this key's values and wrap them in a LazyCalculation.

        Under the ``reject`` policy a single missing value yields None;
        under ``skip`` missing values are ignored. When nothing remains,
        an NA marker is returned instead of a calculation.
        """
        values = []
        for record in dictionaries:
            value = record.get(self.key, None)
            if value is None:
                if self.na_policy == NA.reject:
                    # One missing value poisons the whole aggregate.
                    return None
                if self.na_policy == NA.skip:
                    continue
            values.append(value)
        return LazyCalculation(self, values) if values else NA()

    def calc(self, values):
        raise NotImplementedError
# CLASSES THAT INHERIT FROM AggregateManager
class Avg(AggregateManager):
    """Arithmetic mean of the values."""

    @staticmethod
    def calc(values):
        total = sum(values, 0)
        return Decimal(total) / len(values)
class Max(AggregateManager):
    """Largest value, stringified for later conversion to Decimal."""

    @staticmethod
    def calc(values):
        largest = max(values)
        return str(largest)  # str for later conversion to decimal
class Median(AggregateManager):
    """
    Given a vector V of length N, the median of V is the middle value of a sorted
    copy of V, V_sorted - i.e., V_sorted[(N-1)/2], when N is odd. When N is even,
    it is the average of the two middle values of V_sorted.
    """

    @staticmethod
    def calc(values):
        # TODO: force Decimal
        ordered = sorted(values)
        mid = len(ordered) >> 1
        if len(ordered) % 2:
            # Odd count: the middle element is the median.
            return ordered[mid]
        # Even count: average the two elements straddling the middle.
        pair_sum = sum(ordered[mid - 1:mid + 1])
        if isinstance(pair_sum, Decimal):
            return pair_sum / Decimal('2.0')
        return pair_sum / 2.0
"""
Almost as commonly used as the median are the quartiles, q0.25 and
q0.75. Usually these are called the lower and upper quartiles,
respectively. They are located halfway between the median, q0.5, and the
extremes, x(1) and x(n). In typically colorful terminology, Tukey (1977)
calls q0.25 and q0.75 the 'hinges', imagining that the data set has been
folded first at the median, and the quartiles.
-- http://mail.python.org/pipermail/python-list/2002-March/134190.html
"""
class Qu1(Median):
    "Calculates the q0.25."
    def calc(self, values):
        """Median of the lowest quarter of the sorted values."""
        values = sorted(values)
        # BUGFIX: use floor division so the slice index stays an int —
        # identical on Python 2, and avoids a TypeError under Python 3
        # where ``/`` yields a float.
        l = len(values) // 4
        return super(Qu1, self).calc(values[:l])
class Qu3(Median):
    "Calculates the q0.75."
    def calc(self, values):
        """Median of the highest quarter of the sorted values."""
        values = sorted(values)
        # BUGFIX: use floor division so the slice index stays an int —
        # identical on Python 2, and avoids a TypeError under Python 3
        # where ``/`` yields a float.
        l = (len(values) // 4) * 3
        return super(Qu3, self).calc(values[l:])
class Min(AggregateManager):
    """Smallest value."""

    @staticmethod
    def calc(values):
        return min(values)
class Sum(AggregateManager):
    """Sum of the values (starting from 0)."""

    @staticmethod
    def calc(values):
        return sum(values, 0)
class Count(AggregateManager):
    """
    Counts distinct values for given key. If key is not specified, simply counts
    all items in the query.
    """
    def __init__(self, key=None, na_policy=NA.skip):  # TODO: err_policy (skip, raise, set N/A, set 0)
        self.key = key
        self.na_policy = na_policy
        # overload resource-consuming parent method if we can do without it
        # NOTE(review): with no key, ``count_for`` receives the raw
        # dictionaries and ``set(...)`` would raise TypeError on unhashable
        # items — confirm the no-key path is only used with hashable items.
        if not key:
            self.count_for = self.calc
    @staticmethod
    def calc(values):
        return len(set(values))
| neithere/dark | dark/aggregates.py | Python | lgpl-3.0 | 6,645 |
#!/usr/bin/env python
import os
import pkg_resources
import platform
import sys
from optparse import OptionParser
from random import choice
class SiteOptions(object):
    """Options handed to rbsite's Site class."""
    # On Windows media files are copied instead of symlinked.
    copy_media = platform.system() == "Windows"
def create_settings():
    """Generate settings_local.py from the bundled template, if missing.

    Fills in a freshly generated SECRET_KEY and the database settings
    parsed from the command line (module-global ``options``).
    """
    if not os.path.exists("settings_local.py"):
        print "Creating a settings_local.py in the current directory."
        print "This can be modified with custom settings."
        src_path = os.path.join("contrib", "conf", "settings_local.py.tmpl")
        in_fp = open(src_path, "r")
        out_fp = open("settings_local.py", "w")
        for line in in_fp.xreadlines():
            if line.startswith("SECRET_KEY = "):
                # Generate a 50-character random secret key.
                secret_key = ''.join([
                    choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
                    for i in range(50)
                ])
                out_fp.write('SECRET_KEY = "%s"\n' % secret_key)
            elif line.strip().startswith("'ENGINE': "):
                out_fp.write(" 'ENGINE': 'django.db.backends.%s',\n" %
                             options.db_type)
            elif line.strip().startswith("'NAME': "):
                # sqlite wants an absolute path; other engines take a DB name.
                if options.db_type == 'sqlite':
                    name = os.path.abspath(options.db_name)
                else:
                    name = options.db_name
                out_fp.write(" 'NAME': '%s',\n" % name)
            elif line.strip().startswith("'USER': "):
                out_fp.write(" 'USER': '%s',\n" % options.db_user)
            elif line.strip().startswith("'PASSWORD': "):
                out_fp.write(" 'PASSWORD': '%s',\n" % options.db_password)
            else:
                out_fp.write(line)
        in_fp.close()
        out_fp.close()
def install_media(site):
    """Create the media directories and link in the Djblets media files."""
    print "Rebuilding media paths..."
    media_path = os.path.join("htdocs", "media")
    uploaded_path = os.path.join(site.install_dir, media_path, "uploaded")
    site.mkdir(uploaded_path)
    site.mkdir(os.path.join(uploaded_path, "images"))
    # Djblets must be importable (e.g. installed via ``setup.py develop``)
    # so its bundled media can be linked into the site tree.
    if not pkg_resources.resource_exists("djblets", "media"):
        sys.stderr.write("Unable to find a valid Djblets installation.\n")
        sys.stderr.write("Make sure you've ran `python setup.py develop` "
                         "in the Djblets source tree.\n")
        sys.exit(1)
    print "Using Djblets media from %s" % \
        pkg_resources.resource_filename("djblets", "media")
    site.link_pkg_dir("djblets", "media", os.path.join(media_path, "djblets"))
def build_egg_info():
    # Regenerate the package's egg-info metadata using the current Python.
    os.system("%s setup.py egg_info" % sys.executable)
def parse_options(args):
    """Parse *args*, store the result in the module-global ``options``
    and return the remaining positional arguments."""
    global options

    parser = OptionParser(usage='%prog [options]')
    parser.add_option('--no-media', action='store_false',
                      dest='install_media', default=True,
                      help="Don't install media files")
    parser.add_option('--no-db', action='store_false',
                      dest='sync_db', default=True,
                      help="Don't synchronize the database")
    parser.add_option('--database-type', dest='db_type', default='sqlite3',
                      help="Database type (postgresql, mysql, sqlite3)")
    parser.add_option('--database-name', dest='db_name',
                      default='reviewboard.db',
                      help="Database name (or path, for sqlite3)")
    parser.add_option('--database-user', dest='db_user', default='',
                      help="Database user")
    parser.add_option('--database-password', dest='db_password', default='',
                      help="Database password")

    options, remaining = parser.parse_args(args)
    return remaining
def main():
    """Prepare a Review Board source checkout for development use."""
    # Must run from the source tree root so relative paths resolve.
    if not os.path.exists(os.path.join("reviewboard", "manage.py")):
        sys.stderr.write("This must be run from the top-level Review Board "
                         "directory\n")
        sys.exit(1)
    # Insert the current directory first in the module path so we find the
    # correct reviewboard package.
    sys.path.insert(0, os.getcwd())
    from reviewboard.cmdline.rbsite import Site
    parse_options(sys.argv[1:])
    # Re-use the Site class, since it has some useful functions.
    site = Site("reviewboard", SiteOptions)
    create_settings()
    build_egg_info()
    if options.install_media:
        install_media(site)
    if options.sync_db:
        print "Synchronizing database..."
        site.sync_database(allow_input=True)
    print
    print "Your Review Board tree is ready for development."
    print
# Script entry point.
if __name__ == "__main__":
    main()
| chazy/reviewboard | contrib/internal/prepare-dev.py | Python | mit | 4,605 |
#!/usr/bin/env python
#
# Copyright (c) 2012, Centre National de la Recherche Scientifique (CNRS)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import os
import re
import shutil
import stat
from os.path import dirname
from tempfile import mkstemp, mkdtemp
from stratuslab.Util import execute, scp
from stratuslab.cloudinit.Util import decode_multipart_as_json
class TMContext(object):
    ''' Create the disk with context information. This is a CDROM for
    standard OpenNebula/HEPiX contextualization. It is a VFAT-formatted
    volume for cloud-init contextualization.
    '''
    # Debug option
    PRINT_TRACE_ON_ERROR = True
    DEFAULT_VERBOSELEVEL = 0
    # Context disk permissions = 0660
    DISK_PERMS = (stat.S_IRUSR | stat.S_IWUSR |
                  stat.S_IRGRP | stat.S_IWGRP)

    def __init__(self, args, **kwargs):
        # args[1] = context file, args[2] = destination context disk.
        self.args = args

    def run(self):
        try:
            self._run()
        finally:
            self._cleanup()

    def _run(self):
        TMContext._checkArgs(self.args)
        contextFile = self.args[1]
        contextDiskFile = self.args[2]
        # Everything except the destination disk goes onto the CDROM.
        cdromFiles = self.args[1:]
        cdromFiles.remove(contextDiskFile)
        kvpairs = TMContext._parseContextFile(contextFile)
        # Default to OpenNebula-style CDROM contextualization.
        method = kvpairs.get('context_method', 'opennebula')
        if (method == 'cloud-init'):
            vfat_script_dir = '/var/lib/stratuslab/python/stratuslab/tm'
            vfat_script = os.path.join(vfat_script_dir, 'TMMakeVFAT.py')
            kvpairs['vfat_script'] = vfat_script
            TMContext._doCloudInit(contextDiskFile, kvpairs)
        else:
            TMContext._doOpenNebula(contextDiskFile, cdromFiles)

    def _cleanup(self):
        pass

    @staticmethod
    def _checkArgs(args):
        if (not args or len(args) < 3):
            raise ValueError('must have at least two arguments: destination disk and context file')

    '''
    This does a "dirty" parsing of the context file looking only
    for the lines with the keys CONTEXT_METHOD,
    CLOUD_INIT_USER_DATA and CLOUD_INIT_AUTHORIZED_KEYS. All of
    the other key-value pairs do not need to be understood by this
    class. A map with these values (if found) are returned.
    '''
    @staticmethod
    def _parseContextFile(context_file):
        result = {}
        with open(context_file, 'r') as f:
            for line in f:
                match = re.match('\s*CONTEXT_METHOD\s*=\s*"(.+)".*', line)
                if match:
                    result['context_method'] = match.group(1).strip()
                match = re.match('\s*CLOUD_INIT_USER_DATA\s*=\s*"(.+)".*', line)
                if match:
                    result['user_data'] = match.group(1).strip()
                match = re.match('\s*CLOUD_INIT_AUTHORIZED_KEYS\s*=\s*"(.+)".*', line)
                if match:
                    result['authorized_keys'] = match.group(1).strip()
        return result

    @staticmethod
    def _doOpenNebula(contextDiskFile, cdromFiles):
        # Build an ISO9660 image from the given files and copy it to the
        # destination; temporary artifacts are always cleaned up.
        tmpdir = None
        image = None
        try:
            tmpdir = mkdtemp()
            for f in cdromFiles:
                shutil.copy(f, tmpdir)
            _, image = mkstemp()
            cmd = ["mkisofs", "-V", "_STRATUSLAB", "-o", image, "-J", "-R", tmpdir]
            rc = execute(cmd)
            if (rc != 0):
                raise Exception("error creating cdrom")
            os.chmod(image, TMContext.DISK_PERMS)
            scp(image, contextDiskFile)
        finally:
            if tmpdir:
                shutil.rmtree(tmpdir, True)
            if image:
                os.remove(image)

    @staticmethod
    def _makeEmptyFile(size=1024*1000):
        # Create a sparse file of the given size (default ~1 MB).
        _, file = mkstemp()
        with open(file, 'w') as f:
            f.seek(size - 1)
            f.write('\0')
        return file

    @staticmethod
    def _doCloudInit(contextDiskFile, params):
        # Lay out the cloud-init content tree, pack it into a VFAT image
        # and copy the image to the destination.
        content_dir = None
        image = None
        vfat_script = params['vfat_script']
        try:
            content_dir = mkdtemp()
            # NOTE(review): _parseContextFile never produces a 'network'
            # key, so this branch always takes the KeyError path — confirm
            # whether a CONTEXT key for the network config is missing.
            try:
                b64_content = params['network']
                net_dir = os.path.join(content_dir, 'etc', 'network')
                os.makedirs(net_dir)
                net_file = os.path.join(net_dir, 'interfaces')
                with open(net_file, 'wb') as f:
                    content = base64.b64decode(b64_content)
                    f.write(content)
            except KeyError:
                pass
            # Optional SSH keys for root.
            try:
                b64_content = params['authorized_keys']
                ssh_dir = os.path.join(content_dir, 'root', '.ssh')
                os.makedirs(ssh_dir)
                keys_file = os.path.join(ssh_dir, 'authorized_keys')
                with open(keys_file, 'wb') as f:
                    content = base64.b64decode(b64_content)
                    f.write(content)
            except KeyError:
                pass
            # User data is optional; the metadata file is always written.
            try:
                encoded_content = params['user_data']
            except KeyError:
                encoded_content = None
            meta_content = decode_multipart_as_json('local', encoded_content)
            meta_file = os.path.join(content_dir, 'meta.js')
            with open(meta_file, 'wb') as f:
                f.write(meta_content)
            #
            # This must be run as root because the VFAT file must be
            # mounted and unmounted from the file system.
            #
            image = TMContext._makeEmptyFile()
            cmd = ['sudo', vfat_script, content_dir, image]
            rc = execute(cmd)
            if (rc != 0):
                raise Exception('cannot create VFAT file system for cloud-init')
            scp(image, contextDiskFile)
        finally:
            if content_dir:
                shutil.rmtree(content_dir, True)
            if image:
                os.remove(image)
| StratusLab/client | api/code/src/main/python/stratuslab/tm/TMContext.py | Python | apache-2.0 | 6,394 |
import logging
import os
import errno
def create_file(filename):
    """Ensure *filename* exists, creating parent directories as needed.

    Safe to call repeatedly; an existing file is left untouched (it is
    opened in append mode and immediately closed).
    """
    dirpath = os.path.dirname(filename)
    # BUGFIX: the original only touched the file when the directory was
    # missing (an existing directory meant no file was ever created), and
    # a bare filename ('' dirname) crashed os.makedirs('').
    if dirpath and not os.path.exists(dirpath):
        try:
            os.makedirs(dirpath)
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    open(filename, 'a').close()
def custom_logger(name, file):
    """Return a DEBUG-level logger named *name* that writes to *file*."""
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)

    # Make sure the target file exists, then attach a file handler to it.
    create_file(file)
    handler = logging.FileHandler(file)
    handler.setLevel(logging.DEBUG)

    # Timestamped single-line format.
    handler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))

    log.addHandler(handler)
    return log
| AntoinePrv/hyperNN | problem/logger.py | Python | mit | 828 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log
from anvil.actions import base as action
from anvil.actions import uninstall
LOG = log.getLogger(__name__)
class RemoveAction(uninstall.UninstallAction):
    """Uninstall action that additionally destroys packages and purges
    all per-component phase artifacts."""

    def _run(self, persona, component_order, instances):
        # First run the regular uninstall phases.
        super(RemoveAction, self)._run(persona, component_order, instances)
        # Phase markers to purge after each step below.
        removals = ['package-install', 'install', 'package']
        dependency_handler_class = self.distro.dependency_handler_class
        dependency_handler = dependency_handler_class(self.distro,
                                                      self.root_dir,
                                                      instances.values())
        # Package destruction runs once, against the 'general' component.
        general_package = "general"
        self._run_phase(
            action.PhaseFunctors(
                start=lambda i: LOG.info("Destroying packages"),
                run=lambda i: dependency_handler.destroy(),
                end=None,
            ),
            [general_package],
            {general_package: instances[general_package]},
            "package-destroy",
            *removals
        )
        # Then uninstall every component, purging download/prepare markers too.
        removals += ['prepare', 'download', "download-patch"]
        self._run_phase(
            action.PhaseFunctors(
                start=lambda i: LOG.info('Uninstalling %s.', colorizer.quote(i.name)),
                run=lambda i: i.uninstall(),
                end=None,
            ),
            component_order,
            instances,
            'uninstall',
            *removals
        )
        # Finally the post-uninstall hooks, purging the install markers as well.
        removals += ['pre-install', 'post-install']
        self._run_phase(
            action.PhaseFunctors(
                start=lambda i: LOG.info('Post-uninstalling %s.', colorizer.quote(i.name)),
                run=lambda i: i.post_uninstall(),
                end=None,
            ),
            component_order,
            instances,
            'post-uninstall',
            *removals
        )
| toby82/anvil | anvil/actions/remove.py | Python | apache-2.0 | 2,601 |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils
from oslo_log import log
from nova import exception
from nova import utils
from novadocker.i18n import _
LOG = log.getLogger(__name__)
def teardown_network(container_id):
    """Delete the network namespace named after *container_id*, if present.

    Failures of the underlying ``ip`` commands are logged, not raised.
    """
    try:
        listing, _err = utils.execute('ip', '-o', 'netns', 'list')
        namespaces = (entry.strip() for entry in listing.split('\n'))
        if container_id in namespaces:
            utils.execute('ip', 'netns', 'delete', container_id,
                          run_as_root=True)
    except processutils.ProcessExecutionError:
        LOG.warning(_('Cannot remove network namespace, netns id: %s'),
                    container_id)
def find_fixed_ip(instance, subnet):
    """Return the first fixed IP of *subnet* in ``address/prefix`` notation.

    :param instance: instance dict; ``instance['uuid']`` is used in errors.
    :param subnet: subnet dict with a ``cidr`` string and an ``ips`` list of
        dicts carrying ``type`` and ``address``.
    :raises InstanceDeployFailure: if the subnet data is malformed or no
        fixed IP address is present (previously a missing fixed IP silently
        returned ``None``).
    """
    try:
        # CIDR looks like "10.0.0.0/24"; keep only the prefix length.
        netmask = subnet['cidr'].split('/')[1]
        for ip in subnet['ips']:
            if ip['type'] == 'fixed' and ip['address']:
                return ip['address'] + "/" + netmask
    except (KeyError, IndexError, TypeError):
        # Malformed subnet structure; fall through to the failure below.
        # (Narrowed from the Python-2-only `except Exception, e`, which also
        # masked unrelated errors.)
        pass
    raise exception.InstanceDeployFailure(_('Cannot find fixed ip'),
                                          instance_id=instance['uuid'])
def find_gateway(instance, subnet):
    """Return the gateway address of *subnet*.

    :param instance: instance dict; ``instance['uuid']`` is used in errors.
    :param subnet: subnet dict expected to hold ``subnet['gateway']['address']``.
    :raises InstanceDeployFailure: if the gateway information is missing.
    """
    try:
        return subnet['gateway']['address']
    except (KeyError, TypeError):
        # Narrowed from the Python-2-only `except Exception, e` (whose `e`
        # was never used); TypeError covers subnet['gateway'] being None.
        raise exception.InstanceDeployFailure(_('Cannot find gateway'),
                                              instance_id=instance['uuid'])
# NOTE(arosen) - this method should be removed after it's moved into the
# linux_net code in nova.
def get_ovs_interfaceid(vif):
    """Return the OVS interface id of *vif*, falling back to its vif id."""
    interface_id = vif.get('ovs_interfaceid')
    if interface_id:
        return interface_id
    return vif['id']
| mangalaman93/nova-docker | novadocker/virt/docker/network.py | Python | apache-2.0 | 2,161 |
import socket
import struct
import optparse
def prepare_sock(port):
    """Return a UDP socket ready to talk to the local time server on *port*."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Bind to an OS-chosen ephemeral local port.
    sock.bind(('0.0.0.0', 0))
    # "Connecting" a UDP socket fixes the peer address so that plain
    # send()/recv() can be used instead of sendto()/recvfrom().
    sock.connect(('0.0.0.0', port))
    return sock
if __name__ == '__main__':
    # Command-line entry point (Python 2 script): query a local UDP time
    # server and print the returned timestamp.
    parser = optparse.OptionParser()
    parser.add_option('-p', '--port', dest='port', default=1497, type='int', help='The port to send to')
    opts, args = parser.parse_args()
    udp_sock = prepare_sock(opts.port)
    # An empty datagram acts as the request; the server is expected to reply
    # with a struct timeval (two native C longs, see '@ll' below).
    udp_sock.send('')
    buf = udp_sock.recv(100)
    # '@ll' = native byte order/alignment, two longs: tv_sec, tv_usec.
    tv_sec, tv_usec = struct.unpack('@ll', buf)
    print 'tv_sec = %d, tv_usec = %d' % (tv_sec, tv_usec)
| eklitzke/event-clock | client.py | Python | isc | 594 |
#
# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
#
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from pyparsing import (Combine, Forward, Group, IndentedBlock, Keyword, LineEnd, Literal, OneOrMore, Opt,
ParseFatalException, SkipTo, Suppress, Word, ZeroOrMore, alphanums, alphas, delimited_list,
nums, rest_of_line)
class Empty:
    """
    Falsy placeholder produced when the sdkconfig does not satisfy a
    conditional statement; callers can filter it out with a truthiness test.
    """

    def __bool__(self):
        return False

    def __repr__(self):
        return '<EMPTY>'
class Fragment:
    """
    Base class for a fragment that can be parsed from a fragment file.
    Subclasses (Sections, Scheme, Mapping) provide the grammar and parse
    actions; every fragment carries a name and a set of entries.
    """
    # Grammar for fragment names: letter/underscore first, then
    # letters/digits/underscores (a C-like identifier).
    IDENTIFIER = Word(alphas + '_', alphanums + '_')
    # Grammar for entity names (archives, object files); may also contain
    # '.', '-', '_', '$' and '+'.
    ENTITY = Word(alphanums + '.-_$+')
    def __init__(self, name: str, entries: Set[Union[str, Tuple[str]]]):
        # Fragment name as written in the '[<type>:<name>]' header.
        self.name = name
        # Parsed entry payloads; element shape depends on the subclass.
        self.entries = entries
    def __repr__(self):
        return str(self.__dict__)
class Sections(Fragment):
    """
    Fragment which contains list of input sections.

    [sections:<name>]
    entries:
        .section1
        .section2
        ...
    """

    # Unless quoted, symbol names start with a letter, underscore, or point
    # and may include any letters, underscores, digits, points, and hyphens.
    # A trailing '+' marks the "this section and its subsections" notation.
    ENTRY = Combine(Word(alphas + '_.', alphanums + '._-') + Opt('+')) + LineEnd().suppress()

    @staticmethod
    def parse_entry(toks):
        # A single section name.
        return toks[0]

    @staticmethod
    def parse(s, loc, toks):
        """Parse action building a Sections fragment from the grouped tokens."""
        this = toks[0]
        name = this[0]
        # Drop falsy placeholders (Empty()) left behind by unsatisfied
        # conditional blocks.
        entries = {entry for entry in this[1] if entry}
        if not entries:
            raise ParseFatalException(s, loc, 'Sections entries shouldn\'t be empty')
        return Sections(name, entries)

    @staticmethod
    def get_section_data_from_entry(sections_entry, symbol=None):
        """
        Returns a list of sections given a sections fragment entry,
        with the '+' notation and symbol concatenation handled automatically.

        Without *symbol* the result is a two-element list (exact name and
        '.*' glob). With *symbol* it is a ``(section, expansion)`` tuple
        where ``expansion`` is ``None`` when the entry has no '+' suffix.
        """
        if not symbol:
            # '.foo+' expands to both the exact section and its glob form.
            return [sections_entry.replace('+', ''),
                    sections_entry.replace('+', '.*')]
        if sections_entry.endswith('+'):
            section = sections_entry.replace('+', '.*')
            expansion = section.replace('.*', '.' + symbol)
            return section, expansion
        return sections_entry, None
class Scheme(Fragment):
    """
    Fragment which defines where the input sections defined in a Sections fragment
    is going to end up, the target. The targets are markers in a linker script template
    (see LinkerScript in linker_script.py).
    [scheme:<name>]
    entries:
        sections1 -> target1
        ...
    """
    # One entry per line: "<sections-fragment> -> <target>".
    ENTRY = Fragment.IDENTIFIER + Suppress('->') + Fragment.IDENTIFIER + LineEnd().suppress()
    @staticmethod
    def parse_entry(toks):
        # (sections fragment name, target marker name)
        return toks[0], toks[1]
    @staticmethod
    def parse(s, loc, toks):
        # toks[0] is [name, [entries...]] as grouped by parse_fragment_file().
        this = toks[0]
        name = this[0]
        # Drop falsy placeholders (Empty()) left by unsatisfied conditionals.
        entries = {entry for entry in this[1] if entry}
        if not entries:
            raise ParseFatalException(s, loc, 'Scheme entries shouldn\'t be empty')
        return Scheme(name, entries)
class EntryFlag:
    """Base class for mapping-entry flags (SURROUND/ALIGN/KEEP/SORT)."""

    def __repr__(self):
        return '{0}'.format(self.__dict__)
class Surround(EntryFlag):
    """
    SURROUND(symbol)
    '__symbol_start', '__symbol_end' is generated before and after
    the corresponding input section description, respectively.
    """
    # Grammar: SURROUND(<identifier>); only the identifier token is kept.
    SURROUND = (Keyword('SURROUND').suppress()
                + Suppress('(')
                + Fragment.IDENTIFIER
                + Suppress(')'))
    def __init__(self, symbol: str):
        self.symbol = symbol
        # SURROUND always emits both the leading and the trailing symbol.
        self.pre = True
        self.post = True
    def __eq__(self, other):
        # Equal only to another Surround with the same symbol/pre/post.
        if isinstance(other, Surround):
            if self.symbol == other.symbol and self.pre == other.pre and self.post == other.post:
                return True
        return False
    @staticmethod
    def parse(toks):
        # toks[0] is the surrounded symbol name.
        return Surround(toks[0])
class Align(EntryFlag):
    """
    ALIGN(alignment, [, pre, post]).
    Generates alignment command before and/or after the corresponding
    input section description, depending on whether pre, post or
    both are specified.
    """
    # Optional ', pre' / ', post' markers; their presence is detected via
    # the named results in parse() below.
    PRE = Opt(Suppress(',') + Suppress('pre')).set_results_name('pre')
    POST = Opt(Suppress(',') + Suppress('post')).set_results_name('post')
    ALIGN = (Keyword('ALIGN').suppress()
             + Suppress('(')
             + Word(nums)
             + PRE
             + POST
             + Suppress(')'))
    def __init__(self, alignment, pre=True, post=False):
        # Alignment value; by default only a pre-alignment is emitted.
        self.alignment = alignment
        self.pre = pre
        self.post = post
    def __eq__(self, other):
        if isinstance(other, Align):
            if self.alignment == other.alignment and self.pre == other.pre and self.post == other.post:
                return True
        return False
    @staticmethod
    def parse(toks):
        alignment = int(toks[0])
        # Suppressed tokens leave empty named results; compare against ''
        # to determine which of 'pre'/'post' was actually written.
        if toks.post == '':
            return Align(alignment)
        if toks.pre == '' and toks.post != '':
            return Align(alignment, False, True)
        return Align(alignment, True, True)
class Keep(EntryFlag):
    """
    KEEP()
    Surrounds input section description with KEEP command.
    """
    # The literal flag text, parentheses included.
    KEEP = Keyword('KEEP()')
    def __eq__(self, other):
        # KEEP carries no state, so any two Keep instances are equal.
        if isinstance(other, Keep):
            return True
        return False
    @staticmethod
    def parse():
        return Keep()
class Sort(EntryFlag):
    """
    SORT([sort_by_first, sort_by_second])
    where sort_by_first, sort_by_second = {name, alignment, init_priority}
    Emits SORT_BY_NAME, SORT_BY_ALIGNMENT or SORT_BY_INIT_PRIORITY
    depending on arguments. Nested sort follows linker script rules.
    """
    _keywords = Keyword('name') | Keyword('alignment') | Keyword('init_priority')
    SORT = (Keyword('SORT').suppress()
            + Suppress('(')
            + _keywords.set_results_name('first')
            + Opt(Suppress(',') + _keywords.set_results_name('second'))
            + Suppress(')'))
    def __init__(self, first: str, second: Optional[str] = None):
        self.first = first
        self.second = second
    def __eq__(self, other):
        if isinstance(other, Sort):
            if self.first == other.first and self.second == other.second:
                return True
        return False
    @staticmethod
    def parse(toks):
        # 'second' is an empty string when omitted; normalize it to None.
        return Sort(toks.first, toks.second or None)
class Flag:
    """Parsed ``section -> target FLAG...`` clause of a mapping entry."""
    _section_target = Fragment.IDENTIFIER + Suppress('->') + Fragment.IDENTIFIER
    _flag = (Surround.SURROUND.set_parse_action(Surround.parse)
             | Align.ALIGN.set_parse_action(Align.parse)
             | Keep.KEEP.set_parse_action(Keep.parse)
             | Sort.SORT.set_parse_action(Sort.parse))
    # A section->target pair followed by one or more flags.
    FLAG = _section_target + OneOrMore(_flag)
    def __init__(self, section: str, target: str, flags: List[EntryFlag]):
        self.section = section
        self.target = target
        self.flags = flags
    def __eq__(self, other):
        # Equal when section, target and the flag lists (same order) match.
        if isinstance(other, Flag):
            if self.section == other.section and self.target == other.target and len(self.flags) == len(other.flags):
                for i, j in zip(self.flags, other.flags):
                    if i != j:
                        break
                else:
                    # Loop completed without finding a mismatch.
                    return True
        return False
    @staticmethod
    def parse(toks):
        # toks layout: [section, target, flag, flag, ...]
        return Flag(toks[0], toks[1], toks[2:])
    def __repr__(self):
        return str(self.__dict__)
class Mapping(Fragment):
    """
    Fragment which attaches a scheme to entities (see Entity in entity.py), specifying where the input
    sections of the entity will end up.
    [mapping:<name>]
    archive: lib1.a
    entries:
        obj1:symbol1 (scheme1); section1 -> target1 KEEP SURROUND(sym1) ...
        obj2 (scheme2)
        ...
    Ultimately, an `entity (scheme)` entry generates an
    input section description (see https://sourceware.org/binutils/docs/ld/Input-Section.html)
    in the output linker script. It is possible to attach 'flags' to the
    `entity (scheme)` to generate different output commands or to
    emit additional keywords in the generated input section description. The
    input section description, as well as other output commands, is defined in
    output_commands.py.
    """
    _any = Literal('*')
    _obj = Word(alphas + '_', alphanums + '-_').set_results_name('object')
    _sym = Fragment.IDENTIFIER.set_results_name('symbol')
    # There are three possible patterns for mapping entries:
    #     obj:symbol (scheme)
    #     obj (scheme)
    #     * (scheme)
    _entry = (((_obj + Opt(Suppress(':') + _sym)) | _any.set_results_name('object'))
              + Suppress('(')
              + Fragment.IDENTIFIER.set_results_name('section')
              + Suppress(')'))
    ENTRY = _entry + LineEnd().suppress()
    # Archive is an entity-style filename or '*' (all archives).
    ARCHIVE = (Word(alphanums + '.-_$+') | Literal('*')) + LineEnd().suppress()
    # Flags can be specified for section->target in the scheme specified, ex:
    #     obj (scheme);
    #         section->target SURROUND(symbol),
    #         section2->target2 ALIGN(4)
    ENTRY_WITH_FLAG = (_entry + Suppress(';')
                       + delimited_list(Flag.FLAG.set_parse_action(Flag.parse)))
    def __init__(self, archive: str, flags: Dict[Any, Flag], *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Archive file this mapping applies to, and per-entry flag lists.
        self.archive = archive
        self.flags = flags
    @staticmethod
    def parse_archive(s, loc, toks):
        # toks[0][0] is the list of archive lines under 'archive:'.
        this = toks[0][0]
        if len(this) != 1:
            raise ParseFatalException(s, loc, 'Could only specify one archive file in one mapping fragment')
        return this[0]
    @staticmethod
    def parse_entry(toks):
        # (object, symbol-or-None, scheme name); 'section' here is the
        # results name used by the grammar for the scheme identifier.
        return toks.object, toks.symbol or None, toks.section
    @staticmethod
    def parse_entry_with_flag(toks):
        entry = toks.object, toks.symbol or None, toks.section
        # Map the entry tuple to the Flag objects attached to it.
        return {
            entry: [tok for tok in toks if isinstance(tok, Flag)]
        }
    @staticmethod
    def parse_entries(toks):
        return toks[0]
    @staticmethod
    def parse(toks):
        this = toks[0]
        name = this[0]
        archive = this[1]
        entries_or_dict_with_flags = this[2]
        entries = set()
        flags = dict()
        for item in entries_or_dict_with_flags:
            # Empty() placeholders come from unsatisfied conditionals.
            if isinstance(item, Empty):
                continue
            elif isinstance(item, dict): # entry with flags
                for k, v in item.items():
                    entries.add(k)
                    # Merge flag lists when the same entry appears twice.
                    if k in flags:
                        flags[k].extend(v)
                    else:
                        flags[k] = v
            else:
                entries.add(item)
        return Mapping(archive=archive, name=name, entries=entries, flags=flags)
class FragmentFile:
    """
    Processes a fragment file and stores all parsed fragments. For
    more information on how this class interacts with classes for the different fragment types,
    see description of Fragment.
    """
    def __init__(self, fragments: List[Fragment]):
        self.path = None  # assign later, couldn't pass extra argument while parsing
        # All fragments parsed from the file, in file order.
        self.fragments: List[Fragment] = fragments
    def __repr__(self):
        return str(self.__dict__)
def parse_fragment_file(path, sdkconfig):
    """Parse the fragment file at *path* into a FragmentFile.

    *sdkconfig* supplies ``evaluate_expression`` used to resolve the
    ``if/elif/else`` conditional blocks at parse time.
    """
    def parse_conditional(toks):
        # Evaluate an if/elif/else group and return the suite of the first
        # satisfied branch; Empty() when no branch matches.
        this = toks[0]
        for stmt in this:
            if stmt[0] in ['if', 'elif']:  # if/elif
                if sdkconfig.evaluate_expression(stmt.condition):
                    return stmt[-1]
            else:  # else
                return stmt[-1]
        return Empty()
    def get_conditional_stmt(_stmt):
        # Build the grammar for an indented if/elif/else block around _stmt.
        condition = SkipTo(':').set_results_name('condition') + Suppress(':')
        _suite = IndentedBlock(_stmt)
        if_decl = Literal('if') + condition
        elif_decl = Literal('elif') + condition
        else_decl = Literal('else:')
        if_ = Group(if_decl + _suite)
        elif_ = Group(elif_decl + _suite)
        else_ = Group(else_decl + _suite)
        return Group(if_ + Opt(OneOrMore(elif_)) + Opt(else_)).set_parse_action(parse_conditional)
    def get_suite(_stmt):
        # A suite is an indented block of statements, comments and nested
        # conditionals; Forward() allows the recursive definition.
        __stmt = Forward()
        __conditional = get_conditional_stmt(__stmt)
        __stmt <<= (comment
                    | _stmt
                    | __conditional)
        return IndentedBlock(__stmt)
    def parse(toks):
        # Drop Empty() placeholders from unsatisfied conditionals.
        return FragmentFile([tok for tok in toks if not isinstance(tok, Empty)])
    # comment
    comment = (Literal('#') + rest_of_line).set_parse_action(lambda s, l, t: Empty())
    # section
    section_entry = Sections.ENTRY.set_parse_action(Sections.parse_entry)
    section_entries_suite = get_suite(section_entry)
    section_header = Suppress('[sections:') + Fragment.IDENTIFIER + Suppress(']') + LineEnd().suppress()
    section = Group(section_header
                    + Suppress('entries:')
                    + section_entries_suite).set_parse_action(Sections.parse)
    # scheme
    scheme_entry = Scheme.ENTRY.set_parse_action(Scheme.parse_entry)
    scheme_entries_suite = get_suite(scheme_entry)
    scheme_header = Suppress('[scheme:') + Fragment.IDENTIFIER + Suppress(']') + LineEnd().suppress()
    scheme = Group(scheme_header
                   + Suppress('entries:')
                   + scheme_entries_suite).set_parse_action(Scheme.parse)
    # mapping
    mapping_archive = Mapping.ARCHIVE
    mapping_archive_suite = get_suite(mapping_archive)
    mapping_entry = Mapping.ENTRY.set_parse_action(Mapping.parse_entry)
    mapping_entry_with_flag = Mapping.ENTRY_WITH_FLAG.set_parse_action(Mapping.parse_entry_with_flag)
    mapping_entries_suite = get_suite(mapping_entry | mapping_entry_with_flag)
    mapping_header = Suppress('[mapping:') + Fragment.IDENTIFIER + Suppress(']')
    mapping = Group(mapping_header
                    + Group(Suppress('archive:')
                            + mapping_archive_suite).set_parse_action(Mapping.parse_archive)
                    + Group(Suppress('entries:')
                            + mapping_entries_suite).set_parse_action(Mapping.parse_entries)
                    ).set_parse_action(Mapping.parse)
    # highest level: any fragment type, possibly wrapped in a conditional
    fragment = (section
                | scheme
                | mapping
                | get_conditional_stmt(section | scheme | mapping))
    parser = ZeroOrMore(fragment).ignore(comment).set_parse_action(parse)
    fragment_file = parser.parse_file(path, parse_all=True)[0]
    # The path couldn't be passed through the parse actions; attach it now.
    fragment_file.path = path
    return fragment_file
| espressif/esp-idf | tools/ldgen/ldgen/fragments.py | Python | apache-2.0 | 14,903 |
import math
import random
import numpy as np
from ..commons.utils import *
from ...tools import dropout, add_bias
["check_network_structure", "verify_dataset_shape_and_modify", "print_training_status", "print_training_results"]
def backpropagation_foundation(network, trainingset, testset, cost_function, calculate_dW, evaluation_function=None,
                               ERROR_LIMIT=1e-3, max_iterations=(), batch_size=0, input_layer_dropout=0.0,
                               hidden_layer_dropout=0.0, print_rate=1000, save_trained_network=False, **kwargs):
    """Generic mini-batch gradient-descent training loop (Python 2).

    The concrete update rule is injected through ``calculate_dW(layer_index,
    dX)``, which turns the raw gradient of a weight layer into the weight
    delta actually applied (momentum/adaptive variants are built on this).

    network      -- network to train; its weights are updated in place
    cost_function-- differentiable cost driving the gradient
    evaluation_function -- optional alternative metric used only for the
                   printed/stopping error (e.g. classification error while
                   training with MSE)
    ERROR_LIMIT  -- stop once the test-set error drops below this bound
    max_iterations -- epoch cap; NOTE: the default `()` relies on Python 2
                   ordering where any int < tuple, i.e. "no limit"
    batch_size   -- 0 selects full-batch training
    """
    check_network_structure(network, cost_function)  # check for special case topology requirements, such as softmax
    training_data, training_targets = verify_dataset_shape_and_modify(network, trainingset)
    test_data, test_targets = verify_dataset_shape_and_modify(network, testset)
    # Whether to use another function for printing the dataset error than the cost function.
    # This is useful if you train the network with the MSE cost function, but are going to
    # classify rather than regress on your data.
    if evaluation_function != None:
        calculate_print_error = evaluation_function
    else:
        calculate_print_error = cost_function
    batch_size = batch_size if batch_size != 0 else training_data.shape[0]
    batch_training_data = np.array_split(training_data, math.ceil(1.0 * training_data.shape[0] / batch_size))
    batch_training_targets = np.array_split(training_targets, math.ceil(1.0 * training_targets.shape[0] / batch_size))
    batch_indices = range(len(batch_training_data))  # fast reference to batches
    error = calculate_print_error(network.update(test_data), test_targets)
    reversed_layer_indexes = range(len(network.layers))[::-1]
    epoch = 0
    while error > ERROR_LIMIT and epoch < max_iterations:
        epoch += 1
        random.shuffle(batch_indices)  # Shuffle the order in which the batches are processed between the iterations
        for batch_index in batch_indices:
            batch_data = batch_training_data[batch_index]
            batch_targets = batch_training_targets[batch_index]
            batch_size = float(batch_data.shape[0])
            # Forward pass keeping per-layer signals and activation
            # derivatives needed by the backward pass.
            input_signals, derivatives = network.update(batch_data, trace=True)
            out = input_signals[-1]
            cost_derivative = cost_function(out, batch_targets, derivative=True).T
            delta = cost_derivative * derivatives[-1]
            for i in reversed_layer_indexes:
                # Loop over the weight layers in reversed order to calculate the deltas
                # perform dropout
                dropped = dropout(
                    input_signals[i],
                    # dropout probability
                    hidden_layer_dropout if i > 0 else input_layer_dropout
                )
                # calculate the weight change (gradient averaged over the batch)
                dX = (np.dot(delta, add_bias(dropped)) / batch_size).T
                dW = calculate_dW(i, dX)
                if i != 0:
                    """Do not calculate the delta unnecessarily."""
                    # Skip the bias weight
                    weight_delta = np.dot(network.weights[i][1:, :], delta)
                    # Calculate the delta for the subsequent layer
                    delta = weight_delta * derivatives[i - 1]
                # Apply the weight delta produced by calculate_dW (the
                # learning rule supplied by the caller).
                network.weights[i] += dW
            # end weight adjustment loop
        error = calculate_print_error(network.update(test_data), test_targets)
        if epoch % print_rate == 0:
            # Show the current training status
            print "[training] Current error:", error, "\tEpoch:", epoch
    print "[training] Finished:"
    print "[training] Converged to error bound (%.4g) with error %.4g." % (ERROR_LIMIT, error)
    print "[training] Measured quality: %.4g" % network.measure_quality(training_data, training_targets,
                                                                        cost_function)
    print "[training] Trained for %d epochs." % epoch
    if save_trained_network and confirm(promt="Do you wish to store the trained network?"):
        network.save_network_to_file()
# end backprop
| DailyActie/Surrogate-Model | 01-codes/python-neural-network-master/nimblenet/learning_algorithms/backpropagation/base.py | Python | mit | 4,246 |
#!/usr/bin/python
#coding=utf-8
#FileName: sms_alarm.py V1.0
'''
2012-07-31 修改content 编码格式由原来的UTF-8修改为gb2312以使发送邮件内容支持中文
'''
import smtplib
import sys
import email
from email.mime.text import MIMEText
#========================================
# Required configuration (fill these in before use)
send_mail_host="smtp.126.com" # SMTP server used for sending
send_mail_user="username"
send_mail_user_name="system_alarm"
send_mail_pswd="password"
send_mail_postfix="126.com" # domain of the sending mailbox
get_mail_user="PhoneNum"
# No configuration needed below this line ===============
get_mail_postfix="139.com"
get_mail_host="pop.139.com"
#========================================
def send_mail(sub, content):
    """Send an alarm mail to the 139.com SMS gateway address.

    sub:     subject line
    content: message body (HTML, encoded as gb2312 so Chinese text works)

    Returns True if the message was handed to the SMTP server, else False.
    """
    send_mail_address = send_mail_user_name + "<" + send_mail_user + "@" + send_mail_postfix + ">"
    msg = email.mime.text.MIMEText(content, _subtype="html", _charset="gb2312")
    msg['Subject'] = sub
    msg['From'] = send_mail_address
    msg['to'] = to_adress = "139SMSserver<" + get_mail_user + "@" + get_mail_postfix + ">"
    stp = smtplib.SMTP()
    try:
        stp.connect(send_mail_host)
        stp.login(send_mail_user, send_mail_pswd)
        stp.sendmail(send_mail_address, to_adress, msg.as_string())
        return True
    except Exception as e:
        # `except ... as e` works on Python 2.6+ and 3 (the original
        # `except Exception, e` is Python-2-only syntax).
        print(str(e))
        return False
    finally:
        # Always release the connection — the original leaked it whenever
        # connect/login/sendmail raised (close was only on the success path).
        try:
            stp.close()
        except Exception:
            pass
if __name__ == '__main__':
    # Bug fix: the original called send_mail('sub', content) with `content`
    # never defined, which always raised NameError. Take the subject and
    # body from the command line instead, with safe defaults.
    sub = sys.argv[1] if len(sys.argv) > 1 else 'sub'
    content = sys.argv[2] if len(sys.argv) > 2 else 'test alarm'
    if send_mail(sub, content):
        print("发送成功")
    else:
        print("发送失败")
| honglei619/AutoPost | sms_alarm.py | Python | gpl-2.0 | 1,530 |
import json
class TagsEndpointsMixin(object):
    """For endpoints in ``/tags/``."""

    def tag_info(self, tag):
        """
        Get tag info

        :param tag:
        :return:
        """
        return self._call_api('tags/{0!s}/info/'.format(tag))

    def tag_related(self, tag, **kwargs):
        """
        Get related tags

        :param tag:
        :return:
        """
        visited = json.dumps([{'id': tag, 'type': 'hashtag'}], separators=(',', ':'))
        related_types = json.dumps(['hashtag'], separators=(',', ':'))
        query = {'visited': visited, 'related_types': related_types}
        return self._call_api('tags/{0!s}/related/'.format(tag), query=query)

    def tag_search(self, text, **kwargs):
        """
        Search tag

        :param text:
        :param kwargs:
        :return:
        """
        query = dict(is_typeahead=True, q=text, rank_token=self.rank_token)
        query.update(kwargs)
        return self._call_api('tags/search/', query=query)
| hideki-saito/Data-Service-for-Instagram | instagram_private_api/endpoints/tags.py | Python | mit | 1,157 |
## @file current/tests.py
# @brief current server status unit testing
import datetime
import django.test
import models
import views
class CurrentModelTestCase(django.test.TestCase):
    """test module model"""
    # Placeholder: no model behaviour is verified yet.
    pass
class CurrentViewTestCase(django.test.TestCase):
    """test module interface"""
    def test01time(self):
        """check server current time"""
        # NOTE(review): the two timestamps are taken at slightly different
        # moments, so this can flake if the second ticks over in between.
        self.assertEqual( views.time({}),
                          str(datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) )
    def test02get(self):
        """check if service return non-empty dict"""
        # Only the size of the returned mapping is asserted, not its keys.
        d = views.get({})
        self.assertEqual( len(d), 2)
| mingless/bayesian_webclass | bioweb/web/current/tests.py | Python | mit | 677 |
from django.test import TestCase
from django.conf import settings
from django.contrib.sites.models import Site
from django.db.models.query import QuerySet
from preferences import preferences
from music.models import TrackContributor, Credit, Track, Album, CreditOption
from music.utils import wikipedia, lastfm
class ScraperTestCase(TestCase):
    """Exercises the wikipedia/lastfm scrapers against known-good and
    deliberately bogus artist/album/track fixtures."""
    @classmethod
    def setUpClass(cls):
        # Disable scraping
        settings.JMBO_MUSIC['scrapers'] = []
        # Bootstrap music preferences
        prefs = preferences.MusicPreferences
        prefs.save()
        creditoption = CreditOption.objects.create(
            music_preferences=prefs, role_type='artist', role_name='Artist',
            role_priority=1
        )
        # Legitimate entries
        artist = TrackContributor.objects.create(title="Oasis")
        album = Album.objects.create(title="What's the story morning glory")
        track = Track.objects.create(title="Don't look back in anger")
        track.create_credit("Oasis", "artist")
        track.album.add(album.id)
        track.save()
        cls.wikipedia_artist = artist
        cls.wikipedia_album = album
        cls.wikipedia_track = track
        artist = TrackContributor.objects.create(title="Foo Fighters")
        album = Album.objects.create(title="One By One")
        track = Track.objects.create(title="All My Life")
        track.create_credit("Foo Fighters", "artist")
        track.album.add(album.id)
        track.save()
        cls.lastfm_artist = artist
        cls.lastfm_album = album
        cls.lastfm_track = track
        # Illegitimate entries (garbage names that scrapers should not match)
        artist = TrackContributor.objects.create(title="vgnfdnvnvfnsncfd")
        album = Album.objects.create(title="tggbfbvfvf")
        track = Track.objects.create(title="grfgrgeagteg")
        track.create_credit("vgnfdnvnvfnsncfd", "artist")
        # NOTE(review): the fixtures above use track.album.add(...); this one
        # assigns the M2M directly — confirm both forms are intended.
        track.album = [album]
        track.save()
        cls.iartist = artist
        cls.ialbum = album
        cls.itrack = track
    def test_wikipedia(self):
        # Enable only the wikipedia scraper for this test.
        settings.JMBO_MUSIC['scrapers'] = ['wikipedia']
        wikipedia(self.wikipedia_artist)
        wikipedia(self.wikipedia_album)
        wikipedia(self.wikipedia_track)
        wikipedia(self.iartist)
        wikipedia(self.ialbum)
        wikipedia(self.itrack)
        # failUnless/failIf are deprecated aliases of assertTrue/assertFalse.
        self.failUnless(self.wikipedia_artist.image)
        self.failUnless(self.wikipedia_album.image)
        self.failUnless(self.wikipedia_track.image)
        self.failIf(self.iartist.image)
        self.failIf(self.ialbum.image)
        # Track is exempt because it always gets a default image
    def test_lastfm(self):
        # Abort test if no API key was set
        try:
            dc = settings.JMBO_MUSIC['lastfm_api_key']
            dc = settings.JMBO_MUSIC['lastfm_api_secret']
        except KeyError:
            return
        settings.JMBO_MUSIC['scrapers'] = ['lastfm']
        lastfm(self.lastfm_artist)
        lastfm(self.lastfm_album)
        lastfm(self.lastfm_track)
        lastfm(self.iartist)
        lastfm(self.ialbum)
        lastfm(self.itrack)
        self.failUnless(self.lastfm_artist.image)
        self.failUnless(self.lastfm_album.image)
        self.failUnless(self.lastfm_track.image)
        self.failIf(self.iartist.image)
        self.failIf(self.ialbum.image)
        # Track is exempt because it always gets a default image
| praekelt/jmbo-music | music/tests/__init__.py | Python | bsd-3-clause | 3,379 |
#!/usr/bin/env python
"""Test APIFlask class for input parameters causing TypeErrors.
"""
import apikit
import pytest
def test_lsstflask_type_errors():
    """Test APIFlask for input parameters causing TypeErrors.

    Each case passes exactly one invalid argument so the raised TypeError
    can only come from the parameter under test. (The original passed a
    float version alongside several other cases, and an extra positional
    argument in the description case, so those tests could pass for the
    wrong reason.)
    """
    # No arguments at all.
    # Obviously the linter is correct here...
    with pytest.raises(TypeError):
        # pylint: disable=no-value-for-parameter
        apikit.APIFlask()
    # Name is not a string
    with pytest.raises(TypeError):
        apikit.APIFlask(("Beer", "me"), "2.0", "http://example.repo",
                        "BobApp")
    # Version is not a string
    with pytest.raises(TypeError):
        apikit.APIFlask("bob", 2.0, "http://example.repo", "BobApp")
    # Repository is not a string
    with pytest.raises(TypeError):
        apikit.APIFlask("bob", "2.0", ["repo", "man"], "BobApp")
    # Description is not a string
    with pytest.raises(TypeError):
        apikit.APIFlask("bob", "2.0", "http://example.repo",
                        {"totally": "bogus"})
    # Auth is not None, the empty string or "none", or a dict
    with pytest.raises(TypeError):
        apikit.APIFlask("bob", "2.0", "http://example.repo",
                        "BobApp", auth=5)
    # Auth is not None, the empty string or "none", or a dict
    with pytest.raises(TypeError):
        apikit.APIFlask("bob", "2.0", "http://example.repo", "BobApp",
                        auth="bob")
    # Api_version is not a string
    with pytest.raises(TypeError):
        apikit.APIFlask("bob", "2.0", "http://example.repo", "BobApp",
                        api_version=5, auth="")
    # Route is not None, a string, or a list of strings
    with pytest.raises(TypeError):
        apikit.APIFlask("bob", "2.0", "http://example.repo", "BobApp",
                        route=2)
    # Route is a list that contains a non-string
    with pytest.raises(TypeError):
        apikit.APIFlask("bob", "2.0", "http://example.repo", "BobApp",
                        route=[2])
| lsst-sqre/sqre-apikit | tests/test_lsstflask_type_errors.py | Python | mit | 1,993 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._keys_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_by_workspace_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class KeysOperations:
"""KeysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to send the HTTP requests.
        self._client = client
        # msrest serializer/deserializer pair for request/response bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list_by_workspace(
        self,
        resource_group_name: str,
        workspace_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.KeyInfoListResult"]:
        """Returns a list of keys in a workspace.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either KeyInfoListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.KeyInfoListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.KeyInfoListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the templated URL; continuation pages reuse
            # the service-provided next_link verbatim.
            if not next_link:
                request = build_list_by_workspace_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    template_url=self.list_by_workspace.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_workspace_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Turn one page response into (next_link, items).
            deserialized = self._deserialize("KeyInfoListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys'}  # type: ignore
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        workspace_name: str,
        key_name: str,
        **kwargs: Any
    ) -> "_models.Key":
        """Gets a workspace key.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param key_name: The name of the workspace key.
        :type key_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Key, or the result of cls(response)
        :rtype: ~azure.mgmt.synapse.models.Key
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Key"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            key_name=key_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a success for this operation; anything else becomes
        # an HttpResponseError carrying the deserialized ARM error payload.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Key', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}'}  # type: ignore
@distributed_trace_async
async def create_or_update(
    self,
    resource_group_name: str,
    workspace_name: str,
    key_name: str,
    key_properties: "_models.Key",
    **kwargs: Any
) -> "_models.Key":
    """Creates or updates a workspace key.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param key_name: The name of the workspace key.
    :type key_name: str
    :param key_properties: Key put request properties.
    :type key_properties: ~azure.mgmt.synapse.models.Key
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Key, or the result of cls(response)
    :rtype: ~azure.mgmt.synapse.models.Key
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Optional response-transform hook; popped so it is not forwarded to the pipeline.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Key"]
    # Map well-known HTTP status codes to azure-core exception types.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    # Serialize the input model into the JSON request body.
    _json = self._serialize.body(key_properties, 'Key')

    request = build_create_or_update_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        key_name=key_name,
        content_type=content_type,
        json=_json,
        template_url=self.create_or_update.metadata['url'],
    )
    request = _convert_request(request)
    # Expand the URL template parameters against the client's base endpoint.
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        # map_error raises the mapped exception type if the status code matches;
        # otherwise fall through to a generic HttpResponseError with the ARM error body.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('Key', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}'}  # type: ignore
@distributed_trace_async
async def delete(
    self,
    resource_group_name: str,
    workspace_name: str,
    key_name: str,
    **kwargs: Any
) -> Optional["_models.Key"]:
    """Deletes a workspace key.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param key_name: The name of the workspace key.
    :type key_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Key, or the result of cls(response)
    :rtype: ~azure.mgmt.synapse.models.Key or None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Optional response-transform hook; popped so it is not forwarded to the pipeline.
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.Key"]]
    # Map well-known HTTP status codes to azure-core exception types.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_delete_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        key_name=key_name,
        template_url=self.delete.metadata['url'],
    )
    request = _convert_request(request)
    # Expand the URL template parameters against the client's base endpoint.
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 204 means the key was already gone (or there is no body to return).
    if response.status_code not in [200, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # Body is only present on 200; on 204 the method returns None.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('Key', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}'}  # type: ignore
| Azure/azure-sdk-for-python | sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_keys_operations.py | Python | mit | 13,131 |
"""ColorMod tool."""
import sublime
import sublime_plugin
from .lib.coloraide import Color
import mdpopups
from .lib import colorbox
from . import ch_util as util
from . import ch_tools as tools
from .ch_mixin import _ColorMixin
import copy
# Markdown shown in the input panel when the entered text does not parse as a
# valid `color()`/CSS color: it documents the expected `color-mod` syntax.
# The leading YAML front matter configures mdpopups' markdown extensions, and
# the bare `{}` placeholder is filled with the popup CSS via
# `DEF_COLORMOD.format(style)` before rendering.
DEF_COLORMOD = """---
markdown_extensions:
- markdown.extensions.attr_list
- markdown.extensions.def_list
- pymdownx.betterem
...
{}
## Format
<code>color(color_syntax adjuster*)</code>
Also accepts normal CSS color syntax. Color functions can be nested.
## Instructions
Colors must be in the **sRGB**, **HSL**, or **HWB** color space. Other color<br>
spaces may be parsed outside of `color()`, but modern CSS colors <br>
that are defined using the conflicting `color()` format will not work.
Supported adjusters are <code>alpha()</code>, <code>a()</code>, <code>lightness()</code>, <code>l()</code>,<br>
<code>saturation()</code>, <code>s()</code>, <code>blend()</code>, and <code>blenda()</code>.
Please see [Sublime Text Documentation](https://www.sublimetext.com/docs/color_schemes.html#colors) for more info.
"""
class ColorHelperColorModInputHandler(tools._ColorInputHandler):
    """Handle color inputs.

    Input handler for the `color-mod` tool: validates the typed text against
    the `st_colormod` color class and renders a live preview (or the syntax
    help markdown when the text does not parse).
    """

    def __init__(self, view, initial=None, **kwargs):
        """Initialize with an optional pre-filled color string."""

        self.color = initial
        super().__init__(view, **kwargs)

    def placeholder(self):
        """Placeholder."""

        return "Color"

    def initial_text(self):
        """Initial text.

        Seed the input panel from, in order of preference: the explicit
        `initial` color, then the current selection (converted to `color-mod`
        form when it parses), then the raw selected text, then empty.
        """

        self.color_mod_class = util.import_color("ColorHelper.custom.st_colormod.Color")
        if self.color is not None:
            return self.color
        elif len(self.view.sel()) == 1:
            self.setup_color_class()
            text = self.view.substr(self.view.sel()[0])
            if text:
                color = None
                if self.custom_color_class == self.color_mod_class:
                    # Try and handle a `color()` case if the file uses `color-mod`.
                    # Basically, if the file already supports `color-mod` input,
                    # then we want to return the text raw if it parses.
                    try:
                        color = self.color_mod_class(text, filters=util.CSS_SRGB_SPACES)
                    except Exception:
                        pass
                if color is None:
                    # Try to use the current file's input format and convert input
                    # to the default string output for the color.
                    try:
                        color = self.custom_color_class(text, filters=self.filters)
                    except Exception:
                        pass
                if color is not None:
                    # convert to a `color-mod` instance.
                    return self.color_mod_class(color).to_string(**util.DEFAULT)
                else:
                    return text
        return ''

    def preview(self, text):
        """Preview.

        Render a color swatch for valid input; on any parse/render failure,
        fall back to the `DEF_COLORMOD` syntax help rendered via mdpopups.
        """

        style = self.get_html_style()
        try:
            html = None
            color = self.color_mod_class(text.strip())
            if color is not None:
                pcolor = Color(color)
                preview_border = self.default_border
                message = ""
                if self.gamut_space == 'srgb':
                    # For sRGB previews, gamut-check in the color's own space when it
                    # is one of the sRGB-family spaces.
                    check_space = self.gamut_space if pcolor.space() not in util.SRGB_SPACES else pcolor.space()
                else:
                    check_space = self.gamut_space
                if not pcolor.in_gamut(check_space):
                    message = '<br><em style="font-size: 0.9em;">* preview out of gamut</em>'
                    pcolor.convert(self.gamut_space, fit=True, in_place=True)
                # Show the color twice: fully opaque and with its real alpha.
                preview = pcolor.clone().set('alpha', 1)
                preview_alpha = pcolor
                preview_border = self.default_border
                # Derive a secondary border shade that contrasts with the primary one.
                temp = Color(preview_border)
                if temp.luminance() < 0.5:
                    second_border = temp.mix('white', 0.25, space=self.gamut_space, out_space=self.gamut_space)
                    second_border.set('alpha', 1)
                else:
                    second_border = temp.mix('black', 0.25, space=self.gamut_space, out_space=self.gamut_space)
                    second_border.set('alpha', 1)
                height = self.height * 3
                width = self.width * 3
                check_size = self.check_size(height, scale=8)
                html = tools.PREVIEW_IMG.format(
                    colorbox.color_box(
                        [preview, preview_alpha],
                        preview_border, second_border,
                        border_size=1, height=height, width=width, check_size=check_size
                    ),
                    message,
                    color.to_string(**util.DEFAULT)
                )
            if html:
                return sublime.Html(style + html)
            else:
                return sublime.Html(mdpopups.md2html(self.view, DEF_COLORMOD.format(style)))
        except Exception:
            return sublime.Html(mdpopups.md2html(self.view, DEF_COLORMOD.format(style)))

    def validate(self, color):
        """Validate.

        The input is accepted when `st_colormod` can parse it.
        """

        try:
            color = self.color_mod_class(color.strip())
            return color is not None
        except Exception:
            return False
class ColorHelperSublimeColorModCommand(_ColorMixin, sublime_plugin.TextCommand):
    """Open edit a color directly."""

    def run(
        self, edit, color_helper_color_mod, initial=None, on_done=None, **kwargs
    ):
        """Run command.

        Parse `color_helper_color_mod` with the `st_colormod` color class and,
        if it is valid, forward the result to the `on_done` command (defaulting
        to the main `color_helper` result view).
        """

        text = color_helper_color_mod.strip()
        self.custom_color_class = util.import_color("ColorHelper.custom.st_colormod.Color")
        color = self.custom_color_class(text)
        if color is not None:
            if on_done is None:
                # Default hand-off: show the parsed color in ColorHelper's result popup.
                on_done = {
                    'command': 'color_helper',
                    'args': {'mode': "result", "insert_raw": text, "result_type": "__tool__:__colormod__"}
                }
            call = on_done.get('command')
            if call is None:
                return
            # Deep-copy so the caller's dict is never mutated by the arg injection below.
            args = copy.deepcopy(on_done.get('args', {}))
            args['color'] = color.to_string(**util.COLOR_FULL_PREC)
            args['insert_raw'] = text
            self.view.run_command(call, args)

    def input(self, kwargs):  # noqa: A003
        """Input."""

        return ColorHelperColorModInputHandler(self.view, **kwargs)
| facelessuser/ColorHelper | ch_tool_colormod.py | Python | mit | 6,471 |
"""Gaussian processes regression."""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Modified by: Pete Green <p.l.green@liverpool.ac.uk>
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve
import scipy.optimize
from ..base import BaseEstimator, RegressorMixin, clone
from ..base import MultiOutputMixin
from .kernels import RBF, ConstantKernel as C
from ..preprocessing._data import _handle_zeros_in_scale
from ..utils import check_random_state
from ..utils.optimize import _check_optimize_result
class GaussianProcessRegressor(MultiOutputMixin,
                               RegressorMixin, BaseEstimator):
    """Gaussian process regression (GPR).
    The implementation is based on Algorithm 2.1 of Gaussian Processes
    for Machine Learning (GPML) by Rasmussen and Williams.
    In addition to standard scikit-learn estimator API,
    GaussianProcessRegressor:
       * allows prediction without prior fitting (based on the GP prior)
       * provides an additional method `sample_y(X)`, which evaluates samples
         drawn from the GPR (prior or posterior) at given inputs
       * exposes a method `log_marginal_likelihood(theta)`, which can be used
         externally for other ways of selecting hyperparameters, e.g., via
         Markov chain Monte Carlo.
    Read more in the :ref:`User Guide <gaussian_process>`.
    .. versionadded:: 0.18
    Parameters
    ----------
    kernel : kernel instance, default=None
        The kernel specifying the covariance function of the GP. If None is
        passed, the kernel ``ConstantKernel(1.0, constant_value_bounds="fixed"
        * RBF(1.0, length_scale_bounds="fixed")`` is used as default. Note that
        the kernel hyperparameters are optimized during fitting unless the
        bounds are marked as "fixed".
    alpha : float or ndarray of shape (n_samples,), default=1e-10
        Value added to the diagonal of the kernel matrix during fitting.
        This can prevent a potential numerical issue during fitting, by
        ensuring that the calculated values form a positive definite matrix.
        It can also be interpreted as the variance of additional Gaussian
        measurement noise on the training observations. Note that this is
        different from using a `WhiteKernel`. If an array is passed, it must
        have the same number of entries as the data used for fitting and is
        used as datapoint-dependent noise level. Allowing to specify the
        noise level directly as a parameter is mainly for convenience and
        for consistency with Ridge.
    optimizer : "fmin_l_bfgs_b" or callable, default="fmin_l_bfgs_b"
        Can either be one of the internally supported optimizers for optimizing
        the kernel's parameters, specified by a string, or an externally
        defined optimizer passed as a callable. If a callable is passed, it
        must have the signature::
            def optimizer(obj_func, initial_theta, bounds):
                # * 'obj_func': the objective function to be minimized, which
                #   takes the hyperparameters theta as a parameter and an
                #   optional flag eval_gradient, which determines if the
                #   gradient is returned additionally to the function value
                # * 'initial_theta': the initial value for theta, which can be
                #   used by local optimizers
                # * 'bounds': the bounds on the values of theta
                ....
                # Returned are the best found hyperparameters theta and
                # the corresponding value of the target function.
                return theta_opt, func_min
        Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
        is used. If None is passed, the kernel's parameters are kept fixed.
        Available internal optimizers are::
            'fmin_l_bfgs_b'
    n_restarts_optimizer : int, default=0
        The number of restarts of the optimizer for finding the kernel's
        parameters which maximize the log-marginal likelihood. The first run
        of the optimizer is performed from the kernel's initial parameters,
        the remaining ones (if any) from thetas sampled log-uniform randomly
        from the space of allowed theta-values. If greater than 0, all bounds
        must be finite. Note that n_restarts_optimizer == 0 implies that one
        run is performed.
    normalize_y : bool, default=False
        Whether the target values y are normalized, the mean and variance of
        the target values are set equal to 0 and 1 respectively. This is
        recommended for cases where zero-mean, unit-variance priors are used.
        Note that, in this implementation, the normalisation is reversed
        before the GP predictions are reported.
        .. versionchanged:: 0.23
    copy_X_train : bool, default=True
        If True, a persistent copy of the training data is stored in the
        object. Otherwise, just a reference to the training data is stored,
        which might cause predictions to change if the data is modified
        externally.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation used to initialize the centers.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
    Attributes
    ----------
    X_train_ : array-like of shape (n_samples, n_features) or list of object
        Feature vectors or other representations of training data (also
        required for prediction).
    y_train_ : array-like of shape (n_samples,) or (n_samples, n_targets)
        Target values in training data (also required for prediction)
    kernel_ : kernel instance
        The kernel used for prediction. The structure of the kernel is the
        same as the one passed as parameter but with optimized hyperparameters
    L_ : array-like of shape (n_samples, n_samples)
        Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
    alpha_ : array-like of shape (n_samples,)
        Dual coefficients of training data points in kernel space
    log_marginal_likelihood_value_ : float
        The log-marginal-likelihood of ``self.kernel_.theta``
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = DotProduct() + WhiteKernel()
    >>> gpr = GaussianProcessRegressor(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.3680...
    >>> gpr.predict(X[:2,:], return_std=True)
    (array([653.0..., 592.1...]), array([316.6..., 316.6...]))
    """

    def __init__(self, kernel=None, *, alpha=1e-10,
                 optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
                 normalize_y=False, copy_X_train=True, random_state=None):
        # Per sklearn convention, __init__ only stores parameters; all
        # validation and work happens in fit().
        self.kernel = kernel
        self.alpha = alpha
        self.optimizer = optimizer
        self.n_restarts_optimizer = n_restarts_optimizer
        self.normalize_y = normalize_y
        self.copy_X_train = copy_X_train
        self.random_state = random_state

    def fit(self, X, y):
        """Fit Gaussian process regression model.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Feature vectors or other representations of training data.
        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values
        Returns
        -------
        self : returns an instance of self.
        """
        if self.kernel is None:  # Use an RBF kernel as default
            self.kernel_ = C(1.0, constant_value_bounds="fixed") \
                * RBF(1.0, length_scale_bounds="fixed")
        else:
            # Clone so the user-supplied kernel object is never mutated.
            self.kernel_ = clone(self.kernel)
        self._rng = check_random_state(self.random_state)
        if self.kernel_.requires_vector_input:
            X, y = self._validate_data(X, y, multi_output=True, y_numeric=True,
                                       ensure_2d=True, dtype="numeric")
        else:
            # Structured/generic kernels may consume non-array inputs (e.g. strings).
            X, y = self._validate_data(X, y, multi_output=True, y_numeric=True,
                                       ensure_2d=False, dtype=None)
        # Normalize target value
        if self.normalize_y:
            self._y_train_mean = np.mean(y, axis=0)
            # Guard against zero std (constant targets) to avoid division by zero.
            self._y_train_std = _handle_zeros_in_scale(
                np.std(y, axis=0), copy=False
            )
            # Remove mean and make unit variance
            y = (y - self._y_train_mean) / self._y_train_std
        else:
            self._y_train_mean = np.zeros(1)
            self._y_train_std = 1
        if np.iterable(self.alpha) \
                and self.alpha.shape[0] != y.shape[0]:
            if self.alpha.shape[0] == 1:
                self.alpha = self.alpha[0]
            else:
                raise ValueError("alpha must be a scalar or an array "
                                 "with same number of entries as y. (%d != %d)"
                                 % (self.alpha.shape[0], y.shape[0]))
        self.X_train_ = np.copy(X) if self.copy_X_train else X
        self.y_train_ = np.copy(y) if self.copy_X_train else y
        if self.optimizer is not None and self.kernel_.n_dims > 0:
            # Choose hyperparameters based on maximizing the log-marginal
            # likelihood (potentially starting from several initial values)
            def obj_func(theta, eval_gradient=True):
                # Negated because the optimizer minimizes.
                if eval_gradient:
                    lml, grad = self.log_marginal_likelihood(
                        theta, eval_gradient=True, clone_kernel=False)
                    return -lml, -grad
                else:
                    return -self.log_marginal_likelihood(theta,
                                                         clone_kernel=False)
            # First optimize starting from theta specified in kernel
            optima = [(self._constrained_optimization(obj_func,
                                                      self.kernel_.theta,
                                                      self.kernel_.bounds))]
            # Additional runs are performed from log-uniform chosen initial
            # theta
            if self.n_restarts_optimizer > 0:
                if not np.isfinite(self.kernel_.bounds).all():
                    raise ValueError(
                        "Multiple optimizer restarts (n_restarts_optimizer>0) "
                        "requires that all bounds are finite.")
                bounds = self.kernel_.bounds
                for iteration in range(self.n_restarts_optimizer):
                    # theta is in log-space, so uniform sampling here is
                    # log-uniform in the original hyperparameter space.
                    theta_initial = \
                        self._rng.uniform(bounds[:, 0], bounds[:, 1])
                    optima.append(
                        self._constrained_optimization(obj_func, theta_initial,
                                                       bounds))
            # Select result from run with minimal (negative) log-marginal
            # likelihood
            lml_values = list(map(itemgetter(1), optima))
            self.kernel_.theta = optima[np.argmin(lml_values)][0]
            self.kernel_._check_bounds_params()
            self.log_marginal_likelihood_value_ = -np.min(lml_values)
        else:
            self.log_marginal_likelihood_value_ = \
                self.log_marginal_likelihood(self.kernel_.theta,
                                             clone_kernel=False)
        # Precompute quantities required for predictions which are independent
        # of actual query points
        K = self.kernel_(self.X_train_)
        # Add the noise/jitter term to the diagonal (regularization or
        # datapoint-dependent observation noise).
        K[np.diag_indices_from(K)] += self.alpha
        try:
            self.L_ = cholesky(K, lower=True)  # Line 2
        except np.linalg.LinAlgError as exc:
            exc.args = ("The kernel, %s, is not returning a "
                        "positive definite matrix. Try gradually "
                        "increasing the 'alpha' parameter of your "
                        "GaussianProcessRegressor estimator."
                        % self.kernel_,) + exc.args
            raise
        self.alpha_ = cho_solve((self.L_, True), self.y_train_)  # Line 3
        return self

    def predict(self, X, return_std=False, return_cov=False):
        """Predict using the Gaussian process regression model
        We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, optionally also
        returns its standard deviation (`return_std=True`) or covariance
        (`return_cov=True`). Note that at most one of the two can be requested.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Query points where the GP is evaluated.
        return_std : bool, default=False
            If True, the standard-deviation of the predictive distribution at
            the query points is returned along with the mean.
        return_cov : bool, default=False
            If True, the covariance of the joint predictive distribution at
            the query points is returned along with the mean.
        Returns
        -------
        y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets)
            Mean of predictive distribution a query points.
        y_std : ndarray of shape (n_samples,), optional
            Standard deviation of predictive distribution at query points.
            Only returned when `return_std` is True.
        y_cov : ndarray of shape (n_samples, n_samples), optional
            Covariance of joint predictive distribution a query points.
            Only returned when `return_cov` is True.
        """
        if return_std and return_cov:
            raise RuntimeError(
                "At most one of return_std or return_cov can be requested.")
        if self.kernel is None or self.kernel.requires_vector_input:
            X = self._validate_data(X, ensure_2d=True, dtype="numeric",
                                    reset=False)
        else:
            X = self._validate_data(X, ensure_2d=False, dtype=None,
                                    reset=False)
        if not hasattr(self, "X_train_"):  # Unfitted;predict based on GP prior
            if self.kernel is None:
                kernel = (C(1.0, constant_value_bounds="fixed") *
                          RBF(1.0, length_scale_bounds="fixed"))
            else:
                kernel = self.kernel
            # GP prior has zero mean; (co)variance comes from the kernel alone.
            y_mean = np.zeros(X.shape[0])
            if return_cov:
                y_cov = kernel(X)
                return y_mean, y_cov
            elif return_std:
                y_var = kernel.diag(X)
                return y_mean, np.sqrt(y_var)
            else:
                return y_mean
        else:  # Predict based on GP posterior
            K_trans = self.kernel_(X, self.X_train_)
            y_mean = K_trans.dot(self.alpha_)  # Line 4 (y_mean = f_star)
            # undo normalisation
            y_mean = self._y_train_std * y_mean + self._y_train_mean
            if return_cov:
                # Solve K @ V = K_trans.T
                V = cho_solve((self.L_, True), K_trans.T)  # Line 5
                y_cov = self.kernel_(X) - K_trans.dot(V)  # Line 6
                # undo normalisation
                y_cov = y_cov * self._y_train_std**2
                return y_mean, y_cov
            elif return_std:
                # Solve K @ V = K_trans.T
                V = cho_solve((self.L_, True), K_trans.T)  # Line 5
                # Compute variance of predictive distribution
                # Use einsum to avoid explicitly forming the large matrix
                # K_trans @ V just to extract its diagonal afterward.
                y_var = self.kernel_.diag(X)
                y_var -= np.einsum("ij,ji->i", K_trans, V)
                # Check if any of the variances is negative because of
                # numerical issues. If yes: set the variance to 0.
                y_var_negative = y_var < 0
                if np.any(y_var_negative):
                    warnings.warn("Predicted variances smaller than 0. "
                                  "Setting those variances to 0.")
                    y_var[y_var_negative] = 0.0
                # undo normalisation
                y_var = y_var * self._y_train_std**2
                return y_mean, np.sqrt(y_var)
            else:
                return y_mean

    def sample_y(self, X, n_samples=1, random_state=0):
        """Draw samples from Gaussian process and evaluate at X.
        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Query points where the GP is evaluated.
        n_samples : int, default=1
            Number of samples drawn from the Gaussian process per query point
        random_state : int, RandomState instance or None, default=0
            Determines random number generation to randomly draw samples.
            Pass an int for reproducible results across multiple function
            calls.
            See :term:`Glossary <random_state>`.
        Returns
        -------
        y_samples : ndarray of shape (n_samples_X, n_samples), or \
            (n_samples_X, n_targets, n_samples)
            Values of n_samples samples drawn from Gaussian process and
            evaluated at query points.
        """
        rng = check_random_state(random_state)
        y_mean, y_cov = self.predict(X, return_cov=True)
        if y_mean.ndim == 1:
            y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
        else:
            # Multi-target case: sample each output dimension independently
            # with the shared covariance, then stack along the target axis.
            y_samples = \
                [rng.multivariate_normal(y_mean[:, i], y_cov,
                                         n_samples).T[:, np.newaxis]
                 for i in range(y_mean.shape[1])]
            y_samples = np.hstack(y_samples)
        return y_samples

    def log_marginal_likelihood(self, theta=None, eval_gradient=False,
                                clone_kernel=True):
        """Returns log-marginal likelihood of theta for training data.
        Parameters
        ----------
        theta : array-like of shape (n_kernel_params,) default=None
            Kernel hyperparameters for which the log-marginal likelihood is
            evaluated. If None, the precomputed log_marginal_likelihood
            of ``self.kernel_.theta`` is returned.
        eval_gradient : bool, default=False
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta is returned
            additionally. If True, theta must not be None.
        clone_kernel : bool, default=True
            If True, the kernel attribute is copied. If False, the kernel
            attribute is modified, but may result in a performance improvement.
        Returns
        -------
        log_likelihood : float
            Log-marginal likelihood of theta for training data.
        log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta.
            Only returned when eval_gradient is True.
        """
        if theta is None:
            if eval_gradient:
                raise ValueError(
                    "Gradient can only be evaluated for theta!=None")
            return self.log_marginal_likelihood_value_
        if clone_kernel:
            kernel = self.kernel_.clone_with_theta(theta)
        else:
            kernel = self.kernel_
            kernel.theta = theta
        if eval_gradient:
            K, K_gradient = kernel(self.X_train_, eval_gradient=True)
        else:
            K = kernel(self.X_train_)
        # Same diagonal noise/jitter term as in fit().
        K[np.diag_indices_from(K)] += self.alpha
        try:
            L = cholesky(K, lower=True)  # Line 2
        except np.linalg.LinAlgError:
            # A non-PD kernel at this theta gets the worst possible score so
            # the optimizer moves away from it instead of crashing.
            return (-np.inf, np.zeros_like(theta)) \
                if eval_gradient else -np.inf
        # Support multi-dimensional output of self.y_train_
        y_train = self.y_train_
        if y_train.ndim == 1:
            y_train = y_train[:, np.newaxis]
        alpha = cho_solve((L, True), y_train)  # Line 3
        # Compute log-likelihood (compare line 7)
        log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
        log_likelihood_dims -= np.log(np.diag(L)).sum()
        log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
        log_likelihood = log_likelihood_dims.sum(-1)  # sum over dimensions
        if eval_gradient:  # compare Equation 5.9 from GPML
            tmp = np.einsum("ik,jk->ijk", alpha, alpha)  # k: output-dimension
            tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
            # Compute "0.5 * trace(tmp.dot(K_gradient))" without
            # constructing the full matrix tmp.dot(K_gradient) since only
            # its diagonal is required
            log_likelihood_gradient_dims = \
                0.5 * np.einsum("ijl,jik->kl", tmp, K_gradient)
            log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
        if eval_gradient:
            return log_likelihood, log_likelihood_gradient
        else:
            return log_likelihood

    def _constrained_optimization(self, obj_func, initial_theta, bounds):
        """Minimize ``obj_func`` within ``bounds`` using the configured optimizer."""
        if self.optimizer == "fmin_l_bfgs_b":
            opt_res = scipy.optimize.minimize(
                obj_func, initial_theta, method="L-BFGS-B", jac=True,
                bounds=bounds)
            _check_optimize_result("lbfgs", opt_res)
            theta_opt, func_min = opt_res.x, opt_res.fun
        elif callable(self.optimizer):
            theta_opt, func_min = \
                self.optimizer(obj_func, initial_theta, bounds=bounds)
        else:
            raise ValueError("Unknown optimizer %s." % self.optimizer)
        return theta_opt, func_min

    def _more_tags(self):
        # GPR can predict from the prior without being fitted first.
        return {'requires_fit': False}
| kevin-intel/scikit-learn | sklearn/gaussian_process/_gpr.py | Python | bsd-3-clause | 22,428 |
import numpy as np
import logging
from sklearn.metrics import roc_auc_score
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log = logging.getLogger(__name__)
def heaviside(x):
    """Step function used as the (sub)gradient indicator in the hinge losses.

    Returns 1 if ``x >= 0`` and 0 otherwise. Defined with ``def`` rather than
    a lambda assignment (PEP 8, E731) so it carries a proper name and docstring.
    """
    return 1 if x >= 0 else 0


# Sentinel score used to mask already-observed triples so they can never be
# selected as the next query (smallest 32-bit integer).
MIN_VAL = np.iinfo(np.int32).min
class AMDC:
"""
Implementation of Active Multi-relational Data Construction (AMDC) method.
Reference:
Kajino, H., Kishimoto, A., Botea, A., Daly, E., & Kotoulas, S. (2015). Active Learning for Multi-relational Data
Construction. WWW 2015
"""
def __init__(self, n_dim, alpha_0=0.1, gamma=0.3, gamma_p=0.9, c_e=5., c_n=1., population=False):
    """
    Parameters
    ----------
    n_dim: latent dimension of entity
    alpha_0: initial learning rate
    gamma: hyperparameter
    gamma_p: hyperparameter
    c_e: hyperparameter, impose score of positive triple to be greater than 0,
        and negative triple to be less than 0
    c_n: hyperparameter, importance of negative samples
    population: query-selection strategy flag; when True ``get_next_sample``
        picks the unobserved triple with the highest predicted score, when
        False it picks the most uncertain triple (score closest to zero)
    """
    self.n_dim = n_dim
    self.alpha_0 = alpha_0
    self.gamma = gamma
    self.gamma_p = gamma_p
    self.c_e = c_e
    self.c_n = c_n
    self.population = population
def fit(self, T, p_idx, n_idx, max_iter=100, e_gap=100, A=None, R=None, obs_only=False):
    """
    Stochastic gradient descent optimization for AMDC
    Parameters
    ----------
    T: [n_entity x n_entity x n_relation] multi-dimensional array,
        tensor representation of knowledge graph
        n_entity = number of entities
        n_relation = number of relationships
    p_idx: index of observed positive triples, all indices are raveled by np.ravel_multi_index
    n_idx: index of observed negative triples
    max_iter: maximum number of iterations
    e_gap: evaluation gap
    A: optional warm-start entity embeddings; randomly initialized when None
    R: optional warm-start rotation matrices; identity-initialized when A is None
    obs_only: When this parameter is True, the stochastic gradient step uses the
        observed positive and negative triples only.
    Returns
    A, R, r_error
    A: [n_entity x n_dim] latent feature vector of entities
    R: [n_dim x n_dim x n_relation] rotation matrix for each entity
        n_dim = size of latent dimension
    r_error: list of reconstruction errors at each evaluation point
    """
    n_entity, n_relation = T.shape[0], T.shape[2]
    # Complements of the observed sets over all raveled triple indices.
    np_idx = np.setdiff1d(range(np.prod(T.shape)), p_idx)  # not positive index
    nn_idx = np.setdiff1d(range(np.prod(T.shape)), n_idx)  # not negative index
    if isinstance(A, type(None)):
        # Random init on the unit sphere (each embedding row is L2-normalized).
        A = np.random.random([n_entity, self.n_dim]) - 0.5
        A /= np.linalg.norm(A, ord=2, axis=1)[:, np.newaxis]
        R = np.zeros([self.n_dim, self.n_dim, n_relation])  # rotation matrices
        for k in range(n_relation):
            R[:, :, k] = np.identity(self.n_dim)
    r_error = list()
    # Nothing to learn from: return the (possibly fresh) parameters unchanged.
    if len(p_idx) == 0 and len(n_idx) == 0:
        return A, R, r_error
    if obs_only and (len(p_idx) == 0 or len(n_idx) == 0):
        return A, R, r_error
    it = 0
    converged = False
    learning_rate = self.alpha_0
    while not converged:
        if not obs_only:
            # Alternate (randomly) between a positive-ranking and a
            # negative-ranking SGD step; force a side when one set is empty.
            selector = np.random.randint(100) % 2
            if len(p_idx) == 0:
                selector = 0
            elif len(n_idx) == 0:
                selector = 1
            if selector:
                # Rank a random observed-positive triple above a random
                # not-positive triple (margin gamma), and push its score
                # above gamma_p (weighted by c_e).
                next_idx = np.random.randint(len(p_idx))
                next_np_idx = np.random.randint(len(np_idx))
                i, j, k = np.unravel_index(p_idx[next_idx], T.shape)
                i_bar, j_bar, k_bar = np.unravel_index(np_idx[next_np_idx], T.shape)
                I_1 = heaviside(self.gamma - np.dot(np.dot(A[i], R[:, :, k]), A[j])
                                + np.dot(np.dot(A[i_bar], R[:, :, k_bar]), A[j_bar]))
                I_2 = heaviside(self.gamma_p - np.dot(np.dot(A[i], R[:, :, k]), A[j]))
                # updating parameters
                if I_1 != 0 or I_2 != 0:
                    # Snapshot current values so all three updates use the
                    # same pre-update state.
                    a_i, a_j, r_k = A[i].copy(), A[j].copy(), R[:, :, k].copy()
                    A[i] -= learning_rate * (-(I_1 + I_2 * self.c_e) * np.dot(r_k, a_j))
                    A[j] -= learning_rate * (-(I_1 + I_2 * self.c_e) * np.dot(r_k.T, a_i))
                    R[:, :, k] -= learning_rate * (-(I_1 + I_2 * self.c_e) * np.outer(a_i, a_j))
                if I_1 != 0:
                    a_i_bar, a_j_bar, r_k_bar = A[i_bar].copy(), A[j_bar].copy(), R[:, :, k_bar].copy()
                    A[i_bar] -= learning_rate * (I_1 * np.dot(r_k_bar, a_j_bar))
                    A[j_bar] -= learning_rate * (I_1 * np.dot(r_k_bar.T, a_i_bar))
                    R[:, :, k_bar] -= learning_rate * (I_1 * np.outer(a_i_bar, a_j_bar))
            else:
                # Mirror step for an observed-negative triple: rank it below
                # a random not-negative triple and push its score under
                # -gamma_p (weighted by c_n / c_e).
                next_idx = np.random.randint(len(n_idx))
                next_nn_idx = np.random.randint(len(nn_idx))
                i, j, k = np.unravel_index(n_idx[next_idx], T.shape)
                i_bar, j_bar, k_bar = np.unravel_index(nn_idx[next_nn_idx], T.shape)
                I_3 = heaviside(self.gamma + np.dot(np.dot(A[i], R[:, :, k]), A[j])
                                - np.dot(np.dot(A[i_bar], R[:, :, k_bar]), A[j_bar]))
                I_4 = heaviside(self.gamma_p + np.dot(np.dot(A[i], R[:, :, k]), A[j]))
                if I_3 != 0 or I_4 != 0:
                    a_i, a_j, r_k = A[i].copy(), A[j].copy(), R[:, :, k].copy()
                    A[i] -= learning_rate * ((I_3 * self.c_n + I_4 * self.c_e) * np.dot(r_k, a_j))
                    A[j] -= learning_rate * ((I_3 * self.c_n + I_4 * self.c_e) * np.dot(r_k.T, a_i))
                    R[:, :, k] -= learning_rate * ((I_3 * self.c_n + I_4 * self.c_e) * np.outer(a_i, a_j))
                if I_3 != 0:
                    a_i_bar, a_j_bar, r_k_bar = A[i_bar].copy(), A[j_bar].copy(), R[:, :, k_bar].copy()
                    A[i_bar] -= learning_rate * (I_3 * self.c_n * np.dot(r_k_bar, a_j_bar))
                    A[j_bar] -= learning_rate * (I_3 * self.c_n * np.dot(r_k_bar.T, a_i_bar))
                    R[:, :, k_bar] -= learning_rate * (I_3 * self.c_n * np.outer(a_i_bar, a_j_bar))
        elif obs_only:
            # Observed-only mode: pair one observed positive with one
            # (intended) observed negative per step.
            next_p_idx = np.random.randint(len(p_idx))
            next_n_idx = np.random.randint(len(n_idx))
            i, j, k = np.unravel_index(p_idx[next_p_idx], T.shape)
            # NOTE(review): `np_idx` (not-positive complement) is indexed with an
            # index drawn from `range(len(n_idx))` — presumably this should be
            # `n_idx[next_n_idx]` (the observed negatives); verify against the
            # AMDC paper / upstream repo before relying on obs_only mode.
            i_bar, j_bar, k_bar = np.unravel_index(np_idx[next_n_idx], T.shape)
            I_1 = heaviside(self.gamma - np.dot(np.dot(A[i], R[:, :, k]), A[j])
                            + np.dot(np.dot(A[i_bar], R[:, :, k_bar]), A[j_bar]))
            I_2 = heaviside(self.gamma_p - np.dot(np.dot(A[i], R[:, :, k]), A[j]))
            I_4 = heaviside(self.gamma_p + np.dot(np.dot(A[i_bar], R[:, :, k_bar]), A[j_bar]))
            if I_1 != 0 or I_2 != 0:
                a_i, a_j, r_k = A[i].copy(), A[j].copy(), R[:, :, k].copy()
                A[i] -= learning_rate * (-(I_1 + I_2 * self.c_e) * np.dot(r_k, a_j))
                A[j] -= learning_rate * (-(I_1 + I_2 * self.c_e) * np.dot(r_k.T, a_i))
                R[:, :, k] -= learning_rate * (-(I_1 + I_2 * self.c_e) * np.outer(a_i, a_j))
            if I_1 != 0 or I_4 != 0:
                # NOTE(review): the snapshots below are taken from the bar-indexed
                # parameters but the updates are applied to A[i], A[j], R[:, :, k]
                # — this looks like it should update A[i_bar], A[j_bar],
                # R[:, :, k_bar] instead; confirm against the reference
                # implementation before changing behavior.
                a_i, a_j, r_k = A[i_bar].copy(), A[j_bar].copy(), R[:, :, k_bar].copy()
                A[i] -= learning_rate * ((I_1 + I_4 * self.c_e) * np.dot(r_k, a_j))
                A[j] -= learning_rate * ((I_1 + I_4 * self.c_e) * np.dot(r_k.T, a_i))
                R[:, :, k] -= learning_rate * ((I_1 + I_4 * self.c_e) * np.outer(a_i, a_j))
        # unit vector projection (this could be improved by using l1 projection alg.)
        # converting learned matrix to rotational matrix
        A[i] /= np.linalg.norm(A[i], ord=2)
        A[j] /= np.linalg.norm(A[j], ord=2)
        # Project the updated relation matrix back onto the orthogonal group
        # via SVD (nearest rotation: U @ V).
        U, sigma, V = np.linalg.svd(R[:, :, k])
        R[:, :, k] = np.dot(U, V)
        A[i_bar] /= np.linalg.norm(A[i_bar], ord=2)
        A[j_bar] /= np.linalg.norm(A[j_bar], ord=2)
        U, sigma, V = np.linalg.svd(R[:, :, k_bar])
        R[:, :, k_bar] = np.dot(U, V)
        if it >= max_iter:
            converged = True
        if it % e_gap == 0 and it != 0:
            # Periodic evaluation: mean per-relation ROC-AUC of the
            # reconstruction against T, plus the hinge objective.
            T_bar = self.reconstruct(A, R)
            _T = T.copy()
            _T[_T == -1] = 0
            T_bar = (T_bar + 1.) / 2.
            err = 0.
            for k in range(n_relation):
                err += roc_auc_score(_T[:, :, k].flatten(), T_bar[:, :, k].flatten())
            err /= float(n_relation)
            obj = self.evaluate_objfn(A, R, p_idx, n_idx)
            r_error.append((obj, err))
            log.debug('Iter %d, ObjectiveFn: %.5f, ROC-AUC: %.5f' % (it, obj, err))
        else:
            log.debug('Iter %d' % (it))
        it += 1
        # Standard 1/sqrt(t) learning-rate decay.
        learning_rate = self.alpha_0 / np.sqrt(it)
    return A, R, r_error
def reconstruct(self, A, R):
"""
Reconstruct knowledge graph from latent representations of entities and rotation matrices
Parameters
----------
A: [E x D] multi-dimensional array, latent representation of entity
R: [D x D x K] multi-dimensional array, rotation matrix for each relation
Returns
-------
[E x E x K] reconstructed knowledge graph
"""
T = np.zeros((A.shape[0], A.shape[0], R.shape[2]))
for i in range(R.shape[2]):
T[:, :, i] = np.dot(np.dot(A, R[:, :, i]), A.T)
return T
def get_next_sample(self, A, R, mask):
T = self.reconstruct(A, R)
T[mask == 1] = MIN_VAL
if self.population:
idx = T.argmax()
return np.unravel_index(idx, T.shape), idx
else:
idx = np.abs(T).argmin()
return np.unravel_index(idx, T.shape), idx
    def evaluate_objfn(self, A, R, p_idx, n_idx):
        """
        Compute the objective function of the AMDC model
        (Equation (4) in the original paper).

        Parameters
        ----------
        A: [E x D] multi-dimensional array, latent representation of entity
        R: [D x D x K] multi-dimensional array, rotation matrix for each relation
        p_idx: index of observed positive triples, all indices are raveled by np.ravel_multi_index
        n_idx: index of observed negative triples

        Returns
        -------
        obj: objective function value of the AMDC model
        """
        obj = 0
        # Total number of triples in the (E x E x K) tensor.
        total = A.shape[0] * A.shape[0] * R.shape[2]
        np_idx = np.setdiff1d(range(total), p_idx)  # not-positive index
        nn_idx = np.setdiff1d(range(total), n_idx)  # not-negative index
        scores = self.reconstruct(A, R)
        scores = scores.flatten()
        # Vectorised form kept for reference; it requires too much memory:
        # first = self.gamma - scores[p_idx] + scores[np_idx][:,np.newaxis]
        # first = np.sum(first[first>0])
        # alternative (takes too much time...)
        first, third = 0, 0
        # Hinge ranking loss: each positive should outscore every
        # not-positive entry by at least gamma.
        for i in p_idx:
            tmp = self.gamma - scores[i] + scores[np_idx]
            first += np.sum(tmp[tmp > 0])
        # Margin term: positives should score at least gamma_p.
        second = self.c_e * (self.gamma_p - scores[p_idx])
        second = np.sum(second[second > 0])
        # third = self.c_n * (self.gamma - scores[nn_idx] + scores[n_idx][:,np.newaxis])
        # third = np.sum(third[third>0])
        # Hinge ranking loss: each not-negative entry should outscore every
        # observed negative by at least gamma (weighted by c_n).
        for i in nn_idx:
            tmp = self.c_n * (self.gamma - scores[i] + scores[n_idx])
            third += np.sum(tmp[tmp > 0])
        # Margin term: negatives should score at most -gamma_p.
        fourth = self.c_e * (self.gamma_p + scores[n_idx])
        fourth = np.sum(fourth[fourth > 0])
        # Normalise each pair of terms by the number of contributing pairs.
        obj += (first + second) / float(len(p_idx) * len(np_idx))
        obj += (third + fourth) / float(len(n_idx) * len(nn_idx))
        return obj
    def do_active_learning(self, T, mask, max_iter, test_t=None, query_log='', eval_log='', obs_only=False):
        """
        Greedy active-learning loop: repeatedly refit the model, query the
        triple chosen by the acquisition rule, and mark it as observed.

        NOTE(review): mutates `T` (zero entries become -1) and `mask`
        in place -- pass copies if the originals are needed afterwards.

        Parameters
        ----------
        T: [E x E x K] knowledge tensor with 1/0 entries (0 -> -1 here)
        mask: binary array, same shape as T; 1 marks observed entries
        max_iter: number of query rounds
        test_t: optional binary array marking held-out test entries
        query_log: optional path; queried (i, j, k) indices are appended
        eval_log: optional path; per-round ROC-AUC values are appended
        obs_only: forwarded to `fit`

        Returns
        -------
        (seq, auc_scores): list of queried index tuples and, when `test_t`
        is given, the ROC-AUC measured after each round.
        """
        T[T == 0] = -1
        # Observations revealed so far (0 where still unobserved).
        cur_obs = np.zeros_like(T)
        cur_obs[mask == 1] = T[mask == 1]
        p_idx = np.ravel_multi_index((cur_obs == 1).nonzero(), T.shape)  # raveled positive index
        n_idx = np.ravel_multi_index((cur_obs == -1).nonzero(), T.shape)  # raveled negative index
        pop = 0  # number of positive triples found by querying
        pull_size = 1  # queries per round
        E, K = T.shape[0], T.shape[2]
        # Random unit-norm entity embeddings; identity rotations to start.
        A = np.random.random([E, self.n_dim]) - 0.5
        A /= np.linalg.norm(A, ord=2, axis=1)[:, np.newaxis]
        R = np.zeros([self.n_dim, self.n_dim, K])  # rotation matrices
        for k in range(K):
            R[:, :, k] = np.identity(self.n_dim)
        seq = list()
        auc_scores = list()
        # NOTE(review): `iter` shadows the builtin of the same name.
        for iter in range(max_iter):
            # Warm-started refit on the observations collected so far.
            A, R, _ = self.fit(T, p_idx, n_idx, max_iter=1000, e_gap=1001, A=A, R=R, obs_only=obs_only)
            _T = self.reconstruct(A, R)
            # Exclude already-observed and held-out test entries from querying.
            _T[mask == 1] = MIN_VAL
            _T[test_t == 1] = MIN_VAL
            for pull_no in range(pull_size):
                if self.population:
                    idx = _T.argmax()
                    next_idx = np.unravel_index(idx, T.shape)
                else:
                    idx = np.abs(_T).argmin()
                    next_idx = np.unravel_index(idx, T.shape)
                if len(query_log) > 0:
                    with open(query_log, 'a') as f:
                        f.write('%d,%d,%d\n' % (next_idx[0], next_idx[1], next_idx[2]))
                seq.append(next_idx)
                # Reveal the queried entry and record its label.
                _T[next_idx] = MIN_VAL
                mask[next_idx] = 1
                cur_obs[next_idx] = T[next_idx]
                if cur_obs[next_idx] == 1:
                    pop += 1
                if T[next_idx] == 1:
                    p_idx = np.concatenate((p_idx, (idx,)))
                else:
                    n_idx = np.concatenate((n_idx, (idx,)))
                log.debug('[NEXT IDX] %s, %d', next_idx, cur_obs[next_idx])
            if not isinstance(test_t, type(None)):
                # Evaluate on the held-out entries with an unmasked rebuild.
                _T = self.reconstruct(A, R)
                auc_roc = roc_auc_score(T[test_t == 1], _T[test_t == 1])
                log.info('[ITER %d] %d/%d, %.2f', iter, pop, (iter + 1) * pull_size, auc_roc)
                if len(eval_log) > 0:
                    with open(eval_log, 'a') as f:
                        f.write('%f\n' % auc_roc)
                auc_scores.append(auc_roc)
            else:
                log.info('[ITER %d] %d/%d', iter, pop, (iter + 1) * pull_size)
        return seq, auc_scores
def test():
    """
    Test with the Kinship dataset.

    Splits the tensor into a small random training mask and a test mask,
    then runs the active-learning loop and watches the ROC-AUC evolve.
    """
    from scipy.io.matlab import loadmat
    mat = loadmat('../data/kinship/alyawarradata.mat')
    T = np.array(mat['Rs'], np.float32)
    T[T == 0] = -1  # set negative value to -1
    E, K = T.shape[0], T.shape[2]
    # NOTE(review): max_iter and n_dim are computed/set here, but the call
    # below hard-codes 15000 rounds; max_iter is currently unused.
    max_iter = E * E * K * 10
    n_dim = 10
    # p_idx = np.ravel_multi_index((T == 1).nonzero(), T.shape) # raveled positive index
    # n_idx = np.ravel_multi_index((T == -1).nonzero(), T.shape) # raveled negative index
    # model.fit(T, p_idx, n_idx, max_iter, e_gap=10000)
    # ~1% of entries observed for training, ~half held out for testing.
    training = np.random.binomial(1., 0.01, T.shape)
    testing = np.random.binomial(1., 0.5, T.shape)
    testing[training == 1] = 0
    model = AMDC(n_dim)
    model.population = True
    model.do_active_learning(T, training, 15000, testing)
# Script entry point: run the Kinship active-learning smoke test.
if __name__ == '__main__':
    test()
| arongdari/almc | almc/amdc/amdc.py | Python | gpl-2.0 | 15,652 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from .... import units as u
from ...builtin_frames import ICRS, FK5
from ....time import Time
from ....table import Table
from ...angle_utilities import angular_separation
from ....utils.data import get_pkg_data_contents
# the number of tests to run
from . import N_ACCURACY_TESTS
TOLERANCE = 0.03 # arcseconds
def test_icrs_fk5():
    """Accuracy check: round-trip ICRS <-> FK5 against reference values."""
    csv_lines = get_pkg_data_contents('icrs_fk5.csv').split('\n')
    table = Table.read(csv_lines, format='ascii', delimiter=',', guess=False)

    # Use every row if the accuracy-test budget allows, otherwise sample.
    if N_ACCURACY_TESTS >= len(table):
        idxs = range(len(table))
    else:
        idxs = np.random.randint(len(table), size=N_ACCURACY_TESTS)

    errors_to_fk5 = []
    errors_to_icrs = []
    for idx in idxs:
        row = table[int(idx)]  # int here is to get around a py 3.x astropy.table bug
        equinox = Time(row['equinox_fk5'], scale='utc')

        # ICRS to FK5
        fk5_coord = ICRS(ra=row['ra_in'] * u.deg,
                         dec=row['dec_in'] * u.deg).transform_to(FK5(equinox=equinox))
        sep = angular_separation(fk5_coord.ra.radian, fk5_coord.dec.radian,
                                 np.radians(row['ra_fk5']),
                                 np.radians(row['dec_fk5']))
        errors_to_fk5.append(np.degrees(sep) * 3600.)

        # FK5 to ICRS
        icrs_coord = FK5(ra=row['ra_in'] * u.deg, dec=row['dec_in'] * u.deg,
                         equinox=equinox).transform_to(ICRS)
        sep = angular_separation(icrs_coord.ra.radian, icrs_coord.dec.radian,
                                 np.radians(row['ra_icrs']),
                                 np.radians(row['dec_icrs']))
        errors_to_icrs.append(np.degrees(sep) * 3600.)

    # All separations must stay under the arcsecond tolerance.
    np.testing.assert_array_less(errors_to_fk5, TOLERANCE)
    np.testing.assert_array_less(errors_to_icrs, TOLERANCE)
| funbaker/astropy | astropy/coordinates/tests/accuracy/test_icrs_fk5.py | Python | bsd-3-clause | 1,844 |
#!/usr/bin/python
import os
import SimpleHTTPServer
import SocketServer
import socket
PORT = 8000
DOC_DIR = "docs/html/"
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
os.chdir(DOC_DIR)
while 1 :
try :
httpd = SocketServer.TCPServer(("", PORT), Handler)
break
except socket.error :
PORT += 1
print "serving at port", PORT
httpd.serve_forever()
| chanke/hpss-utils | hpss-http_proxy/src/doxygen/serve_http_docs.py | Python | bsd-3-clause | 386 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``cancel`` text field to the ``Event`` model."""
    dependencies = [
        ('events', '0044_auto_20160306_1546'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='cancel',
            # Free-text motivation shown when an event has been cancelled.
            # verbose_name/help_text are in Swedish, matching the project UI.
            field=models.CharField(blank=True, verbose_name='Beskrivning för inställt event', null=True, help_text='Motivera varför eventet blivit inställt', max_length=255),
        ),
    ]
| I-sektionen/i-portalen | wsgi/iportalen_django/events/migrations/0045_event_cancel.py | Python | mit | 526 |
"""
The Request class is used as a wrapper around the standard request object.
The wrapped request then offers a richer API, in particular :
- content automatically parsed according to `Content-Type` header,
and available as `request.DATA`
- full support of PUT method, including support for file uploads
- form overloading of HTTP method, content type and content
"""
from __future__ import unicode_literals
from django.conf import settings
from django.http import QueryDict
from django.http.multipartparser import parse_header
from django.utils.datastructures import MultiValueDict
from rest_framework import HTTP_HEADER_ENCODING
from rest_framework import exceptions
from rest_framework.compat import BytesIO
from rest_framework.settings import api_settings
def is_form_media_type(media_type):
    """
    Return True if the media type is a valid form media type.
    """
    base_media_type, params = parse_header(media_type.encode(HTTP_HEADER_ENCODING))
    # Form content arrives either urlencoded or as multipart form data.
    return base_media_type in ('application/x-www-form-urlencoded',
                               'multipart/form-data')
class override_method(object):
    """
    Context manager that temporarily swaps the HTTP method on a request,
    keeping `view.request` in sync. On exit the original request is
    restored.

    Usage:

        with override_method(view, request, 'POST') as request:
            ...  # Do stuff with `view` and `request`
    """
    def __init__(self, view, request, method):
        self.view = view
        self.request = request
        self.method = method

    def __enter__(self):
        cloned = clone_request(self.request, self.method)
        self.view.request = cloned
        return cloned

    def __exit__(self, *args, **kwarg):
        self.view.request = self.request
class Empty(object):
    """
    Sentinel placeholder for attributes that have not been set yet.

    Cannot use `None` for this purpose, as `None` may itself be a valid
    attribute value; see `_hasattr()`.
    """
    pass
def _hasattr(obj, name):
    """
    Return True if `obj.name` has been set, i.e. is not the `Empty` sentinel.
    """
    # PEP 8: identity tests read `x is not y`, not `not x is y`.
    return getattr(obj, name) is not Empty
def clone_request(request, method):
    """
    Internal helper method to clone a request, replacing with a different
    HTTP method. Used for checking permissions against other methods.
    """
    ret = Request(request=request._request,
                  parsers=request.parsers,
                  authenticators=request.authenticators,
                  negotiator=request.negotiator,
                  parser_context=request.parser_context)
    # Copy the lazily-parsed state across so the clone does not re-parse.
    for attr in ('_data', '_files', '_content_type', '_stream'):
        setattr(ret, attr, getattr(request, attr))
    ret._method = method
    # Authentication state only exists once _authenticate() has run, so
    # copy each attribute conditionally.
    for attr in ('_user', '_auth', '_authenticator'):
        if hasattr(request, attr):
            setattr(ret, attr, getattr(request, attr))
    return ret
class ForcedAuthentication(object):
    """
    Authentication class used when the test client or request factory has
    forcibly authenticated the request: it unconditionally returns the
    forced (user, token) pair.
    """
    def __init__(self, force_user, force_token):
        self.force_user, self.force_token = force_user, force_token

    def authenticate(self, request):
        # The request argument is ignored; authentication was pre-decided.
        return (self.force_user, self.force_token)
class Request(object):
    """
    Wrapper allowing to enhance a standard `HttpRequest` instance.

    All parsing and authentication is performed lazily: the `Empty`
    sentinel marks state that has not been computed yet, and the public
    properties trigger the corresponding `_load_*` helper on first access.

    Kwargs:
        - request(HttpRequest). The original request instance.
        - parsers_classes(list/tuple). The parsers to use for parsing the
          request content.
        - authentication_classes(list/tuple). The authentications used to try
          authenticating the request's user.
    """
    # Hidden form-field names used for method/content overloading; pulled
    # from the API settings so they can be customised per project.
    _METHOD_PARAM = api_settings.FORM_METHOD_OVERRIDE
    _CONTENT_PARAM = api_settings.FORM_CONTENT_OVERRIDE
    _CONTENTTYPE_PARAM = api_settings.FORM_CONTENTTYPE_OVERRIDE
    def __init__(self, request, parsers=None, authenticators=None,
                 negotiator=None, parser_context=None):
        """
        Wrap `request`, deferring content parsing and authentication until
        the corresponding attributes are first accessed.
        """
        self._request = request
        self.parsers = parsers or ()
        self.authenticators = authenticators or ()
        self.negotiator = negotiator or self._default_negotiator()
        self.parser_context = parser_context
        # `Empty` marks lazily-computed state that has not been loaded yet.
        self._data = Empty
        self._files = Empty
        self._method = Empty
        self._content_type = Empty
        self._stream = Empty
        if self.parser_context is None:
            self.parser_context = {}
        self.parser_context['request'] = self
        self.parser_context['encoding'] = request.encoding or settings.DEFAULT_CHARSET
        # The test client / request factory may have forced authentication
        # on the underlying request; honour it by replacing authenticators.
        force_user = getattr(request, '_force_auth_user', None)
        force_token = getattr(request, '_force_auth_token', None)
        if (force_user is not None or force_token is not None):
            forced_auth = ForcedAuthentication(force_user, force_token)
            self.authenticators = (forced_auth,)
    def _default_negotiator(self):
        # Fall back to the project-configured content negotiation class.
        return api_settings.DEFAULT_CONTENT_NEGOTIATION_CLASS()
    @property
    def method(self):
        """
        Returns the HTTP method.
        This allows the `method` to be overridden by using a hidden `form`
        field on a form POST request.
        """
        if not _hasattr(self, '_method'):
            self._load_method_and_content_type()
        return self._method
    @property
    def content_type(self):
        """
        Returns the content type header.
        This should be used instead of `request.META.get('HTTP_CONTENT_TYPE')`,
        as it allows the content type to be overridden by using a hidden form
        field on a form POST request.
        """
        if not _hasattr(self, '_content_type'):
            self._load_method_and_content_type()
        return self._content_type
    @property
    def stream(self):
        """
        Returns an object that may be used to stream the request content.
        """
        if not _hasattr(self, '_stream'):
            self._load_stream()
        return self._stream
    @property
    def QUERY_PARAMS(self):
        """
        More semantically correct name for request.GET.
        """
        return self._request.GET
    @property
    def DATA(self):
        """
        Parses the request body and returns the data.
        Similar to usual behaviour of `request.POST`, except that it handles
        arbitrary parsers, and also works on methods other than POST (eg PUT).
        """
        if not _hasattr(self, '_data'):
            self._load_data_and_files()
        return self._data
    @property
    def FILES(self):
        """
        Parses the request body and returns any files uploaded in the request.
        Similar to usual behaviour of `request.FILES`, except that it handles
        arbitrary parsers, and also works on methods other than POST (eg PUT).
        """
        if not _hasattr(self, '_files'):
            self._load_data_and_files()
        return self._files
    @property
    def user(self):
        """
        Returns the user associated with the current request, as authenticated
        by the authentication classes provided to the request.
        """
        if not hasattr(self, '_user'):
            self._authenticate()
        return self._user
    @user.setter
    def user(self, value):
        """
        Sets the user on the current request. This is necessary to maintain
        compatibility with django.contrib.auth where the user property is
        set in the login and logout functions.
        """
        self._user = value
    @property
    def auth(self):
        """
        Returns any non-user authentication information associated with the
        request, such as an authentication token.
        """
        if not hasattr(self, '_auth'):
            self._authenticate()
        return self._auth
    @auth.setter
    def auth(self, value):
        """
        Sets any non-user authentication information associated with the
        request, such as an authentication token.
        """
        self._auth = value
    @property
    def successful_authenticator(self):
        """
        Return the instance of the authentication instance class that was used
        to authenticate the request, or `None`.
        """
        if not hasattr(self, '_authenticator'):
            self._authenticate()
        return self._authenticator
    def _load_data_and_files(self):
        """
        Parses the request content into self.DATA and self.FILES.
        """
        if not _hasattr(self, '_content_type'):
            self._load_method_and_content_type()
        if not _hasattr(self, '_data'):
            self._data, self._files = self._parse()
    def _load_method_and_content_type(self):
        """
        Sets the method and content_type, and then check if they've
        been overridden.
        """
        self._content_type = self.META.get('HTTP_CONTENT_TYPE',
                                           self.META.get('CONTENT_TYPE', ''))
        # Form overloading may replace _method/_content_type/_stream.
        self._perform_form_overloading()
        if not _hasattr(self, '_method'):
            self._method = self._request.method
            # Allow X-HTTP-METHOD-OVERRIDE header
            self._method = self.META.get('HTTP_X_HTTP_METHOD_OVERRIDE',
                                         self._method)
    def _load_stream(self):
        """
        Return the content body of the request, as a stream.
        """
        try:
            content_length = int(self.META.get('CONTENT_LENGTH',
                                               self.META.get('HTTP_CONTENT_LENGTH')))
        except (ValueError, TypeError):
            content_length = 0
        if content_length == 0:
            self._stream = None
        elif hasattr(self._request, 'read'):
            self._stream = self._request
        else:
            self._stream = BytesIO(self.raw_post_data)
    def _perform_form_overloading(self):
        """
        If this is a form POST request, then we need to check if the method and
        content/content_type have been overridden by setting them in hidden
        form fields or not.
        """
        USE_FORM_OVERLOADING = (
            self._METHOD_PARAM or
            (self._CONTENT_PARAM and self._CONTENTTYPE_PARAM)
        )
        # We only need to use form overloading on form POST requests.
        if (not USE_FORM_OVERLOADING
            or self._request.method != 'POST'
            or not is_form_media_type(self._content_type)):
            return
        # At this point we're committed to parsing the request as form data.
        self._data = self._request.POST
        self._files = self._request.FILES
        # Method overloading - change the method and remove the param from the content.
        if (self._METHOD_PARAM and
            self._METHOD_PARAM in self._data):
            self._method = self._data[self._METHOD_PARAM].upper()
        # Content overloading - modify the content type, and force re-parse.
        if (self._CONTENT_PARAM and
            self._CONTENTTYPE_PARAM and
            self._CONTENT_PARAM in self._data and
            self._CONTENTTYPE_PARAM in self._data):
            self._content_type = self._data[self._CONTENTTYPE_PARAM]
            self._stream = BytesIO(self._data[self._CONTENT_PARAM].encode(self.parser_context['encoding']))
            self._data, self._files = (Empty, Empty)
    def _parse(self):
        """
        Parse the request content, returning a two-tuple of (data, files)
        May raise an `UnsupportedMediaType`, or `ParseError` exception.
        """
        stream = self.stream
        media_type = self.content_type
        if stream is None or media_type is None:
            empty_data = QueryDict('', encoding=self._request._encoding)
            empty_files = MultiValueDict()
            return (empty_data, empty_files)
        parser = self.negotiator.select_parser(self, self.parsers)
        if not parser:
            raise exceptions.UnsupportedMediaType(media_type)
        try:
            parsed = parser.parse(stream, media_type, self.parser_context)
        except:
            # If we get an exception during parsing, fill in empty data and
            # re-raise. Ensures we don't simply repeat the error when
            # attempting to render the browsable renderer response, or when
            # logging the request or similar.
            self._data = QueryDict('', encoding=self._request._encoding)
            self._files = MultiValueDict()
            raise
        # Parser classes may return the raw data, or a
        # DataAndFiles object. Unpack the result as required.
        try:
            return (parsed.data, parsed.files)
        except AttributeError:
            empty_files = MultiValueDict()
            return (parsed, empty_files)
    def _authenticate(self):
        """
        Attempt to authenticate the request using each authentication instance
        in turn.
        Returns a three-tuple of (authenticator, user, authtoken).
        """
        for authenticator in self.authenticators:
            try:
                user_auth_tuple = authenticator.authenticate(self)
            except exceptions.APIException:
                self._not_authenticated()
                raise
            # The first authenticator to return a non-None tuple wins.
            if not user_auth_tuple is None:
                self._authenticator = authenticator
                self._user, self._auth = user_auth_tuple
                return
        self._not_authenticated()
    def _not_authenticated(self):
        """
        Return a three-tuple of (authenticator, user, authtoken), representing
        an unauthenticated request.
        By default this will be (None, AnonymousUser, None).
        """
        self._authenticator = None
        if api_settings.UNAUTHENTICATED_USER:
            self._user = api_settings.UNAUTHENTICATED_USER()
        else:
            self._user = None
        if api_settings.UNAUTHENTICATED_TOKEN:
            self._auth = api_settings.UNAUTHENTICATED_TOKEN()
        else:
            self._auth = None
    def __getattr__(self, attr):
        """
        Proxy other attributes to the underlying HttpRequest object.
        """
        return getattr(self._request, attr)
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/rest_framework/request.py | Python | agpl-3.0 | 14,123 |
import unittest, mock, os, sys, re
from mock import *
from mock import ANY
from tests.fakes import *
import xml.etree.ElementTree as ET
from resources.utils import *
from resources.net_IO import *
from resources.scrap import *
from resources.objects import *
from resources.constants import *
def read_file(path):
    """Return the full contents of the text file at ``path``."""
    with open(path, 'r') as file_obj:
        return file_obj.read()
def read_file_as_json(path):
    """Read the file at ``path`` and parse its contents as JSON.

    The ``encoding`` keyword of ``json.loads`` was deprecated and removed
    in Python 3.9 (it raises TypeError there), and UTF-8 was the default
    anyway, so the raw text is passed directly.
    """
    file_data = read_file(path)
    return json.loads(file_data)
def mocked_gamesdb(url, url_log=None):
    """Side-effect stub for ``net_get_URL``: serve canned MobyGames API
    responses for the URL patterns exercised by the tests.

    Returns ``(file_contents, 200)`` for recognised URLs, and falls back
    to a real ``net_get_URL`` call for anything unrecognised.

    Fixes: ``os.path.join`` replaces hard-coded ``"\\\\"`` separators that
    only worked on Windows, and the regex is a raw string (``\\d`` in a
    plain literal is an invalid escape sequence).
    """
    assets_dir = Test_mobygames_scraper.TEST_ASSETS_DIR
    mocked_json_file = ''
    if 'format=brief&title=' in url:
        mocked_json_file = os.path.join(assets_dir, 'mobygames_castlevania_list.json')
    if 'screenshots' in url:
        mocked_json_file = os.path.join(assets_dir, 'mobygames_castlevania_screenshots.json')
    if 'covers' in url:
        mocked_json_file = os.path.join(assets_dir, 'mobygames_castlevania_covers.json')
    if re.search(r'/games/(\d*)\?', url):
        mocked_json_file = os.path.join(assets_dir, 'mobygames_castlevania.json')
    if mocked_json_file == '':
        return net_get_URL(url)
    print('reading mocked data from file: {}'.format(mocked_json_file))
    return read_file(mocked_json_file), 200
class Test_mobygames_scraper(unittest.TestCase):
    """Tests for the MobyGames scraper using canned API responses."""
    # Paths resolved in setUpClass(); kept as class attributes so the
    # module-level mocked_gamesdb() helper can locate the asset files.
    ROOT_DIR = ''
    TEST_DIR = ''
    TEST_ASSETS_DIR = ''
    @classmethod
    def setUpClass(cls):
        """Resolve test/asset directories and enable debug logging."""
        set_log_level(LOG_DEBUG)
        cls.TEST_DIR = os.path.dirname(os.path.abspath(__file__))
        cls.ROOT_DIR = os.path.abspath(os.path.join(cls.TEST_DIR, os.pardir))
        cls.TEST_ASSETS_DIR = os.path.abspath(os.path.join(cls.TEST_DIR,'assets/'))
        print('ROOT DIR: {}'.format(cls.ROOT_DIR))
        print('TEST DIR: {}'.format(cls.TEST_DIR))
        print('TEST ASSETS DIR: {}'.format(cls.TEST_ASSETS_DIR))
        print('---------------------------------------------------------------------------')
    def get_test_settings(self):
        """Build a minimal settings dict for the scraper under test."""
        settings = {}
        settings['scan_metadata_policy'] = 3 # OnlineScraper only
        settings['scan_asset_policy'] = 0
        settings['metadata_scraper_mode'] = 1
        settings['asset_scraper_mode'] = 1
        settings['scan_clean_tags'] = True
        settings['scan_ignore_scrap_title'] = False
        settings['scraper_metadata'] = 0 # NullScraper
        # Dummy key -- network calls are mocked by mocked_gamesdb().
        settings['scraper_mobygames_apikey'] = 'abc123'
        settings['escape_romfile'] = False
        return settings
    @patch('resources.scrap.net_get_URL', side_effect = mocked_gamesdb)
    def test_scraping_metadata_for_game(self, mock_json_downloader):
        """Metadata scrape of 'castlevania' should yield the right title."""
        # arrange
        settings = self.get_test_settings()
        status_dic = {}
        status_dic['status'] = True
        target = MobyGames(settings)
        # act
        candidates = target.get_candidates('castlevania', 'castlevania', 'Nintendo NES', status_dic)
        actual = target.get_metadata(candidates[0], status_dic)
        # assert
        self.assertTrue(actual)
        self.assertEqual(u'Castlevania', actual['title'])
        print(actual)
    # add actual mobygames apikey above and comment out patch attributes to do live tests
    @patch('resources.scrap.net_get_URL', side_effect = mocked_gamesdb)
    @patch('resources.scrap.net_download_img')
    def test_scraping_assets_for_game(self, mock_img_downloader, mock_json_downloader):
        """Asset scrape should return a result for each requested asset type."""
        # arrange
        settings = self.get_test_settings()
        status_dic = {}
        status_dic['status'] = True
        target = MobyGames(settings)
        assets_to_scrape = [
            g_assetFactory.get_asset_info(ASSET_BOXFRONT_ID),
            g_assetFactory.get_asset_info(ASSET_BOXBACK_ID),
            g_assetFactory.get_asset_info(ASSET_SNAP_ID)]
        # act
        actuals = []
        candidates = target.get_candidates('castlevania', 'castlevania', 'Nintendo NES', status_dic)
        for asset_to_scrape in assets_to_scrape:
            an_actual = target.get_assets(candidates[0], asset_to_scrape, status_dic)
            actuals.append(an_actual)
        # assert
        for actual in actuals:
            self.assertTrue(actual)
print(actuals) | chrisism/plugin.program.advanced.emulator.launcher | tests/mobygames_scraper_test.py | Python | gpl-2.0 | 4,354 |
from django.utils import translation
from nose.tools import eq_
from olympia import amo
from olympia.amo.tests import TestCase, ESTestCase
from olympia.addons.models import Addon
from olympia.reviews import tasks
from olympia.reviews.models import (
check_spam, GroupedRating, Review, ReviewFlag, Spam)
from olympia.users.models import UserProfile
class TestReviewModel(TestCase):
fixtures = ['reviews/test_models']
def test_translations(self):
translation.activate('en-US')
# There's en-US and de translations. We should get en-US.
r1 = Review.objects.get(id=1)
self.trans_eq(r1.title, 'r1 title en', 'en-US')
# There's only a de translation, so we get that.
r2 = Review.objects.get(id=2)
self.trans_eq(r2.title, 'r2 title de', 'de')
translation.activate('de')
# en and de exist, we get de.
r1 = Review.objects.get(id=1)
self.trans_eq(r1.title, 'r1 title de', 'de')
# There's only a de translation, so we get that.
r2 = Review.objects.get(id=2)
self.trans_eq(r2.title, 'r2 title de', 'de')
def test_soft_delete(self):
eq_(Review.objects.count(), 2)
eq_(Review.unfiltered.count(), 2)
Review.objects.get(id=1).delete()
eq_(Review.objects.count(), 1)
eq_(Review.unfiltered.count(), 2)
Review.objects.filter(id=2).delete()
eq_(Review.objects.count(), 0)
eq_(Review.unfiltered.count(), 2)
def test_filter_for_many_to_many(self):
# Check https://bugzilla.mozilla.org/show_bug.cgi?id=1142035.
review = Review.objects.get(id=1)
addon = review.addon
assert review in addon._reviews.all()
# Delete the review: it shouldn't be listed anymore.
review.update(deleted=True)
addon = Addon.objects.get(pk=addon.pk)
assert review not in addon._reviews.all()
def test_no_filter_for_relations(self):
# Check https://bugzilla.mozilla.org/show_bug.cgi?id=1142035.
review = Review.objects.get(id=1)
flag = ReviewFlag.objects.create(review=review,
flag='review_flag_reason_spam')
assert flag.review == review
# Delete the review: reviewflag.review should still work.
review.update(deleted=True)
flag = ReviewFlag.objects.get(pk=flag.pk)
assert flag.review == review
class TestGroupedRating(TestCase):
fixtures = ['reviews/dev-reply']
grouped_ratings = [(1, 0), (2, 0), (3, 0), (4, 1), (5, 0)]
def test_get_none(self):
eq_(GroupedRating.get(3, update_none=False), None)
def test_set(self):
eq_(GroupedRating.get(1865, update_none=False), None)
GroupedRating.set(1865)
eq_(GroupedRating.get(1865, update_none=False), self.grouped_ratings)
def test_cron(self):
eq_(GroupedRating.get(1865, update_none=False), None)
tasks.addon_grouped_rating(1865)
eq_(GroupedRating.get(1865, update_none=False), self.grouped_ratings)
def test_update_none(self):
eq_(GroupedRating.get(1865, update_none=False), None)
eq_(GroupedRating.get(1865, update_none=True), self.grouped_ratings)
class TestSpamTest(TestCase):
fixtures = ['reviews/test_models']
def test_create_not_there(self):
Review.objects.all().delete()
eq_(Review.objects.count(), 0)
check_spam(1)
def test_add(self):
assert Spam().add(Review.objects.all()[0], 'numbers')
class TestRefreshTest(ESTestCase):
fixtures = ['base/users']
def setUp(self):
super(TestRefreshTest, self).setUp()
self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION)
self.user = UserProfile.objects.all()[0]
self.refresh()
eq_(self.get_bayesian_rating(), 0.0)
def get_bayesian_rating(self):
q = Addon.search().filter(id=self.addon.id)
return list(q.values_dict('bayesian_rating'))[0]['bayesian_rating'][0]
def test_created(self):
eq_(self.get_bayesian_rating(), 0.0)
Review.objects.create(addon=self.addon, user=self.user, rating=4)
self.refresh()
eq_(self.get_bayesian_rating(), 4.0)
def test_edited(self):
self.test_created()
r = self.addon.reviews.all()[0]
r.rating = 1
r.save()
self.refresh()
eq_(self.get_bayesian_rating(), 2.5)
def test_deleted(self):
self.test_created()
r = self.addon.reviews.all()[0]
r.delete()
self.refresh()
eq_(self.get_bayesian_rating(), 0.0)
| jpetto/olympia | src/olympia/reviews/tests/test_models.py | Python | bsd-3-clause | 4,623 |
"""passlib.apache - apache password support"""
# XXX: relocate this to passlib.ext.apache?
#=============================================================================
# imports
#=============================================================================
from __future__ import with_statement
# core
from hashlib import md5
import logging; log = logging.getLogger(__name__)
import os
import sys
from warnings import warn
# site
# pkg
from passlib.context import CryptContext
from passlib.exc import ExpectedStringError
from passlib.hash import htdigest
from passlib.utils import consteq, render_bytes, to_bytes, deprecated_method, is_ascii_codec
from passlib.utils.compat import b, bytes, join_bytes, str_to_bascii, u, \
unicode, BytesIO, iteritems, imap, PY3
# local
__all__ = [
'HtpasswdFile',
'HtdigestFile',
]
#=============================================================================
# constants & support
#=============================================================================
_UNSET = object()
_BCOLON = b(":")
# byte values that aren't allowed in fields.
_INVALID_FIELD_CHARS = b(":\n\r\t\x00")
#=============================================================================
# backport of OrderedDict for PY2.5
#=============================================================================
try:
from collections import OrderedDict
except ImportError:
# Python 2.5
class OrderedDict(dict):
"""hacked OrderedDict replacement.
NOTE: this doesn't provide a full OrderedDict implementation,
just the minimum needed by the Htpasswd internals.
"""
def __init__(self):
self._keys = []
def __iter__(self):
return iter(self._keys)
def __setitem__(self, key, value):
if key not in self:
self._keys.append(key)
super(OrderedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(OrderedDict, self).__delitem__(key)
self._keys.remove(key)
def iteritems(self):
return ((key, self[key]) for key in self)
# these aren't used or implemented, so disabling them for safety.
update = pop = popitem = clear = keys = iterkeys = None
#=============================================================================
# common helpers
#=============================================================================
class _CommonFile(object):
"""common framework for HtpasswdFile & HtdigestFile"""
#===================================================================
# instance attrs
#===================================================================
# charset encoding used by file (defaults to utf-8)
encoding = None
# whether users() and other public methods should return unicode or bytes?
# (defaults to False under PY2, True under PY3)
return_unicode = None
# if bound to local file, these will be set.
_path = None # local file path
_mtime = None # mtime when last loaded, or 0
# if true, automatically save to local file after changes are made.
autosave = False
# ordered dict mapping key -> value for all records in database.
# (e.g. user => hash for Htpasswd)
_records = None
#===================================================================
# alt constuctors
#===================================================================
@classmethod
def from_string(cls, data, **kwds):
"""create new object from raw string.
:type data: unicode or bytes
:arg data:
database to load, as single string.
:param \*\*kwds:
all other keywords are the same as in the class constructor
"""
if 'path' in kwds:
raise TypeError("'path' not accepted by from_string()")
self = cls(**kwds)
self.load_string(data)
return self
@classmethod
def from_path(cls, path, **kwds):
"""create new object from file, without binding object to file.
:type path: str
:arg path:
local filepath to load from
:param \*\*kwds:
all other keywords are the same as in the class constructor
"""
self = cls(**kwds)
self.load(path)
return self
#===================================================================
# init
#===================================================================
def __init__(self, path=None, new=False, autoload=True, autosave=False,
encoding="utf-8", return_unicode=PY3,
):
# set encoding
if not encoding:
warn("``encoding=None`` is deprecated as of Passlib 1.6, "
"and will cause a ValueError in Passlib 1.8, "
"use ``return_unicode=False`` instead.",
DeprecationWarning, stacklevel=2)
encoding = "utf-8"
return_unicode = False
elif not is_ascii_codec(encoding):
# htpasswd/htdigest files assumes 1-byte chars, and use ":" separator,
# so only ascii-compatible encodings are allowed.
raise ValueError("encoding must be 7-bit ascii compatible")
self.encoding = encoding
# set other attrs
self.return_unicode = return_unicode
self.autosave = autosave
self._path = path
self._mtime = 0
# init db
if not autoload:
warn("``autoload=False`` is deprecated as of Passlib 1.6, "
"and will be removed in Passlib 1.8, use ``new=True`` instead",
DeprecationWarning, stacklevel=2)
new = True
if path and not new:
self.load()
else:
self._records = OrderedDict()
    def __repr__(self):
        # include only the non-default settings in the repr
        tail = ''
        if self.autosave:
            tail += ' autosave=True'
        if self._path:
            tail += ' path=%r' % self._path
        if self.encoding != "utf-8":
            tail += ' encoding=%r' % self.encoding
        return "<%s 0x%0x%s>" % (self.__class__.__name__, id(self), tail)

    # NOTE: ``path`` is a property so that ``_mtime`` is wiped when it's set.

    def _get_path(self):
        return self._path

    def _set_path(self, value):
        # reset cached mtime when pointing at a different file, so
        # load_if_changed() won't mistake the new file for an unchanged one.
        if value != self._path:
            self._mtime = 0
        self._path = value

    path = property(_get_path, _set_path)

    @property
    def mtime(self):
        """modify time when last loaded (if bound to a local file)"""
        return self._mtime
#===================================================================
# loading
#===================================================================
    def load_if_changed(self):
        """Reload from ``self.path`` only if file has changed since last load"""
        if not self._path:
            raise RuntimeError("%r is not bound to a local file" % self)
        # NOTE: mtime==0 means "never loaded / unknown", so always reload then
        if self._mtime and self._mtime == os.path.getmtime(self._path):
            return False
        self.load()
        return True

    def load(self, path=None, force=True):
        """Load state from local file.

        If no path is specified, attempts to load from ``self.path``.

        :type path: str
        :arg path: local file to load from

        :type force: bool
        :param force:
            if ``force=False``, only load from ``self.path`` if file
            has changed since last load.

            .. deprecated:: 1.6
                This keyword will be removed in Passlib 1.8;
                Applications should use :meth:`load_if_changed` instead.
        """
        if path is not None:
            # explicit path: load without caching mtime (object stays unbound)
            with open(path, "rb") as fh:
                self._mtime = 0
                self._load_lines(fh)
        elif not force:
            warn("%(name)s.load(force=False) is deprecated as of Passlib 1.6,"
                 "and will be removed in Passlib 1.8; "
                 "use %(name)s.load_if_changed() instead." %
                 dict(name=self.__class__.__name__),
                 DeprecationWarning, stacklevel=2)
            return self.load_if_changed()
        elif self._path:
            # bound file: record mtime so load_if_changed() can skip reloads
            with open(self._path, "rb") as fh:
                self._mtime = os.path.getmtime(self._path)
                self._load_lines(fh)
        else:
            raise RuntimeError("%s().path is not set, an explicit path is required" %
                               self.__class__.__name__)
        return True
    def load_string(self, data):
        """Load state from unicode or bytes string, replacing current state"""
        data = to_bytes(data, self.encoding, "data")
        self._mtime = 0
        self._load_lines(BytesIO(data))

    def _load_lines(self, lines):
        """load from sequence of lines"""
        # XXX: found reference that "#" comment lines may be supported by
        #      htpasswd, should verify this, and figure out how to handle them.
        #      if true, this would also affect what can be stored in user field.
        # XXX: if multiple entries for a key, should we use the first one
        #      or the last one? going w/ first entry for now.
        # XXX: how should this behave if parsing fails? currently
        #      it will contain everything that was loaded up to error.
        #      could clear / restore old state instead.
        parse = self._parse_record
        records = self._records = OrderedDict()
        for idx, line in enumerate(lines):
            # lineno passed to the parser is 1-based, for error messages
            key, value = parse(line, idx+1)
            if key not in records:
                records[key] = value

    def _parse_record(self, record, lineno): # pragma: no cover - abstract method
        """parse line of file into (key, value) pair"""
        raise NotImplementedError("should be implemented in subclass")
#===================================================================
# saving
#===================================================================
    def _autosave(self):
        """subclass helper to call save() after any changes"""
        if self.autosave and self._path:
            self.save()

    def save(self, path=None):
        """Save current state to file.

        If no path is specified, attempts to save to ``self.path``.
        """
        if path is not None:
            with open(path, "wb") as fh:
                fh.writelines(self._iter_lines())
        elif self._path:
            # recurse into the explicit-path branch above, then record the new
            # mtime so load_if_changed() treats the file as up to date.
            self.save(self._path)
            self._mtime = os.path.getmtime(self._path)
        else:
            raise RuntimeError("%s().path is not set, cannot autosave" %
                               self.__class__.__name__)

    def to_string(self):
        """Export current state as a string of bytes"""
        return join_bytes(self._iter_lines())

    def _iter_lines(self):
        """iterator yielding lines of database"""
        return (self._render_record(key,value) for key,value in iteritems(self._records))

    def _render_record(self, key, value): # pragma: no cover - abstract method
        """given key/value pair, encode as line of file"""
        raise NotImplementedError("should be implemented in subclass")
#===================================================================
# field encoding
#===================================================================
    def _encode_user(self, user):
        """user-specific wrapper for _encode_field()"""
        return self._encode_field(user, "user")

    def _encode_realm(self, realm): # pragma: no cover - abstract method
        """realm-specific wrapper for _encode_field()"""
        return self._encode_field(realm, "realm")

    def _encode_field(self, value, param="field"):
        """convert field to internal representation.

        internal representation is always bytes. byte strings are left as-is,
        unicode strings encoding using file's default encoding (or ``utf-8``
        if no encoding has been specified).

        :raises UnicodeEncodeError:
            if unicode value cannot be encoded using default encoding.

        :raises ValueError:
            if resulting byte string contains a forbidden character,
            or is too long (>255 bytes).

        :returns:
            encoded identifer as bytes
        """
        if isinstance(value, unicode):
            value = value.encode(self.encoding)
        elif not isinstance(value, bytes):
            raise ExpectedStringError(value, param)
        # NOTE: limits below are measured on the *encoded* byte string
        if len(value) > 255:
            raise ValueError("%s must be at most 255 characters: %r" %
                             (param, value))
        if any(c in _INVALID_FIELD_CHARS for c in value):
            raise ValueError("%s contains invalid characters: %r" %
                             (param, value,))
        return value

    def _decode_field(self, value):
        """decode field from internal representation to format
        returns by users() method, etc.

        :raises UnicodeDecodeError:
            if unicode value cannot be decoded using default encoding.
            (usually indicates wrong encoding set for file).

        :returns:
            field as unicode or bytes, as appropriate.
        """
        assert isinstance(value, bytes), "expected value to be bytes"
        if self.return_unicode:
            return value.decode(self.encoding)
        else:
            return value
# FIXME: htpasswd doc says passwords limited to 255 chars under Windows & MPE,
# and that longer ones are truncated. this may be side-effect of those
# platforms supporting the 'plaintext' scheme. these classes don't currently
# check for this.
#===================================================================
# eoc
#===================================================================
#=============================================================================
# htpasswd editing
#=============================================================================
#: default CryptContext used by HtpasswdFile
# TODO: update this to support everything in host_context (where available),
# and note in the documentation that the default is no longer guaranteed to be portable
# across platforms.
# c.f. http://httpd.apache.org/docs/2.2/programs/htpasswd.html
#: default CryptContext used by HtpasswdFile; order lists the schemes
#: the context will recognize, with the first one used for new hashes.
htpasswd_context = CryptContext([
    # man page notes supported everywhere; is default on Windows, Netware, TPF
    "apr_md5_crypt",

    # [added in passlib 1.6.3]
    # apache requires host crypt() support; but can generate natively
    # (as of https://bz.apache.org/bugzilla/show_bug.cgi?id=49288)
    "bcrypt",

    # [added in passlib 1.6.3]
    # apache requires host crypt() support; and can't generate natively
    "sha256_crypt",
    "sha512_crypt",

    # man page notes apache does NOT support this on Windows, Netware, TPF
    "des_crypt",

    # man page notes intended only for transitioning htpasswd <-> ldap
    "ldap_sha1",

    # man page notes apache ONLY supports this on Windows, Netware, TPF
    "plaintext"
    ])

#: scheme that will be used when 'portable' is requested.
portable_scheme = "apr_md5_crypt"
class HtpasswdFile(_CommonFile):
"""class for reading & writing Htpasswd files.
The class constructor accepts the following arguments:
:type path: filepath
:param path:
Specifies path to htpasswd file, use to implicitly load from and save to.
This class has two modes of operation:
1. It can be "bound" to a local file by passing a ``path`` to the class
constructor. In this case it will load the contents of the file when
created, and the :meth:`load` and :meth:`save` methods will automatically
load from and save to that file if they are called without arguments.
2. Alternately, it can exist as an independant object, in which case
:meth:`load` and :meth:`save` will require an explicit path to be
provided whenever they are called. As well, ``autosave`` behavior
will not be available.
This feature is new in Passlib 1.6, and is the default if no
``path`` value is provided to the constructor.
This is also exposed as a readonly instance attribute.
:type new: bool
:param new:
Normally, if *path* is specified, :class:`HtpasswdFile` will
immediately load the contents of the file. However, when creating
a new htpasswd file, applications can set ``new=True`` so that
the existing file (if any) will not be loaded.
.. versionadded:: 1.6
This feature was previously enabled by setting ``autoload=False``.
That alias has been deprecated, and will be removed in Passlib 1.8
:type autosave: bool
:param autosave:
Normally, any changes made to an :class:`HtpasswdFile` instance
will not be saved until :meth:`save` is explicitly called. However,
if ``autosave=True`` is specified, any changes made will be
saved to disk immediately (assuming *path* has been set).
This is also exposed as a writeable instance attribute.
:type encoding: str
:param encoding:
Optionally specify character encoding used to read/write file
and hash passwords. Defaults to ``utf-8``, though ``latin-1``
is the only other commonly encountered encoding.
This is also exposed as a readonly instance attribute.
:type default_scheme: str
:param default_scheme:
Optionally specify default scheme to use when encoding new passwords.
May be any of ``"bcrypt"``, ``"sha256_crypt"``, ``"apr_md5_crypt"``, ``"des_crypt"``,
``"ldap_sha1"``, ``"plaintext"``. It defaults to ``"apr_md5_crypt"``.
.. note::
Some hashes are only supported by apache / htpasswd on certain operating systems
(e.g. bcrypt on BSD, sha256_crypt on linux). To get the strongest
hash that's still portable, applications can specify ``default_scheme="portable"``.
.. versionadded:: 1.6
This keyword was previously named ``default``. That alias
has been deprecated, and will be removed in Passlib 1.8.
.. versionchanged:: 1.6.3
Added support for ``"bcrypt"``, ``"sha256_crypt"``, and ``"portable"``.
:type context: :class:`~passlib.context.CryptContext`
:param context:
:class:`!CryptContext` instance used to encrypt
and verify the hashes found in the htpasswd file.
The default value is a pre-built context which supports all
of the hashes officially allowed in an htpasswd file.
This is also exposed as a readonly instance attribute.
.. warning::
This option may be used to add support for non-standard hash
formats to an htpasswd file. However, the resulting file
will probably not be usable by another application,
and particularly not by Apache.
:param autoload:
Set to ``False`` to prevent the constructor from automatically
loaded the file from disk.
.. deprecated:: 1.6
This has been replaced by the *new* keyword.
Instead of setting ``autoload=False``, you should use
``new=True``. Support for this keyword will be removed
in Passlib 1.8.
:param default:
Change the default algorithm used to encrypt new passwords.
.. deprecated:: 1.6
This has been renamed to *default_scheme* for clarity.
Support for this alias will be removed in Passlib 1.8.
Loading & Saving
================
.. automethod:: load
.. automethod:: load_if_changed
.. automethod:: load_string
.. automethod:: save
.. automethod:: to_string
Inspection
================
.. automethod:: users
.. automethod:: check_password
.. automethod:: get_hash
Modification
================
.. automethod:: set_password
.. automethod:: delete
Alternate Constructors
======================
.. automethod:: from_string
Attributes
==========
.. attribute:: path
Path to local file that will be used as the default
for all :meth:`load` and :meth:`save` operations.
May be written to, initialized by the *path* constructor keyword.
.. attribute:: autosave
Writeable flag indicating whether changes will be automatically
written to *path*.
Errors
======
:raises ValueError:
All of the methods in this class will raise a :exc:`ValueError` if
any user name contains a forbidden character (one of ``:\\r\\n\\t\\x00``),
or is longer than 255 characters.
"""
#===================================================================
# instance attrs
#===================================================================
# NOTE: _records map stores <user> for the key, and <hash> for the value,
# both in bytes which use self.encoding
#===================================================================
# init & serialization
#===================================================================
def __init__(self, path=None, default_scheme=None, context=htpasswd_context,
**kwds):
if 'default' in kwds:
warn("``default`` is deprecated as of Passlib 1.6, "
"and will be removed in Passlib 1.8, it has been renamed "
"to ``default_scheem``.",
DeprecationWarning, stacklevel=2)
default_scheme = kwds.pop("default")
if default_scheme:
if default_scheme == "portable":
default_scheme = portable_scheme
context = context.copy(default=default_scheme)
self.context = context
super(HtpasswdFile, self).__init__(path, **kwds)
    def _parse_record(self, record, lineno):
        # NOTE: should return (user, hash) tuple
        # expects "<user>:<hash>" per line
        result = record.rstrip().split(_BCOLON)
        if len(result) != 2:
            raise ValueError("malformed htpasswd file (error reading line %d)"
                             % lineno)
        return result

    def _render_record(self, user, hash):
        # emit one "<user>:<hash>\n" line as bytes
        return render_bytes("%s:%s\n", user, hash)
#===================================================================
# public methods
#===================================================================
    def users(self):
        """Return list of all users in database"""
        return [self._decode_field(user) for user in self._records]

    ##def has_user(self, user):
    ##    "check whether entry is present for user"
    ##    return self._encode_user(user) in self._records

    ##def rename(self, old, new):
    ##    """rename user account"""
    ##    old = self._encode_user(old)
    ##    new = self._encode_user(new)
    ##    hash = self._records.pop(old)
    ##    self._records[new] = hash
    ##    self._autosave()

    def set_password(self, user, password):
        """Set password for user; adds user if needed.

        :returns:
            * ``True`` if existing user was updated.
            * ``False`` if user account was added.

        .. versionchanged:: 1.6
            This method was previously called ``update``, it was renamed
            to prevent ambiguity with the dictionary method.
            The old alias is deprecated, and will be removed in Passlib 1.8.
        """
        user = self._encode_user(user)
        hash = self.context.encrypt(password)
        if PY3:
            # hashes are unicode under py3 -- store as bytes like the file
            hash = hash.encode(self.encoding)
        existing = (user in self._records)
        self._records[user] = hash
        self._autosave()
        return existing

    @deprecated_method(deprecated="1.6", removed="1.8",
                       replacement="set_password")
    def update(self, user, password):
        """set password for user"""
        return self.set_password(user, password)
    def get_hash(self, user):
        """Return hash stored for user, or ``None`` if user not found.

        .. versionchanged:: 1.6
            This method was previously named ``find``, it was renamed
            for clarity. The old name is deprecated, and will be removed
            in Passlib 1.8.
        """
        try:
            return self._records[self._encode_user(user)]
        except KeyError:
            return None

    @deprecated_method(deprecated="1.6", removed="1.8",
                       replacement="get_hash")
    def find(self, user):
        """return hash for user"""
        return self.get_hash(user)

    # XXX: rename to something more explicit, like delete_user()?
    def delete(self, user):
        """Delete user's entry.

        :returns:
            * ``True`` if user deleted.
            * ``False`` if user not found.
        """
        try:
            del self._records[self._encode_user(user)]
        except KeyError:
            return False
        self._autosave()
        return True
    def check_password(self, user, password):
        """Verify password for specified user.

        :returns:
            * ``None`` if user not found.
            * ``False`` if user found, but password does not match.
            * ``True`` if user found and password matches.

        .. versionchanged:: 1.6
            This method was previously called ``verify``, it was renamed
            to prevent ambiguity with the :class:`!CryptContext` method.
            The old alias is deprecated, and will be removed in Passlib 1.8.
        """
        user = self._encode_user(user)
        hash = self._records.get(user)
        if hash is None:
            return None
        if isinstance(password, unicode):
            # NOTE: encoding password to match file, making the assumption
            # that server will use same encoding to hash the password.
            password = password.encode(self.encoding)
        ok, new_hash = self.context.verify_and_update(password, hash)
        if ok and new_hash is not None:
            # rehash user's password if old hash was deprecated
            self._records[user] = new_hash
            self._autosave()
        return ok

    @deprecated_method(deprecated="1.6", removed="1.8",
                       replacement="check_password")
    def verify(self, user, password):
        """verify password for user"""
        return self.check_password(user, password)
#===================================================================
# eoc
#===================================================================
#=============================================================================
# htdigest editing
#=============================================================================
class HtdigestFile(_CommonFile):
"""class for reading & writing Htdigest files.
The class constructor accepts the following arguments:
:type path: filepath
:param path:
Specifies path to htdigest file, use to implicitly load from and save to.
This class has two modes of operation:
1. It can be "bound" to a local file by passing a ``path`` to the class
constructor. In this case it will load the contents of the file when
created, and the :meth:`load` and :meth:`save` methods will automatically
load from and save to that file if they are called without arguments.
2. Alternately, it can exist as an independant object, in which case
:meth:`load` and :meth:`save` will require an explicit path to be
provided whenever they are called. As well, ``autosave`` behavior
will not be available.
This feature is new in Passlib 1.6, and is the default if no
``path`` value is provided to the constructor.
This is also exposed as a readonly instance attribute.
:type default_realm: str
:param default_realm:
If ``default_realm`` is set, all the :class:`HtdigestFile`
methods that require a realm will use this value if one is not
provided explicitly. If unset, they will raise an error stating
that an explicit realm is required.
This is also exposed as a writeable instance attribute.
.. versionadded:: 1.6
:type new: bool
:param new:
Normally, if *path* is specified, :class:`HtdigestFile` will
immediately load the contents of the file. However, when creating
a new htpasswd file, applications can set ``new=True`` so that
the existing file (if any) will not be loaded.
.. versionadded:: 1.6
This feature was previously enabled by setting ``autoload=False``.
That alias has been deprecated, and will be removed in Passlib 1.8
:type autosave: bool
:param autosave:
Normally, any changes made to an :class:`HtdigestFile` instance
will not be saved until :meth:`save` is explicitly called. However,
if ``autosave=True`` is specified, any changes made will be
saved to disk immediately (assuming *path* has been set).
This is also exposed as a writeable instance attribute.
:type encoding: str
:param encoding:
Optionally specify character encoding used to read/write file
and hash passwords. Defaults to ``utf-8``, though ``latin-1``
is the only other commonly encountered encoding.
This is also exposed as a readonly instance attribute.
:param autoload:
Set to ``False`` to prevent the constructor from automatically
loaded the file from disk.
.. deprecated:: 1.6
This has been replaced by the *new* keyword.
Instead of setting ``autoload=False``, you should use
``new=True``. Support for this keyword will be removed
in Passlib 1.8.
Loading & Saving
================
.. automethod:: load
.. automethod:: load_if_changed
.. automethod:: load_string
.. automethod:: save
.. automethod:: to_string
Inspection
==========
.. automethod:: realms
.. automethod:: users
.. automethod:: check_password(user[, realm], password)
.. automethod:: get_hash
Modification
============
.. automethod:: set_password(user[, realm], password)
.. automethod:: delete
.. automethod:: delete_realm
Alternate Constructors
======================
.. automethod:: from_string
Attributes
==========
.. attribute:: default_realm
The default realm that will be used if one is not provided
to methods that require it. By default this is ``None``,
in which case an explicit realm must be provided for every
method call. Can be written to.
.. attribute:: path
Path to local file that will be used as the default
for all :meth:`load` and :meth:`save` operations.
May be written to, initialized by the *path* constructor keyword.
.. attribute:: autosave
Writeable flag indicating whether changes will be automatically
written to *path*.
Errors
======
:raises ValueError:
All of the methods in this class will raise a :exc:`ValueError` if
any user name or realm contains a forbidden character (one of ``:\\r\\n\\t\\x00``),
or is longer than 255 characters.
"""
#===================================================================
# instance attrs
#===================================================================
# NOTE: _records map stores (<user>,<realm>) for the key,
# and <hash> as the value, all as <self.encoding> bytes.
# NOTE: unlike htpasswd, this class doesn't use a CryptContext,
# as only one hash format is supported: htdigest.
# optionally specify default realm that will be used if none
# is provided to a method call. otherwise realm is always required.
    # optionally specify default realm that will be used if none
    # is provided to a method call. otherwise realm is always required.
    default_realm = None

    #===================================================================
    # init & serialization
    #===================================================================
    def __init__(self, path=None, default_realm=None, **kwds):
        self.default_realm = default_realm
        super(HtdigestFile, self).__init__(path, **kwds)

    def _parse_record(self, record, lineno):
        # expects "<user>:<realm>:<hash>" per line
        result = record.rstrip().split(_BCOLON)
        if len(result) != 3:
            raise ValueError("malformed htdigest file (error reading line %d)"
                             % lineno)
        user, realm, hash = result
        # records are keyed on the (user, realm) pair
        return (user, realm), hash

    def _render_record(self, key, hash):
        user, realm = key
        return render_bytes("%s:%s:%s\n", user, realm, hash)

    def _encode_realm(self, realm):
        # override default _encode_realm to fill in default realm field
        if realm is None:
            realm = self.default_realm
            if realm is None:
                raise TypeError("you must specify a realm explicitly, "
                                "or set the default_realm attribute")
        return self._encode_field(realm, "realm")
#===================================================================
# public methods
#===================================================================
    def realms(self):
        """Return list of all realms in database"""
        # keys are (user, realm) tuples; collapse to the unique realms
        realms = set(key[1] for key in self._records)
        return [self._decode_field(realm) for realm in realms]

    def users(self, realm=None):
        """Return list of all users in specified realm.

        * uses ``self.default_realm`` if no realm explicitly provided.
        * returns empty list if realm not found.
        """
        realm = self._encode_realm(realm)
        return [self._decode_field(key[0]) for key in self._records
                if key[1] == realm]

    ##def has_user(self, user, realm=None):
    ##    "check if user+realm combination exists"
    ##    user = self._encode_user(user)
    ##    realm = self._encode_realm(realm)
    ##    return (user,realm) in self._records

    ##def rename_realm(self, old, new):
    ##    """rename all accounts in realm"""
    ##    old = self._encode_realm(old)
    ##    new = self._encode_realm(new)
    ##    keys = [key for key in self._records if key[1] == old]
    ##    for key in keys:
    ##        hash = self._records.pop(key)
    ##        self._records[key[0],new] = hash
    ##    self._autosave()
    ##    return len(keys)

    ##def rename(self, old, new, realm=None):
    ##    """rename user account"""
    ##    old = self._encode_user(old)
    ##    new = self._encode_user(new)
    ##    realm = self._encode_realm(realm)
    ##    hash = self._records.pop((old,realm))
    ##    self._records[new,realm] = hash
    ##    self._autosave()
    def set_password(self, user, realm=None, password=_UNSET):
        """Set password for user; adds user & realm if needed.

        If ``self.default_realm`` has been set, this may be called
        with the syntax ``set_password(user, password)``,
        otherwise it must be called with all three arguments:
        ``set_password(user, realm, password)``.

        :returns:
            * ``True`` if existing user was updated
            * ``False`` if user account added.
        """
        if password is _UNSET:
            # called w/ two args - (user, password), use default realm
            realm, password = None, realm
        user = self._encode_user(user)
        realm = self._encode_realm(realm)
        key = (user, realm)
        existing = (key in self._records)
        # htdigest hashes incorporate user+realm, so both must be passed in
        hash = htdigest.encrypt(password, user, realm, encoding=self.encoding)
        if PY3:
            # hashes are unicode under py3 -- store as bytes like the file
            hash = hash.encode(self.encoding)
        self._records[key] = hash
        self._autosave()
        return existing

    @deprecated_method(deprecated="1.6", removed="1.8",
                       replacement="set_password")
    def update(self, user, realm, password):
        """set password for user"""
        return self.set_password(user, realm, password)

    # XXX: rename to something more explicit, like get_hash()?
    def get_hash(self, user, realm=None):
        """Return :class:`~passlib.hash.htdigest` hash stored for user.

        * uses ``self.default_realm`` if no realm explicitly provided.
        * returns ``None`` if user or realm not found.

        .. versionchanged:: 1.6
            This method was previously named ``find``, it was renamed
            for clarity. The old name is deprecated, and will be removed
            in Passlib 1.8.
        """
        key = (self._encode_user(user), self._encode_realm(realm))
        hash = self._records.get(key)
        if hash is None:
            return None
        if PY3:
            hash = hash.decode(self.encoding)
        return hash

    @deprecated_method(deprecated="1.6", removed="1.8",
                       replacement="get_hash")
    def find(self, user, realm):
        """return hash for user"""
        return self.get_hash(user, realm)
# XXX: rename to something more explicit, like delete_user()?
    def delete(self, user, realm=None):
        """Delete user's entry for specified realm.

        if realm is not specified, uses ``self.default_realm``.

        :returns:
            * ``True`` if user deleted,
            * ``False`` if user not found in realm.
        """
        key = (self._encode_user(user), self._encode_realm(realm))
        try:
            del self._records[key]
        except KeyError:
            return False
        self._autosave()
        return True

    def delete_realm(self, realm):
        """Delete all users for specified realm.

        if realm is not specified, uses ``self.default_realm``.

        :returns: number of users deleted (0 if realm not found)
        """
        realm = self._encode_realm(realm)
        records = self._records
        keys = [key for key in records if key[1] == realm]
        for key in keys:
            del records[key]
        self._autosave()
        return len(keys)

    def check_password(self, user, realm=None, password=_UNSET):
        """Verify password for specified user + realm.

        If ``self.default_realm`` has been set, this may be called
        with the syntax ``check_password(user, password)``,
        otherwise it must be called with all three arguments:
        ``check_password(user, realm, password)``.

        :returns:
            * ``None`` if user or realm not found.
            * ``False`` if user found, but password does not match.
            * ``True`` if user found and password matches.

        .. versionchanged:: 1.6
            This method was previously called ``verify``, it was renamed
            to prevent ambiguity with the :class:`!CryptContext` method.
            The old alias is deprecated, and will be removed in Passlib 1.8.
        """
        if password is _UNSET:
            # called w/ two args - (user, password), use default realm
            realm, password = None, realm
        user = self._encode_user(user)
        realm = self._encode_realm(realm)
        hash = self._records.get((user,realm))
        if hash is None:
            return None
        return htdigest.verify(password, hash, user, realm,
                               encoding=self.encoding)

    @deprecated_method(deprecated="1.6", removed="1.8",
                       replacement="check_password")
    def verify(self, user, realm, password):
        """verify password for user"""
        return self.check_password(user, realm, password)
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#=============================================================================
| cgstudiomap/cgstudiomap | main/eggs/passlib-1.6.5-py2.7.egg/passlib/apache.py | Python | agpl-3.0 | 39,957 |
from os.path import dirname
"""
Process MPDatClient responses
"""
def get_files_and_dirs_from_db(items):
    """
    Split a mixed MPD listing into its file and directory entries.

    Returns a ``(files, directories)`` tuple. A "directory" key takes
    precedence over a "file" key; entries with neither are ignored.
    """
    dir_names = [entry["directory"] for entry in items if "directory" in entry]
    file_names = [entry["file"] for entry in items
                  if "file" in entry and "directory" not in entry]
    return (file_names, dir_names)
def process_song(item):
    """
    Normalize a song dict in place and return it.

    Derives a "dir" key from the "file" path, and coerces the "pos"
    and "time" values to integers when present.
    """
    if "file" in item:
        item["dir"] = dirname(item["file"])
    for key in ("pos", "time"):
        if key in item:
            item[key] = int(item[key])
    return item
| laurentb/mpdat | mpdat/process.py | Python | mit | 735 |
# -*- coding: utf-8 -*-
"""
Appends the codecov token to the 'codecov.yml' file at the root of the repository.
This is done by CI during PRs and builds on the pytest-dev repository so we can upload coverage, at least
until codecov grows some native integration like it has with Travis and AppVeyor.
See discussion in https://github.com/pytest-dev/pytest/pull/6441 for more information.
"""
import os.path
from textwrap import dedent
def main():
    # locate codecov.yml at the repository root (one level up from this script)
    this_dir = os.path.dirname(__file__)
    cov_file = os.path.join(this_dir, "..", "codecov.yml")
    assert os.path.isfile(cov_file), "{cov_file} does not exist".format(
        cov_file=cov_file
    )
    # append rather than overwrite, so the existing config is preserved
    with open(cov_file, "a") as f:
        # token from: https://codecov.io/gh/pytest-dev/pytest/settings
        # use same URL to regenerate it if needed
        text = dedent(
            """
            codecov:
              token: "1eca3b1f-31a2-4fb8-a8c3-138b441b50a7"
            """
        )
        f.write(text)
    print("Token updated:", cov_file)


if __name__ == "__main__":
    main()
| cloudera/hue | desktop/core/ext-py/pytest-4.6.11/scripts/append_codecov_token.py | Python | apache-2.0 | 1,053 |
# Ethan Busbee
# BARQ: Busbee's Assorted Reusable Queues
from heapq import heappop, heappush
class PriorityQueue(object):
    """Minimal min-heap priority queue built on ``heapq`` (Python 2)."""
    # NOTE: these class-level attributes are immediately shadowed by the
    # instance attributes set in __init__
    queue = []
    size = 0

    def __init__(self):
        self.queue = []
        self.size = 0

    def __del__(self):
        self.queue = []
        self.size = 0

    # temporarily only supporting one item of data
    def push(self, weight=0, data=None):
        # if ``weight`` isn't int-like, assume the caller passed only the
        # data (e.g. push("item")) and swap the two arguments
        try: weight = int(weight)
        except: weight, data = data, weight
        self.size = self.size + 1
        heappush(self.queue, (weight, data))
        # print "BARQ: pushed " + str((weight, data))

    def pop(self):
        # return the (weight, data) pair with the smallest weight,
        # or None when the queue is empty
        if self.size > 0:
            print "BARQ: normal pop"
            self.size = self.size - 1
            return heappop(self.queue)
        else: return None
        # else: print "BARQ: empty stack pop"; return None
        # if len(data) > 1: return (weight, data)
        # else: return (weight, data[0])

    def count(self):
        # number of items currently queued
        return self.size

    def empty(self):
        # discard all queued items
        self.__del__()
def testPQ():
pq = PriorityQueue()
pq.push(0, "marf")
pq.push(2, "bark", "bark")
pq.push(1, "le")
while pq.count() > 0: print pq.pop()
| Marfle-Bark/myHUMPS | sim/BARQ.py | Python | mit | 1,078 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Root URL configuration: admin site plus the phonebook app at the site root.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'DjangoPhonebook.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    # Django admin interface
    url(r'^admin/', include(admin.site.urls)),
    # delegate everything else to the phonebook app's urlconf
    url(r'^', include('phonebook.urls'), name='root_path'),
)
) | JulienDrecq/django-phonebook | phonebook/tests/urls.py | Python | bsd-3-clause | 343 |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
import os
import shutil
import subprocess
import sys
import fixtures
import testtools
class D2to1TestCase(testtools.TestCase):
    """Base test case that runs each test inside a scratch copy of the
    bundled 'testpackage' sample project."""

    def setUp(self):
        super(D2to1TestCase, self).setUp()
        # Work in a throw-away copy of the sample package so tests never
        # dirty the source tree.
        self.temp_dir = self.useFixture(fixtures.TempDir()).path
        self.package_dir = os.path.join(self.temp_dir, 'testpackage')
        source_pkg = os.path.join(os.path.dirname(__file__), 'testpackage')
        shutil.copytree(source_pkg, self.package_dir)
        # Capture the current directory now so cleanup restores it after
        # the chdir below.
        self.addCleanup(os.chdir, os.getcwd())
        os.chdir(self.package_dir)

    def tearDown(self):
        # Drop d2to1_testpackage (and its submodules) from the module
        # cache so the next test re-imports it from scratch.
        stale = [name for name in sys.modules
                 if name == 'd2to1_testpackage'
                 or name.startswith('d2to1_testpackage.')]
        for name in stale:
            del sys.modules[name]
        super(D2to1TestCase, self).tearDown()

    def run_setup(self, *args):
        """Invoke ``python setup.py <args>`` inside the test package."""
        return self._run_cmd(sys.executable, ('setup.py',) + args)

    def _run_cmd(self, cmd, args):
        """Run a command in the root of the test working copy.

        Runs *cmd* with the given argument list and returns a tuple of
        (stdout, stderr, returncode), with both streams decoded and
        stripped.
        """
        os.chdir(self.package_dir)
        proc = subprocess.Popen([cmd] + list(args),
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        streams = tuple(s.decode('latin1').strip()
                        for s in proc.communicate())
        print(streams)
        return streams + (proc.returncode,)
| tashaband/RYU295 | pbr-0.5.19-py2.7.egg/pbr/d2to1/tests/__init__.py | Python | apache-2.0 | 3,455 |
from __future__ import absolute_import, unicode_literals
from unittest import TestCase
from ..utils import FieldsParameterParseError, parse_boolean, parse_fields_parameter
class TestParseFieldsParameter(TestCase):
    """Unit tests for parse_fields_parameter().

    Each parsed field is represented as a (name, negated, subfields)
    triple: subfields is None unless a nested '(...)' group was given.
    Covers accepted expressions (plain, negated, nested, and the '*'
    and '_' specials) plus the exact error messages raised for
    malformed input.
    """
    # GOOD STUFF
    def test_valid_single_field(self):
        parsed = parse_fields_parameter('test')
        self.assertEqual(parsed, [
            ('test', False, None),
        ])
    def test_valid_multiple_fields(self):
        parsed = parse_fields_parameter('test,another_test')
        self.assertEqual(parsed, [
            ('test', False, None),
            ('another_test', False, None),
        ])
    def test_valid_negated_field(self):
        # A leading '-' sets the negated flag on the parsed triple.
        parsed = parse_fields_parameter('-test')
        self.assertEqual(parsed, [
            ('test', True, None),
        ])
    def test_valid_nested_fields(self):
        parsed = parse_fields_parameter('test(foo,bar)')
        self.assertEqual(parsed, [
            ('test', False, [
                ('foo', False, None),
                ('bar', False, None),
            ]),
        ])
    def test_valid_star_field(self):
        parsed = parse_fields_parameter('*,-test')
        self.assertEqual(parsed, [
            ('*', False, None),
            ('test', True, None),
        ])
    def test_valid_star_with_additional_field(self):
        # Note: '*,test' is not allowed but '*,test(foo)' is
        parsed = parse_fields_parameter('*,test(foo)')
        self.assertEqual(parsed, [
            ('*', False, None),
            ('test', False, [
                ('foo', False, None),
            ]),
        ])
    def test_valid_underscore_field(self):
        parsed = parse_fields_parameter('_,test')
        self.assertEqual(parsed, [
            ('_', False, None),
            ('test', False, None),
        ])
    def test_valid_field_with_underscore_in_middle(self):
        parsed = parse_fields_parameter('a_test')
        self.assertEqual(parsed, [
            ('a_test', False, None),
        ])
    def test_valid_negated_field_with_underscore_in_middle(self):
        parsed = parse_fields_parameter('-a_test')
        self.assertEqual(parsed, [
            ('a_test', True, None),
        ])
    def test_valid_field_with_underscore_at_beginning(self):
        parsed = parse_fields_parameter('_test')
        self.assertEqual(parsed, [
            ('_test', False, None),
        ])
    def test_valid_field_with_underscore_at_end(self):
        parsed = parse_fields_parameter('test_')
        self.assertEqual(parsed, [
            ('test_', False, None),
        ])
    # BAD STUFF
    def test_invalid_char(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('test#')
        self.assertEqual(str(e.exception), "unexpected char '#' at position 4")
    def test_invalid_whitespace_before_identifier(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter(' test')
        self.assertEqual(str(e.exception), "unexpected whitespace at position 0")
    def test_invalid_whitespace_after_identifier(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('test ')
        self.assertEqual(str(e.exception), "unexpected whitespace at position 4")
    def test_invalid_whitespace_after_comma(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('test, test')
        self.assertEqual(str(e.exception), "unexpected whitespace at position 5")
    def test_invalid_whitespace_before_comma(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('test ,test')
        self.assertEqual(str(e.exception), "unexpected whitespace at position 4")
    def test_invalid_unexpected_negation_operator(self):
        # '-' is only valid at the start of a field name.
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('test-')
        self.assertEqual(str(e.exception), "unexpected char '-' at position 4")
    def test_invalid_unexpected_open_bracket(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('test,(foo)')
        self.assertEqual(str(e.exception), "unexpected char '(' at position 5")
    def test_invalid_unexpected_close_bracket(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('test)')
        self.assertEqual(str(e.exception), "unexpected char ')' at position 4")
    def test_invalid_unexpected_comma_in_middle(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('test,,foo')
        self.assertEqual(str(e.exception), "unexpected char ',' at position 5")
    def test_invalid_unexpected_comma_at_end(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('test,foo,')
        self.assertEqual(str(e.exception), "unexpected char ',' at position 9")
    def test_invalid_unclosed_bracket(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('test(foo')
        self.assertEqual(str(e.exception), "unexpected end of input (did you miss out a close bracket?)")
    def test_invalid_subfields_on_negated_field(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('-test(foo)')
        self.assertEqual(str(e.exception), "unexpected char '(' at position 5")
    def test_invalid_star_field_in_wrong_position(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('test,*')
        self.assertEqual(str(e.exception), "'*' must be in the first position")
    def test_invalid_negated_star(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('-*')
        self.assertEqual(str(e.exception), "'*' cannot be negated")
    def test_invalid_star_with_nesting(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('*(foo,bar)')
        self.assertEqual(str(e.exception), "unexpected char '(' at position 1")
    def test_invalid_star_with_chars_after(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('*foo')
        self.assertEqual(str(e.exception), "unexpected char 'f' at position 1")
    def test_invalid_star_with_chars_before(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('foo*')
        self.assertEqual(str(e.exception), "unexpected char '*' at position 3")
    def test_invalid_star_with_additional_field(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('*,foo')
        self.assertEqual(str(e.exception), "additional fields with '*' doesn't make sense")
    def test_invalid_underscore_in_wrong_position(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('test,_')
        self.assertEqual(str(e.exception), "'_' must be in the first position")
    def test_invalid_negated_underscore(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('-_')
        self.assertEqual(str(e.exception), "'_' cannot be negated")
    def test_invalid_underscore_with_nesting(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('_(foo,bar)')
        self.assertEqual(str(e.exception), "unexpected char '(' at position 1")
    def test_invalid_underscore_with_negated_field(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('_,-foo')
        self.assertEqual(str(e.exception), "negated fields with '_' doesn't make sense")
    def test_invalid_star_and_underscore(self):
        with self.assertRaises(FieldsParameterParseError) as e:
            parse_fields_parameter('*,_')
        self.assertEqual(str(e.exception), "'_' must be in the first position")
class TestParseBoolean(TestCase):
    """Unit tests for parse_boolean(): accepted spellings and the
    ValueError raised for anything else."""
    # GOOD STUFF
    def test_valid_true(self):
        self.assertEqual(parse_boolean('true'), True)
    def test_valid_false(self):
        self.assertEqual(parse_boolean('false'), False)
    def test_valid_1(self):
        self.assertEqual(parse_boolean('1'), True)
    def test_valid_0(self):
        self.assertEqual(parse_boolean('0'), False)
    # BAD STUFF
    def test_invalid(self):
        with self.assertRaises(ValueError) as ctx:
            parse_boolean('foo')
        self.assertEqual(str(ctx.exception),
                         "expected 'true' or 'false', got 'foo'")
    def test_invalid_integer(self):
        # Only the exact strings '1' and '0' are accepted as digits.
        with self.assertRaises(ValueError) as ctx:
            parse_boolean('2')
        self.assertEqual(str(ctx.exception),
                         "expected 'true' or 'false', got '2'")
| chrxr/wagtail | wagtail/api/v2/tests/tests.py | Python | bsd-3-clause | 9,127 |
# coding: utf-8
import cgi
from datetime import datetime, date
from io import StringIO
from importlib import import_module
from unittest.mock import patch
from django.conf import settings
from django.contrib.sessions.serializers import JSONSerializer
from django.test import TestCase
from django.core.urlresolvers import reverse
import requests
from ..models import Host, Event, Role, Person, Task
from ..util import (
upload_person_task_csv,
verify_upload_person_task,
normalize_event_index_url,
parse_tags_from_event_index,
)
from .base import TestBase
class UploadPersonTaskCSVTestCase(TestCase):
    """Unit tests for upload_person_task_csv() CSV parsing.

    upload_person_task_csv returns a pair: the list of parsed
    person/task dicts and the list of required columns missing from
    the header. CSV fixture strings are deliberately left-aligned so
    the header row parses correctly.
    """
    def compute_from_string(self, csv_str):
        ''' wrap up buffering the raw string & parsing '''
        csv_buf = StringIO(csv_str)
        # compute and return
        return upload_person_task_csv(csv_buf)
    def test_basic_parsing(self):
        ''' See Person.PERSON_UPLOAD_FIELDS for field ordering '''
        csv = """personal,middle,family,email
john,a,doe,johndoe@email.com
jane,a,doe,janedoe@email.com"""
        person_tasks, _ = self.compute_from_string(csv)
        # assert
        self.assertEqual(len(person_tasks), 2)
        person = person_tasks[0]
        self.assertTrue(set(person.keys()).issuperset(set(Person.PERSON_UPLOAD_FIELDS)))
    def test_csv_without_required_field(self):
        ''' All fields in Person.PERSON_UPLOAD_FIELDS must be in csv '''
        bad_csv = """personal,middle,family
john,,doe"""
        person_tasks, empty_fields = self.compute_from_string(bad_csv)
        self.assertTrue('email' in empty_fields)
    def test_csv_with_mislabeled_field(self):
        ''' It pays to be strict '''
        bad_csv = """personal,middle,family,emailaddress
john,m,doe,john@doe.com"""
        person_tasks, empty_fields = self.compute_from_string(bad_csv)
        self.assertTrue('email' in empty_fields)
    def test_csv_with_empty_lines(self):
        # A trailing all-empty row must be ignored, not parsed.
        csv = """personal,middle,family,emailaddress
john,m,doe,john@doe.com
,,,"""
        person_tasks, empty_fields = self.compute_from_string(csv)
        self.assertEqual(len(person_tasks), 1)
        person = person_tasks[0]
        self.assertEqual(person['personal'], 'john')
    def test_empty_field(self):
        ''' Ensure we don't mis-order fields given blank data '''
        csv = """personal,middle,family,email
john,,doe,johndoe@email.com"""
        person_tasks, _ = self.compute_from_string(csv)
        person = person_tasks[0]
        self.assertEqual(person['middle'], '')
    def test_serializability_of_parsed(self):
        # Parsed rows are stored in the session, so they must survive
        # Django's JSON session serializer.
        csv = """personal,middle,family,email
john,a,doe,johndoe@email.com
jane,a,doe,janedoe@email.com"""
        person_tasks, _ = self.compute_from_string(csv)
        try:
            serializer = JSONSerializer()
            serializer.dumps(person_tasks)
        except TypeError:
            self.fail('Dumping person_tasks to JSON unexpectedly failed!')
    def test_malformed_CSV_with_proper_header_row(self):
        csv = """personal,middle,family,email
This is a malformed CSV
"""
        person_tasks, empty_fields = self.compute_from_string(csv)
        self.assertEqual(person_tasks[0]["personal"],
                         "This is a malformed CSV")
        self.assertEqual(set(empty_fields),
                         set(["middle", "family", "email"]))
class CSVBulkUploadTestBase(TestBase):
    """
    Simply provide necessary setUp and make_data functions that are used in two
    different TestCases
    """
    def setUp(self):
        super(CSVBulkUploadTestBase, self).setUp()
        # Fixtures referenced by the sample CSV below: the 'foobar'
        # event plus the roles the upload code looks up by name.
        test_host = Host.objects.create(domain='example.com',
                                        fullname='Test Host')
        Role.objects.create(name='Instructor')
        Role.objects.create(name='learner')
        Event.objects.create(start=datetime.now(),
                             host=test_host,
                             slug='foobar',
                             admin_fee=100)
        # Log the test client in (helper from TestBase).
        self._setUpUsersAndLogin()
    def make_csv_data(self):
        """
        Sample CSV data
        """
        return """personal,middle,family,email,event,role
John,S,Doe,notin@db.com,foobar,Instructor
"""
    def make_data(self):
        csv_str = self.make_csv_data()
        # upload_person_task_csv gets thoroughly tested in
        # UploadPersonTaskCSVTestCase
        data, _ = upload_person_task_csv(StringIO(csv_str))
        return data
class VerifyUploadPersonTask(CSVBulkUploadTestBase):
    ''' Scenarios to test:
    - Everything is good
    - no 'person' key
    - event DNE
    - role DNE
    - email already exists

    verify_upload_person_task() mutates each row in place, setting an
    'errors' key, and returns whether any row had errors.
    '''
    def test_verify_with_good_data(self):
        good_data = self.make_data()
        has_errors = verify_upload_person_task(good_data)
        self.assertFalse(has_errors)
        # make sure 'errors' wasn't set
        self.assertIsNone(good_data[0]['errors'])
    def test_verify_event_doesnt_exist(self):
        bad_data = self.make_data()
        bad_data[0]['event'] = 'no-such-event'
        has_errors = verify_upload_person_task(bad_data)
        self.assertTrue(has_errors)
        errors = bad_data[0]['errors']
        self.assertTrue(len(errors) == 1)
        self.assertTrue('Event with slug' in errors[0])
    def test_verify_role_doesnt_exist(self):
        bad_data = self.make_data()
        bad_data[0]['role'] = 'foobar'
        has_errors = verify_upload_person_task(bad_data)
        self.assertTrue(has_errors)
        errors = bad_data[0]['errors']
        self.assertTrue(len(errors) == 1)
        self.assertTrue('Role with name' in errors[0])
    def test_verify_email_caseinsensitive_matches(self):
        bad_data = self.make_data()
        # test both matching and case-insensitive matching
        for email in ('harry@hogwarts.edu', 'HARRY@hogwarts.edu'):
            bad_data[0]['email'] = email
            bad_data[0]['personal'] = 'Harry'
            bad_data[0]['middle'] = None
            bad_data[0]['family'] = 'Potter'
            has_errors = verify_upload_person_task(bad_data)
            self.assertFalse(has_errors)
    def test_verify_name_matching_existing_user(self):
        # Same email as the fixture user but a different name: every
        # mismatched name part should produce its own error.
        bad_data = self.make_data()
        bad_data[0]['email'] = 'harry@hogwarts.edu'
        has_errors = verify_upload_person_task(bad_data)
        self.assertTrue(has_errors)
        errors = bad_data[0]['errors']
        self.assertEqual(len(errors), 3)
        self.assertTrue('personal' in errors[0])
        self.assertTrue('middle' in errors[1])
        self.assertTrue('family' in errors[2])
    def test_verify_existing_user_has_workshop_role_provided(self):
        bad_data = [
            {
                'email': 'harry@hogwarts.edu',
                'personal': 'Harry',
                'middle': None,
                'family': 'Potter',
                'event': '',
                'role': '',
            }
        ]
        has_errors = verify_upload_person_task(bad_data)
        self.assertTrue(has_errors)
        errors = bad_data[0]['errors']
        self.assertEqual(len(errors), 1)
        self.assertTrue("User exists but no event and role to assign"
                        in errors[0])
class BulkUploadUsersViewTestCase(CSVBulkUploadTestBase):
    """End-to-end tests of the person/task bulk-upload confirmation
    view.

    Each test seeds the parsed CSV rows into the session under
    'bulk-add-people' (exactly as the upload view would) and then posts
    the confirmation form.
    """
    def setUp(self):
        super().setUp()
        Role.objects.create(name='Helper')
    def test_event_name_dropped(self):
        """
        Test for regression:
        test whether event name is really getting empty when user changes it
        from "foobar" to empty.
        """
        data = self.make_data()
        # self.client is authenticated user so we have access to the session
        store = self.client.session
        store['bulk-add-people'] = data
        store.save()
        # send exactly what's in 'data', except for the 'event' field: leave
        # this one empty
        payload = {
            "personal": data[0]['personal'],
            "middle": data[0]['middle'],
            "family": data[0]['family'],
            "email": data[0]['email'],
            "event": "",
            "role": "",
            "verify": "Verify",
        }
        rv = self.client.post(reverse('person_bulk_add_confirmation'), payload)
        self.assertEqual(rv.status_code, 200)
        # Decode the response with its declared charset before checking
        # that the cleared event name no longer appears.
        _, params = cgi.parse_header(rv['content-type'])
        charset = params['charset']
        content = rv.content.decode(charset)
        self.assertNotIn('foobar', content)
    def test_upload_existing_user(self):
        """
        Check if uploading existing users ends up with them having new role
        assigned.
        This is a special case of upload feature: if user uploads a person that
        already exists we should only assign new role and event to that person.
        """
        csv = """personal,middle,family,email,event,role
Harry,,Potter,harry@hogwarts.edu,foobar,Helper
"""
        data, _ = upload_person_task_csv(StringIO(csv))
        # self.client is authenticated user so we have access to the session
        store = self.client.session
        store['bulk-add-people'] = data
        store.save()
        # send exactly what's in 'data'
        payload = {
            "personal": data[0]['personal'],
            "middle": data[0]['middle'],
            "family": data[0]['family'],
            "email": data[0]['email'],
            "event": data[0]['event'],
            "role": data[0]['role'],
            "confirm": "Confirm",
        }
        people_pre = set(Person.objects.all())
        tasks_pre = set(Task.objects.filter(person=self.harry,
                                            event__slug="foobar"))
        rv = self.client.post(reverse('person_bulk_add_confirmation'), payload,
                              follow=True)
        self.assertEqual(rv.status_code, 200)
        people_post = set(Person.objects.all())
        tasks_post = set(Task.objects.filter(person=self.harry,
                                             event__slug="foobar"))
        # make sure no-one new was added
        self.assertSetEqual(people_pre, people_post)
        # make sure that Harry was assigned a new role
        self.assertNotEqual(tasks_pre, tasks_post)
    def test_upload_existing_user_existing_task(self):
        """
        Check if uploading existing user and assigning existing task to that
        user is silent (ie. no Task nor Person is being created).
        """
        foobar = Event.objects.get(slug="foobar")
        instructor = Role.objects.get(name="Instructor")
        Task.objects.create(person=self.harry, event=foobar, role=instructor)
        csv = """personal,middle,family,email,event,role
Harry,,Potter,harry@hogwarts.edu,foobar,Instructor
"""
        data, _ = upload_person_task_csv(StringIO(csv))
        # self.client is authenticated user so we have access to the session
        store = self.client.session
        store['bulk-add-people'] = data
        store.save()
        # send exactly what's in 'data'
        payload = {
            "personal": data[0]['personal'],
            "middle": data[0]['middle'],
            "family": data[0]['family'],
            "email": data[0]['email'],
            "event": data[0]['event'],
            "role": data[0]['role'],
            "confirm": "Confirm",
        }
        tasks_pre = set(Task.objects.filter(person=self.harry,
                                            event__slug="foobar"))
        users_pre = set(Person.objects.all())
        rv = self.client.post(reverse('person_bulk_add_confirmation'), payload,
                              follow=True)
        tasks_post = set(Task.objects.filter(person=self.harry,
                                             event__slug="foobar"))
        users_post = set(Person.objects.all())
        self.assertEqual(tasks_pre, tasks_post)
        self.assertEqual(users_pre, users_post)
        self.assertEqual(rv.status_code, 200)
    def test_attendance_increases(self):
        """
        Check if uploading tasks with role "learner" increase event's
        attendance.
        """
        foobar = Event.objects.get(slug="foobar")
        assert foobar.attendance is None
        foobar.save()
        csv = """personal,middle,family,email,event,role
Harry,,Potter,harry@hogwarts.edu,foobar,learner
"""
        data, _ = upload_person_task_csv(StringIO(csv))
        # self.client is authenticated user so we have access to the session
        store = self.client.session
        store['bulk-add-people'] = data
        store.save()
        # send exactly what's in 'data'
        payload = {
            "personal": data[0]['personal'],
            "middle": data[0]['middle'],
            "family": data[0]['family'],
            "email": data[0]['email'],
            "event": data[0]['event'],
            "role": data[0]['role'],
            "confirm": "Confirm",
        }
        self.client.post(reverse('person_bulk_add_confirmation'), payload,
                         follow=True)
        foobar.refresh_from_db()
        self.assertEqual(1, foobar.attendance)
class TestEventURLNormalization(TestCase):
    """Every accepted spelling of a workshop URL should normalize to
    the canonical raw.githubusercontent.com address of its
    index.html."""
    def setUp(self):
        self.output = ('https://raw.githubusercontent.com/user-name/'
                       '2015-07-13-City/gh-pages/index.html')
        # github.io and github.com variants, with and without trailing
        # slash or an explicit index.html.
        self.test_cases = [
            'http://user-name.github.io/2015-07-13-City/',
            'https://user-name.github.io/2015-07-13-City/',
            'http://user-name.github.io/2015-07-13-City',
            'https://user-name.github.io/2015-07-13-City',
            'http://user-name.github.io/2015-07-13-City/index.html',
            'https://github.com/user-name/2015-07-13-City/',
            'https://github.com/user-name/2015-07-13-City',
            ('https://github.com/user-name/2015-07-13-City/blob/'
             'gh-pages/index.html'),
        ]
    def test_normalization(self):
        for url in self.test_cases:
            normalized = normalize_event_index_url(url)[0]
            assert normalized == self.output
class TestParsingEventHeaders(TestCase):
    """Tests for parse_tags_from_event_index(): fetching a workshop
    index page and extracting event metadata from its YAML front
    matter. HTTP is mocked out via requests.get / Response patches.
    """
    maxDiff = None # enable long diff in output
    # Response.status_code apparently doesn't exist by default
    @patch.object(requests.models.Response, 'status_code', 404, create=True)
    def test_wrong_url(self):
        with self.assertRaises(requests.exceptions.HTTPError):
            url = 'http://test.github.io/2015-07-13-test/'
            parse_tags_from_event_index(url)
    @patch.object(requests, 'get')
    def test_parsing_event_index(self, mock_get):
        # Fully-populated front matter: every field should come through.
        mock_get.return_value.status_code = 200
        mock_get.return_value.text = """---
layout: workshop
root: .
venue: Euphoric State University
address: Highway to Heaven 42, Academipolis
country: USA
language: us
latlng: 36.998977, -109.045173
humandate: Jul 13-14, 2015
humantime: 9:00 - 17:00
startdate: 2015-07-13
enddate: 2015-07-14
instructor: ["Hermione Granger", "Harry Potter", "Ron Weasley",]
helper: ["Peter Parker", "Tony Stark", "Natasha Romanova",]
contact: hermione@granger.co.uk, rweasley@ministry.gov.uk
etherpad:
eventbrite: 10000000
---
"""
        url = 'http://test.github.io/2015-07-13-test/'
        notes = """INSTRUCTORS: Hermione Granger, Harry Potter, Ron Weasley
HELPERS: Peter Parker, Tony Stark, Natasha Romanova
COUNTRY: USA"""
        expected = {
            'slug': '2015-07-13-test',
            'start': date(2015, 7, 13),
            'end': date(2015, 7, 14),
            'url': 'https://test.github.io/2015-07-13-test/',
            'reg_key': 1e7,  # 1e7 == 10000000 under assertEqual
            'contact': 'hermione@granger.co.uk, rweasley@ministry.gov.uk',
            'notes': notes,
            'venue': 'Euphoric State University',
            'address': 'Highway to Heaven 42, Academipolis',
            'country': 'USA',
            'latitude': '36.998977',
            'longitude': '-109.045173',
        }
        self.assertEqual(parse_tags_from_event_index(url), expected)
    @patch.object(requests, 'get')
    def test_parsing_malformed_file(self, mock_get):
        # All keys misspelled (pluralized): parser should fall back to
        # empty values rather than crash.
        mock_get.return_value.status_code = 200
        mock_get.return_value.text = """---
venues: Euphoric State University
addresses: Highway to Heaven 42, Academipolis
countries: USA
startdates: 2015-07-13
enddates: 2015-07-14
instructors: ["Hermione Granger", "Harry Potter", "Ron Weasley",]
helpers: ["Peter Parker", "Tony Stark", "Natasha Romanova",]
contacts: hermione@granger.co.uk, rweasley@ministry.gov.uk
eventbrites: 10000000
---
"""
        url = 'http://test.github.io/2015-07-13-test/'
        notes = "INSTRUCTORS: \n\nHELPERS: \n\nCOUNTRY: "
        expected = {
            'slug': '2015-07-13-test',
            'start': '',
            'end': '',
            'url': 'https://test.github.io/2015-07-13-test/',
            'reg_key': '',
            'contact': '',
            'notes': notes,
            'venue': '',
            'address': '',
            'country': '',
            'latitude': '',
            'longitude': '',
        }
        self.assertEqual(parse_tags_from_event_index(url), expected)
    @patch.object(requests, 'get')
    def test_parsing_empty_list_values(self, mock_get):
        # instructor/helper present but empty (comment only): parsed as
        # empty lists, not errors.
        mock_get.return_value.status_code = 200
        mock_get.return_value.text = """---
venue: Euphoric State University
address: Highway to Heaven 42, Academipolis
country: USA
language: us
latlng: 36.998977, -109.045173
humandate: Jul 13-14, 2015
humantime: 9:00 - 17:00
startdate: 2015-07-13
enddate: 2015-07-14
instructor: # instructors
helper: # helpers
contact: hermione@granger.co.uk, rweasley@ministry.gov.uk
etherpad:
eventbrite: 10000000
---
"""
        url = 'http://test.github.io/2015-07-13-test/'
        notes = """INSTRUCTORS: \n\nHELPERS: \n\nCOUNTRY: USA"""
        expected = {
            'slug': '2015-07-13-test',
            'start': date(2015, 7, 13),
            'end': date(2015, 7, 14),
            'url': 'https://test.github.io/2015-07-13-test/',
            'reg_key': 1e7,
            'contact': 'hermione@granger.co.uk, rweasley@ministry.gov.uk',
            'notes': notes,
            'venue': 'Euphoric State University',
            'address': 'Highway to Heaven 42, Academipolis',
            'country': 'USA',
            'latitude': '36.998977',
            'longitude': '-109.045173',
        }
        self.assertEqual(parse_tags_from_event_index(url), expected)
    @patch.object(requests, 'get')
    def test_parsing_2letter_country(self, mock_get):
        # Lowercase ISO code should be upper-cased.
        mock_get.return_value.status_code = 200
        mock_get.return_value.text = """---
venue: Euphoric State University
address: Highway to Heaven 42, Academipolis
country: us
startdate: 2015-07-13
enddate: 2015-07-14
instructor: ["Hermione Granger", "Harry Potter", "Ron Weasley",]
helper: ["Peter Parker", "Tony Stark", "Natasha Romanova",]
contact: hermione@granger.co.uk, rweasley@ministry.gov.uk
eventbrite: 10000000
---
"""
        url = 'http://test.github.io/2015-07-13-test/'
        rv = parse_tags_from_event_index(url)
        self.assertEqual(rv['country'], 'US')
    @patch.object(requests, 'get')
    def test_parsing_old_format_country(self, mock_get):
        # Legacy hyphenated country names map to ISO codes.
        mock_get.return_value.status_code = 200
        mock_get.return_value.text = """---
venue: Euphoric State University
address: Highway to Heaven 42, Academipolis
country: United-States
startdate: 2015-07-13
enddate: 2015-07-14
instructor: ["Hermione Granger", "Harry Potter", "Ron Weasley",]
helper: ["Peter Parker", "Tony Stark", "Natasha Romanova",]
contact: hermione@granger.co.uk, rweasley@ministry.gov.uk
eventbrite: 10000000
---
"""
        url = 'http://test.github.io/2015-07-13-test/'
        rv = parse_tags_from_event_index(url)
        self.assertEqual(rv['country'], 'US')
| wking/swc-amy | workshops/test/test_util.py | Python | mit | 19,660 |
""":mod:`padak` --- Padak EDSL template engine
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An EDSL template engine written in Python.
:copyright: (c) 2012 by Hyunjun Kim.
:license: MIT, see LICENSE for more details.
"""
| yoloseem/padak | padak/__init__.py | Python | mit | 239 |
import unittest
from enum import Enum
from test.asserting.config_source import ConfigSourceAssertion, get_fixture_path
from vint.linting.config.config_global_source import ConfigGlobalSource
class TestConfigGlobalSource(ConfigSourceAssertion, unittest.TestCase):
    """Tests for ConfigGlobalSource discovery of user-global config
    files under $HOME and $XDG_CONFIG_HOME."""
    def test_get_config_dict(self):
        """A config under $HOME is found and values get expected types."""
        fake_env = {
            'home_path': get_fixture_path('dummy_home'),
            'xdg_config_home': get_fixture_path('unexistent_xdg_config_home'),
        }
        expected_types = {
            'cmdargs': {
                'verbose': bool,
                'error-limit': int,
                'severity': Enum,
            }
        }
        source = self.initialize_config_source_with_env(ConfigGlobalSource,
                                                        fake_env)
        self.assertConfigValueType(source, expected_types)
    def test_get_config_dict_with_no_global_config(self):
        """No config anywhere yields an empty config dict."""
        fake_env = {
            'home_path': get_fixture_path('unexistent_home'),
            'xdg_config_home': get_fixture_path('unexistent_xdg_config_home'),
        }
        source = self.initialize_config_source_with_env(ConfigGlobalSource,
                                                        fake_env)
        self.assertConfigDict(source, {})
    def test_get_config_dict_with_default_xdg_config_home(self):
        """A config under $XDG_CONFIG_HOME is found when $HOME has none."""
        fake_env = {
            'home_path': get_fixture_path('unexistent_home'),
            'xdg_config_home': get_fixture_path('xdg_config_home'),
        }
        expected_types = {
            'cmdargs': {
                'verbose': bool,
                'error-limit': int,
                'severity': Enum,
            }
        }
        source = self.initialize_config_source_with_env(ConfigGlobalSource,
                                                        fake_env)
        self.assertConfigValueType(source, expected_types)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| RianFuro/vint | test/unit/vint/linting/config/test_config_global_source.py | Python | mit | 1,841 |
#! /usr/bin/env python -u
# coding=utf-8
__author__ = 'xl'
| Wilbeibi/cloudapp-mp2 | internal_use/__init__.py | Python | apache-2.0 | 60 |
import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
    """A poll question together with its publication timestamp."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    def __unicode__(self):  # __unicode__ on Python 2
        return self.question_text
    def was_published_recently(self):
        """Whether the question was published within the last day
        (and not in the future)."""
        now = timezone.now()
        one_day_ago = now - datetime.timedelta(days=1)
        return one_day_ago <= self.pub_date <= now
    # Admin change-list metadata for the was_published_recently column.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
    """One selectable answer for a Question, tracking its vote count."""
    question = models.ForeignKey(Question)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
    def __unicode__(self): # __unicode__ on Python 2
        return self.choice_text
| JCraft40/finalproject | polls/models.py | Python | gpl-2.0 | 899 |
"""Support for collecting data from the ARWN project."""
import json
import logging
from homeassistant.components import mqtt
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
_LOGGER = logging.getLogger(__name__)
DOMAIN = "arwn"
DATA_ARWN = "arwn"
TOPIC = "arwn/#"
def discover_sensors(topic, payload):
    """Given a topic, dynamically create the right sensor type.

    Returns a single ArwnSensor, a tuple of them (wind), or None when
    the topic doesn't map to any known sensor. Async friendly.
    """
    segments = topic.split("/")
    kind = segments[1]
    unit = payload.get("units", "")
    if kind == "temperature":
        temp_unit = TEMP_FAHRENHEIT if unit == "F" else TEMP_CELSIUS
        return ArwnSensor(segments[2], "temp", temp_unit)
    if kind == "moisture":
        return ArwnSensor(segments[2] + " Moisture", "moisture", unit,
                          "mdi:water-percent")
    if kind == "rain":
        # Only the daily-accumulation subtopic becomes a sensor.
        if len(segments) >= 3 and segments[2] == "today":
            return ArwnSensor(
                "Rain Since Midnight", "since_midnight", "in", "mdi:water"
            )
        return None
    if kind == "barometer":
        return ArwnSensor("Barometer", "pressure", unit,
                          "mdi:thermometer-lines")
    if kind == "wind":
        # Wind events carry three readings at once.
        return (
            ArwnSensor("Wind Speed", "speed", unit, "mdi:speedometer"),
            ArwnSensor("Wind Gust", "gust", unit, "mdi:speedometer"),
            ArwnSensor("Wind Direction", "direction", "°", "mdi:compass"),
        )
    return None
def _slug(name):
    """Build the entity_id slug for an ARWN sensor name."""
    return f"sensor.arwn_{slugify(name)}"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the ARWN platform by subscribing to the arwn/# MQTT topic."""
    @callback
    def async_sensor_event_received(msg):
        """Process events as sensors.
        When a new event on our topic (arwn/#) is received we map it
        into a known kind of sensor based on topic name. If we've
        never seen this before, we keep this sensor around in a global
        cache. If we have seen it before, we update the values of the
        existing sensor. Either way, we push an ha state update at the
        end for the new event we've seen.
        This lets us dynamically incorporate sensors without any
        configuration on our side.
        """
        event = json.loads(msg.payload)
        sensors = discover_sensors(msg.topic, event)
        if not sensors:
            # Topic doesn't map to any known sensor type.
            return
        # Lazily create the per-hass sensor cache on first event.
        store = hass.data.get(DATA_ARWN)
        if store is None:
            store = hass.data[DATA_ARWN] = {}
        # Wind topics yield a tuple of sensors; normalize the single
        # sensor case to a tuple so one loop handles both.
        if isinstance(sensors, ArwnSensor):
            sensors = (sensors,)
        # The timestamp is event metadata, not a state attribute.
        if "timestamp" in event:
            del event["timestamp"]
        for sensor in sensors:
            if sensor.name not in store:
                # First time we've seen this sensor: register it.
                sensor.hass = hass
                sensor.set_event(event)
                store[sensor.name] = sensor
                _LOGGER.debug(
                    "Registering new sensor %(name)s => %(event)s",
                    dict(name=sensor.name, event=event),
                )
                async_add_entities((sensor,), True)
            else:
                # Known sensor: just push the new reading.
                store[sensor.name].set_event(event)
    await mqtt.async_subscribe(hass, TOPIC, async_sensor_event_received, 0)
    return True
class ArwnSensor(Entity):
    """Representation of a single ARWN sensor fed by MQTT events."""

    def __init__(self, name, state_key, units, icon=None):
        """Initialize the sensor."""
        self.hass = None
        self.entity_id = _slug(name)
        self._name = name
        self._state_key = state_key
        self.event = {}
        self._unit_of_measurement = units
        self._icon = icon

    def set_event(self, event):
        """Store a copy of the latest event and push a state update."""
        self.event = dict(event)
        self.async_schedule_update_ha_state()

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the value of this sensor's state key, if present."""
        return self.event.get(self._state_key)

    @property
    def state_attributes(self):
        """Expose the entire last event as state attributes."""
        return self.event

    @property
    def unit_of_measurement(self):
        """Return the unit the state is expressed in."""
        return self._unit_of_measurement

    @property
    def should_poll(self):
        """Updates arrive via MQTT push, so never poll."""
        return False

    @property
    def icon(self):
        """Return the icon for this sensor, if any."""
        return self._icon
| leppa/home-assistant | homeassistant/components/arwn/sensor.py | Python | apache-2.0 | 4,648 |
#!/usr/bin/python
from k5test import *
# Run the test KDC with 20-hour maximum ticket and renewable lifetimes
# so the lifetime/renewability cases below can exceed principal limits.
conf = {'realms': {'$realm': {'max_life': '20h', 'max_renewable_life': '20h'}}}
realm = K5Realm(create_host=False, get_creds=False, kdc_conf=conf)
def test(testname, life, rlife, expect_renewable, env=None):
    """kinit with the given lifetime flags and check ticket renewability.

    life/rlife are passed to kinit as -l/-r (rlife omitted when None);
    expect_renewable says whether klist output should show 'renew until'.
    """
    global realm
    kinit_flags = ['-l', life] if rlife is None else ['-l', life, '-r', rlife]
    realm.kinit(realm.user_princ, password('user'), flags=kinit_flags, env=env)
    out = realm.run([klist])
    if ('Default principal: %s\n' % realm.user_princ) not in out:
        fail('%s: did not get tickets' % testname)
    renewable = 'renew until' in out
    if renewable != expect_renewable:
        if renewable:
            fail('%s: tickets unexpectedly renewable' % testname)
        else:
            fail('%s: tickets unexpectedly non-renewable' % testname)
# Get renewable tickets.
test('simple', '1h', '2h', True)

# Renew twice, to test that renewed tickets are renewable.
realm.kinit(realm.user_princ, flags=['-R'])
realm.kinit(realm.user_princ, flags=['-R'])
realm.klist(realm.user_princ)

# Make sure we can't renew non-renewable tickets.
test('non-renewable', '1h', '1h', False)
# Renewal of a non-renewable TGT must fail with KDC_ERR_BADOPTION.
out = realm.kinit(realm.user_princ, flags=['-R'], expected_code=1)
if "KDC can't fulfill requested option" not in out:
    fail('expected error not seen renewing non-renewable ticket')

# Test that -allow_renewable on the client principal works.
realm.run_kadminl('modprinc -allow_renewable user')
test('disallowed client', '1h', '2h', False)
realm.run_kadminl('modprinc +allow_renewable user')

# Test that -allow_renewable on the server principal works.
realm.run_kadminl('modprinc -allow_renewable %s' % realm.krbtgt_princ)
test('disallowed server', '1h', '2h', False)
realm.run_kadminl('modprinc +allow_renewable %s' % realm.krbtgt_princ)

# Test that non-renewable tickets are issued if renew_till < till.
test('short', '2h', '1h', False)

# Test that renewable tickets are issued if till > max life by
# default (the client library sets RENEWABLE-OK), but not if we
# configure away the RENEWABLE-OK option.
no_opts_conf = {'libdefaults': {'kdc_default_options': '0'}}
no_opts = realm.special_env('no_opts', False, krb5_conf=no_opts_conf)
realm.run_kadminl('modprinc -maxlife "10 hours" user')
test('long', '15h', None, True)
test('long noopts', '15h', None, False, env=no_opts)
realm.run_kadminl('modprinc -maxlife "20 hours" user')

# Test maximum renewable life on the client principal.
realm.run_kadminl('modprinc -maxrenewlife "5 hours" user')
test('maxrenewlife client yes', '4h', '5h', True)
test('maxrenewlife client no', '6h', '10h', False)

# Test maximum renewable life on the server principal.
realm.run_kadminl('modprinc -maxrenewlife "3 hours" %s' % realm.krbtgt_princ)
test('maxrenewlife server yes', '2h', '3h', True)
test('maxrenewlife server no', '4h', '8h', False)

# Test realm maximum life (20h, from kdc_conf above): principal limits are
# raised past it so the realm cap is what decides renewability.
realm.run_kadminl('modprinc -maxrenewlife "40 hours" user')
realm.run_kadminl('modprinc -maxrenewlife "40 hours" %s' % realm.krbtgt_princ)
test('maxrenewlife realm yes', '10h', '20h', True)
test('maxrenewlife realm no', '21h', '40h', False)

success('Renewing credentials')
| drankye/kerb-token | krb5/src/tests/t_renew.py | Python | apache-2.0 | 3,101 |
"""Update models to avoid uniqueness errors
Revision ID: 4148c3cb14ad
Revises: 21e927fdf78c
Create Date: 2015-04-24 23:27:26.628208
"""
# revision identifiers, used by Alembic.
revision = '4148c3cb14ad'
down_revision = '21e927fdf78c'
import hashlib
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql
def upgrade():
try:
### commands auto generated by Alembic - please adjust! ###
op.add_column('TranslationExternalSuggestions', sa.Column('human_key_hash', sa.Unicode(length=36), nullable=True))
op.create_index(u'ix_TranslationExternalSuggestions_human_key_hash', 'TranslationExternalSuggestions', ['human_key_hash'], unique=False)
### end Alembic commands ###
except:
print "Not supported in SQLite"
try:
op.drop_constraint("engine", "TranslationExternalSuggestions", "unique")
except:
print "drop constraint not supported in SQLite"
metadata = sa.MetaData()
ExternalSuggestions = sa.Table('TranslationExternalSuggestions', metadata,
sa.Column('id', sa.Integer()),
sa.Column('human_key', sa.Unicode(255)),
sa.Column('human_key_hash', sa.Unicode(36)),
)
existing_suggestions = sql.select([ExternalSuggestions.c.id, ExternalSuggestions.c.human_key])
for row in op.get_bind().execute(existing_suggestions):
suggestion_id = row[ExternalSuggestions.c.id]
human_key_hash = hashlib.md5(row[ExternalSuggestions.c.human_key]).hexdigest()
update_stmt = ExternalSuggestions.update().where(ExternalSuggestions.c.id == suggestion_id).values(human_key_hash = human_key_hash)
op.execute(update_stmt)
def downgrade():
    """Reverse of upgrade(): drop the human_key_hash index and column.

    Note: the unique constraint removed by upgrade() is not restored.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_TranslationExternalSuggestions_human_key_hash', table_name='TranslationExternalSuggestions')
    op.drop_column('TranslationExternalSuggestions', 'human_key_hash')
    ### end Alembic commands ###
| go-lab/appcomposer | alembic/versions/4148c3cb14ad_update_models_to_avoid_uniqueness_errors.py | Python | bsd-2-clause | 1,977 |
"""Wrapper around gdax-python.
Gdax-python is the unofficial python library for GDAX.
https://github.com/danpaquin/gdax-python
https://pypi.python.org/pypi/gdax
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# OrderedDict retains its key order, so we get consistent column ordering.
from collections import OrderedDict
import functools
import logging
import string
import sys
import traceback
# https://pypi.python.org/pypi/colorama
import colorama
colorama.init()
# https://pypi.python.org/pypi/tabulate
from tabulate import tabulate
from gdaxcli import exceptions
from gdaxcli import utils
try:
import gdax
# TODO: include other non-standard libraries in this as well.
except ImportError:
traceback.print_exc()
print('Unable to import gdax. Make sure you follow the installation'
' instructions at https://github.com/sonph/gdaxcli')
sys.exit(1)
# ASCII digit characters, used when scanning price/amount strings.
DIGITS = set(string.digits)
# 'CAD' is available in the sandbox.
FIAT_CURRENCY = set(['USD', 'CAD', 'GBP', 'EUR'])
# Number of decimal digits used when formatting floats for display.
# TODO: make this configurable.
DEFAULT_ACCURACY = 4
# Pre-bind the table format so every table printed by the CLI looks the same.
tabulate = functools.partial(tabulate,
    tablefmt='simple', headers='keys', floatfmt='.%df' % DEFAULT_ACCURACY)
# Sign predicates used as `condition` arguments to colorize().
negative = lambda x: float(x) < 0
nonnegative = lambda x: float(x) >= 0
positive = lambda x: float(x) > 0
nonpositive = lambda x: float(x) <= 0
def format_float(value, accuracy=DEFAULT_ACCURACY):
    """Format *value* as fixed-point text with *accuracy* decimal digits.

    Only needed when a value is colorized or needs a non-default number of
    digits before going into a table; otherwise tabulate formats floats
    automatically.
    """
    return ('%%.%df' % accuracy) % float(value)
def colorize(value, condition, accuracy=None):
    """Return *value* wrapped in green ANSI codes if condition holds, red otherwise.

    Args:
        value: Value to render. Floats are formatted first, using *accuracy*
            digits when given and DEFAULT_ACCURACY otherwise.
        condition: Either a bool or a callable applied to the (formatted)
            string value.
        accuracy: Optional decimal-digit count for float formatting.
    """
    if isinstance(value, float):
        value = format_float(value) if accuracy is None else format_float(value, accuracy)
    if not isinstance(condition, bool):
        condition = condition(value)
    prefix = colorama.Fore.GREEN if condition else colorama.Fore.RED
    return prefix + value + colorama.Style.RESET_ALL
# Convenience wrappers: force-green / force-red colorization of a value.
green = lambda value: colorize(value, True)
red = lambda value: colorize(value, False)
def is_str_zero(s):
    """Return True if the numeric string *s* contains no non-zero digit.

    gdax returns amounts as strings, so the characters are inspected
    directly rather than converting to float and comparing against a
    threshold.
    """
    return not any(char in DIGITS and char != '0' for char in s)
def confirm(message='Proceed?'):
    """Prompt the user and return True iff they answered 'y' or 'Y'."""
    response = raw_input('%s [y/N]: ' % message)
    if not response:
        print('Enter y or Y to proceed.')
    return response in ('y', 'Y')
class Client(object):
  """Wrapper of the gdax-python library."""

  def __init__(self):
    """Initializer: builds an authenticated client from the user config."""
    config = utils.read_config()
    self._client = gdax.AuthenticatedClient(
        key=config['key'],
        b64secret=config['secret'],
        passphrase=config['passphrase'])
    # TODO: configure sandbox keys.
    # TODO: allow public client.

  def products(self):
    """Lists products available for trading."""
    rows = []
    for product in self._client.get_products():
      rows.append(OrderedDict([
          ('id', product['id']),
          ('base_currency', product['base_currency']),
          ('quote_currency', product['quote_currency']),
          ('base_min_size', product['base_min_size']),
          ('base_max_size', product['base_max_size']),
          ('quote_increment', product['quote_increment']),
      ]))
    print(tabulate(rows))

  def ticker(self, product_ids=None):
    """Prints current ticker and 24h stats for the given (or all) products."""
    # TODO: Configure default products or currencies e.g. USD only, ETH only.
    rows = []
    if product_ids is None:
      product_ids = self._get_product_ids()
    for product_id in product_ids:
      tick = self._client.get_product_ticker(product_id)
      gap = float(tick['ask']) - float(tick['bid'])
      stats = self._client.get_product_24hr_stats(product_id)
      gain = float(tick['price']) - float(stats['open'])
      gain_perc = gain / float(stats['open']) * 100
      rows.append(OrderedDict([
          ('product_id', product_id),
          ('price', tick['price']),
          ('size', tick['size']),
          ('bid', tick['bid']),
          ('ask', tick['ask']),
          ('gap', gap),
          ('24h_volume', tick['volume']),
          ('24h_open', stats['open']),
          ('24h_high', stats['high']),
          ('24h_low', stats['low']),
          ('24h_gain', colorize(gain, nonnegative)),
          ('perc', colorize(format_float(gain_perc, 2),
                            nonnegative(gain_perc)))
      ]))
    print(tabulate(rows))

  def balance(self):
    """Prints per-account balances plus a USD-converted grand total."""
    rows = []
    prices = {}
    for product_id in self._get_product_ids():
      # TODO: support other currencies
      if product_id.endswith('-USD'):
        prices[product_id] = self._client.get_product_ticker(
            product_id)['price']
    # Total of all accounts converted to USD.
    balance_total = 0
    # Map of account currency -> account total in USD.
    acc_totals = {}
    accounts = self._client.get_accounts()
    accounts.sort(key=lambda acc: acc['currency'])
    for acc in accounts:
      hodl = acc['hold']
      if acc['currency'] not in FIAT_CURRENCY:
        acc_total_usd = float(prices[acc['currency'] + '-USD']) * float(acc['balance'])
      else:
        acc_total_usd = float(acc['balance'])
      # We need to store float values here, since the OrderedDict's have the
      # colored strings.
      acc_totals[acc['currency']] = acc_total_usd
      # Calculate sum total.
      balance_total += acc_total_usd
      rows.append(OrderedDict([
          ('currency', acc['currency']),
          ('balance', acc['balance']),
          ('available', acc['available']),
          ('hold', red(hodl) if not is_str_zero(hodl) else hodl),
          ('total_usd', acc_total_usd)
      ]))
    # Calculate percent holding in each of the currencies as `perc` column.
    for acc in rows:
      if acc['currency'] != 'TOTAL':
        if balance_total > 0:
          perc = float(acc_totals[acc['currency']]) / balance_total * 100
          acc['perc'] = perc
        else:
          acc['perc'] = 100.00
    print(tabulate(rows))
    print('\nAccount total balance in USD: %s' % format_float(balance_total))

  def history(self, accounts):
    """Get trade history for specified accounts: USD, BTC, ETH, LTC, etc."""
    # TODO: allow user to specify what currency to use
    acc_ids = []
    for acc in self._client.get_accounts():
      currency = acc['currency']
      if currency in accounts:
        acc_ids.append((acc['id'], currency))
    for index, value in enumerate(acc_ids):
      acc_id, currency = value
      rows = []
      if index != 0:
        print()
      print('Account: %s' % currency)
      for page in self._client.get_account_history(acc_id):
        for item in page:
          is_green = True
          product, type_, amount = '', item['type'], float(item['amount'])
          if type_ == 'transfer':
            transfer_type = item['details']['transfer_type']
            is_green = (transfer_type == 'deposit')
            type_ = 'transfer (%s)' % transfer_type
          elif type_ == 'match':
            product = item['details']['product_id']
            is_green = nonnegative(amount)
          elif type_ == 'fee':
            is_green = False
          rows.append(OrderedDict([
              ('type', colorize(type_, is_green)),
              ('amount', colorize(amount, is_green)),
              ('balance', format_float(item['balance'])),
              ('product_id', product),
              ('created_at', item['created_at']),
          ]))
      print(tabulate(rows, numalign="decimal"))

  def orders(self):
    """Prints all pending (open) orders."""
    rows = []
    pages = self._client.get_orders()
    for page in pages:
      for order in page:
        rows.append(self._parse_order(order))
    if rows:
      print(tabulate(rows))
    else:
      print('No pending orders')

  def order(self, order_type, side, product, size, price,
            skip_confirmation=False):
    """Place an order.

    Args:
      order_type: One of limit, market or stop.
      side: One of buy or sell.
      product: The product to be exchanged. Can be uppercased or lowercased.
          For example: eth-usd, BTC-GBP, ...
      size: The amount to buy or sell. Can be coin or fiat.
      price: Price to place limit/stop order. Ignored if order type is market.
          Price can be relative to the current ticker price by prepending
          the difference amount with + or - . Order is checked to make sure
          you're not buying higher or selling lower than current price.
      skip_confirmation: If True, do not ask for confirmation.

    Raises:
      exceptions.InvalidOrderError: if a limit order would buy above or sell
          below the current ticker price.
    """
    product = product.upper()
    self._check_valid_order(order_type, side, product, size, price)
    current_price = float(self._client.get_product_ticker(product)['price'])
    if order_type == 'market':
      total = float(size) * current_price
      price = current_price
    elif order_type == 'limit':
      abs_price, amount = self._parse_price(price, current_price)
      # Guard against limit orders that would execute immediately at a worse
      # price than intended.  (The unreachable `return` statements that
      # followed these raises have been removed.)
      if side == 'buy' and amount >= 0:
        raise exceptions.InvalidOrderError(
            'Error: Buying higher than or equal to current price:'
            ' %s >= %.2f' % (abs_price, current_price))
      elif side == 'sell' and amount <= 0:
        raise exceptions.InvalidOrderError(
            'Error: Selling lower than or equal to current price:'
            ' %s <= %.2f' % (abs_price, current_price))
      # TODO: make time_in_force, post_only configurable.
      price = abs_price
    elif order_type == 'stop':
      # TODO
      raise NotImplementedError('This functionality is not yet implemented.')
    kwargs = {
        'product_id': product,
        'type': order_type,
        'side': side,
        'size': size,
    }
    # TODO: read the self trade prevention option from config
    diff = ''
    if order_type == 'limit':
      kwargs['price'] = abs_price
      diff = float(price) - current_price
      diff = ' (' + colorize('%.2f' % diff, negative) + ')'
      total = float(size) * float(price)
    print('Placing %s order: %s %s %s @ %s%s; total %.2f' % (
        order_type.upper(), colorize(side, lambda side: side == 'buy'), size,
        product, price, diff, total))
    if skip_confirmation or confirm():
      if side == 'buy':
        print(self._client.buy(**kwargs))
      else:
        print(self._client.sell(**kwargs))
    else:
      print('Did nothing')

  def order_cancel(self, order_id_prefix, skip_confirmation=False):
    """Cancel the single pending order whose id starts with the prefix."""
    order_ids = []
    pages = self._client.get_orders()
    for page in pages:
      for order in page:
        order_ids.append(order['id'])
    possible_matches = []
    for order_id in order_ids:
      if order_id.startswith(order_id_prefix):
        possible_matches.append(order_id)
    if not possible_matches:
      print('Order prefix does not match any')
      return
    if len(possible_matches) > 1:
      print('Order prefix too short; cannot uniquely identify an order')
      return
    # Bind the matched id explicitly.  Previously the code cancelled
    # `order_id` -- the stale loop variable above, i.e. the LAST listed
    # order -- instead of the matched one.
    matched_id = possible_matches[0]
    order = self._client.get_order(matched_id)
    # TODO: factor out this error checking logic
    if isinstance(order, dict) and 'message' in order:
      # API returned an error payload; parsing it as an order would crash.
      print(order)
      return
    print(tabulate([self._parse_order(order)]))
    if skip_confirmation or confirm('Cancel order?'):
      print(self._client.cancel_order(matched_id))

  def fills(self, product=None):
    """Prints filled trades, optionally restricted to one product."""
    rows = []
    pages = self._client.get_fills(product_id=product)
    for page in pages:
      for fill in page:
        size, price = float(fill['size']), float(fill['price'])
        size_usd = size * price
        fee = fill['fee']
        rows.append(OrderedDict([
            ('product_id', fill['product_id']),
            ('side', colorize(fill['side'], lambda side: side == 'buy')),
            ('price', price),
            ('size', size),
            ('size_usd', size_usd),
            ('fee', red(fee) if not is_str_zero(fee) else fee),
            ('settled', 'yes' if fill['settled'] else red('no')),
            ('created_at', fill['created_at']),
        ]))
    if rows:
      print(tabulate(rows))
    else:
      print('No fills')

  def cancel_all(self, product):
    """Cancel all pending orders for the given product, after confirmation."""
    if confirm('Cancel ALL orders for %s?' % product):
      print(self._client.cancel_all(product=product))

  def _parse_order(self, order):
    """Convert a raw order dict into an OrderedDict row for tabulation."""
    size, price = float(order['size']), float(order['price'])
    size_usd = size * price
    fill_fees = order['fill_fees']
    return OrderedDict([
        ('id', order['id'][:6]),
        ('product_id', order['product_id']),
        ('side', colorize(order['side'], lambda x: x == 'buy')),
        ('type', order['type']),
        ('price', price),
        ('size', size),
        ('size_usd', size_usd),
        ('filled_size', order['filled_size']),
        ('fill_fees', red(format_float(fill_fees)) if not is_str_zero(fill_fees) else float(fill_fees)),
        ('status', colorize(order['status'], lambda x: x == 'open')),
        ('time_in_force', order['time_in_force']),
        ('settled', 'yes' if order['settled'] else red('no')),
        ('stp', order['stp']),
        ('created_at', order['created_at']),
        # TODO: local date.
    ])

  def _parse_price(self, price, current_price):
    """Resolve an absolute or relative (+/- prefixed) price string.

    Returns a (absolute_price_string, signed_difference_from_current) tuple.
    """
    # TODO: make default diff amount configurable.
    if price[0] in DIGITS:
      # Absolute price.
      return (self._truncate(price, 2), float(price) - current_price)
    # Relative price.
    amount = float(price[1:])
    if price.startswith('-'):
      amount = -amount
    abs_price = current_price + amount
    # If we simply call str, it may return a scientific notation e.g. 5e-5.
    return (self._truncate('%.6f' % abs_price, 2), amount)

  def _check_valid_order(
      self, order_type, side, product, size, price):
    """Validate order arguments; raises AssertionError/ValueError on bad input.

    NOTE(review): assert-based validation is stripped under `python -O`.
    """
    product = product.upper()
    product_ids = self._get_product_ids()
    # TODO: throw more meaningful error messages.
    assert order_type in set(['market', 'limit', 'stop'])
    assert side in set(['buy', 'sell'])
    assert product in product_ids
    float(size)
    if order_type != 'market':
      assert price[0] in (DIGITS | set(['-', '+']))

  def _get_product_ids(self):
    """Gets sorted list of products."""
    products = self._client.get_products()
    product_ids = [p['id'] for p in products]
    product_ids.sort()
    return product_ids

  def _truncate(self, s, digits):
    """Truncate the value to the number of digits after the dot specified.

    We don't round up because rounding up can cause issues. For example you
    have 0.1111115 BTC, but rounding up could show 0.111112, which exceeds
    the actual amount when you try to sell all of it.
    """
    if not isinstance(s, str):
      s = str(s)
    for index, char in enumerate(s):
      if char == '.':
        dot_index = index
        end = dot_index + digits + 1
        break
    else:
      end = len(s)
    return s[:end]
| sonph/gdaxcli | gdaxcli/gdax_utils.py | Python | mit | 15,218 |
"""
This tutorial introduces logistic regression using Theano and stochastic
gradient descent.
Logistic regression is a probabilistic, linear classifier. It is parametrized
by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is
done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability.
Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of
the vector whose i'th element is P(Y=i|x).
.. math::
y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method
suitable for large datasets, and a conjugate gradient optimization method
that is suitable for smaller datasets.
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 4.3.2
"""
__docformat__ = 'restructedtext en'
import cPickle
import gzip
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
class LogisticRegression(object):
    """Multi-class Logistic Regression Class

    The logistic regression is fully described by a weight matrix :math:`W`
    and bias vector :math:`b`. Classification is done by projecting data
    points onto a set of hyperplanes, the distance to which is used to
    determine a class membership probability.
    """

    def __init__(self, input, n_in, n_out):
        """ Initialize the parameters of the logistic regression

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
                      architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the datapoints lie

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie
        """
        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        self.W = theano.shared(value=numpy.zeros((n_in, n_out),
                                                 dtype=theano.config.floatX),
                               name='W', borrow=True)
        # initialize the biases b as a vector of n_out 0s
        self.b = theano.shared(value=numpy.zeros((n_out,),
                                                 dtype=theano.config.floatX),
                               name='b', borrow=True)
        # compute vector of class-membership probabilities in symbolic form
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
        # compute prediction as class whose probability is maximal in
        # symbolic form
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        # parameters of the model
        self.params = [self.W, self.b]

    def mean_square_error(self, y):
        '''
        Return mean squared error:

        loss = ||Y - XB||^2

        TODO: unimplemented stub -- currently always returns 0, so it must
        not be used as a training cost yet.
        '''
        return 0

    def negative_log_likelihood(self, y):
        """Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.

        .. math::

            \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
            \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
            \ell (\theta=\{W,b\}, \mathcal{D})

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label

        Note: we use the mean instead of the sum so that
              the learning rate is less dependent on the batch size
        """
        # y.shape[0] is (symbolically) the number of rows in y, i.e.,
        # number of examples (call it n) in the minibatch
        # T.arange(y.shape[0]) is a symbolic vector which will contain
        # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
        # Log-Probabilities (call it LP) with one row per example and
        # one column per class LP[T.arange(y.shape[0]),y] is a vector
        # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
        # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
        # the mean (across minibatch examples) of the elements in v,
        # i.e., the mean log-likelihood across the minibatch.
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

    def errors(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """
        # check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            # Bug fix: this previously referenced an undefined name `target`,
            # which turned the intended TypeError into a NameError.
            raise TypeError('y should have the same shape as self.y_pred',
                            ('y', y.type, 'y_pred', self.y_pred.type))
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
def load_data(dataset):
    ''' Loads the dataset

    :type dataset: string
    :param dataset: the path to the dataset (here MNIST)

    :returns: a list [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
        (test_set_x, test_set_y)] of Theano shared-variable pairs
    '''
    #############
    # LOAD DATA #
    #############
    # Download the MNIST dataset if it is not present
    data_dir, data_file = os.path.split(dataset)
    if data_dir == "" and not os.path.isfile(dataset):
        # Check if dataset is in the data directory.
        new_path = os.path.join(os.path.split(__file__)[0], "..", "data", dataset)
        if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
            dataset = new_path
    if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
        # NOTE(review): Python 2 only (print statement, urllib.urlretrieve).
        import urllib
        origin = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
        print 'Downloading data from %s' % origin
        urllib.urlretrieve(origin, dataset)
    print '... loading data'
    # Load the dataset
    f = gzip.open(dataset, 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()
    # train_set, valid_set, test_set format: tuple(input, target)
    # input is a numpy.ndarray of 2 dimensions (a matrix)
    # in which each row corresponds to an example. target is a
    # numpy.ndarray of 1 dimension (a vector) that has the same length as
    # the number of rows in the input. It gives the target
    # label for the example with the same index in the input.
    def shared_dataset(data_xy, borrow=True):
        """ Function that loads the dataset into shared variables

        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch everytime
        is needed (the default behaviour if the data is not in a shared
        variable) would lead to a large decrease in performance.
        """
        data_x, data_y = data_xy
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        # When storing data on the GPU it has to be stored as floats
        # therefore we will store the labels as ``floatX`` as well
        # (``shared_y`` does exactly that). But during our computations
        # we need them as ints (we use labels as index, and if they are
        # floats it doesn't make sense) therefore instead of returning
        # ``shared_y`` we will have to cast it to int. This little hack
        # lets ous get around this issue
        return shared_x, T.cast(shared_y, 'int32')
    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)
    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
                           dataset='mnist.pkl.gz',
                           batch_size=600):
    """
    Demonstrate stochastic gradient descent optimization of a log-linear
    model

    This is demonstrated on MNIST.

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
                          gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: the path of the MNIST dataset file from
                 http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz

    :type batch_size: int
    :param batch_size: number of examples per SGD minibatch
    """
    datasets = load_data(dataset)
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]
    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'
    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels
    # construct the logistic regression class
    # Each MNIST image has size 28*28
    classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)
    # the cost we minimize during training is the negative log likelihood of
    # the model in symbolic format
    cost = classifier.negative_log_likelihood(y)
    # compiling a Theano function that computes the mistakes that are made by
    # the model on a minibatch
    test_model = theano.function(inputs=[index],
            outputs=classifier.errors(y),
            givens={
                x: test_set_x[index * batch_size: (index + 1) * batch_size],
                y: test_set_y[index * batch_size: (index + 1) * batch_size]})
    validate_model = theano.function(inputs=[index],
            outputs=classifier.errors(y),
            givens={
                x: valid_set_x[index * batch_size:(index + 1) * batch_size],
                y: valid_set_y[index * batch_size:(index + 1) * batch_size]})
    # compute the gradient of cost with respect to theta = (W,b)
    g_W = T.grad(cost=cost, wrt=classifier.W)
    g_b = T.grad(cost=cost, wrt=classifier.b)
    # specify how to update the parameters of the model as a list of
    # (variable, update expression) pairs.
    updates = [(classifier.W, classifier.W - learning_rate * g_W),
               (classifier.b, classifier.b - learning_rate * g_b)]
    # compiling a Theano function `train_model` that returns the cost, but in
    # the same time updates the parameter of the model based on the rules
    # defined in `updates`
    train_model = theano.function(inputs=[index],
            outputs=cost,
            updates=updates,
            givens={
                x: train_set_x[index * batch_size:(index + 1) * batch_size],
                y: train_set_y[index * batch_size:(index + 1) * batch_size]})
    ###############
    # TRAIN MODEL #
    ###############
    print '... training the model'
    # early-stopping parameters
    patience = 5000  # look as this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatche before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch
    best_params = None
    best_validation_loss = numpy.inf
    test_score = 0.
    start_time = time.clock()
    done_looping = False
    epoch = 0
    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_model(minibatch_index)
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index
            if (iter + 1) % validation_frequency == 0:
                # compute zero-one loss on validation set
                validation_losses = [validate_model(i)
                                     for i in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' % \
                    (epoch, minibatch_index + 1, n_train_batches,
                    this_validation_loss * 100.))
                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    # improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss * \
                       improvement_threshold:
                        patience = max(patience, iter * patience_increase)
                    best_validation_loss = this_validation_loss
                    # test it on the test set
                    test_losses = [test_model(i)
                                   for i in xrange(n_test_batches)]
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of best'
                           ' model %f %%') %
                        (epoch, minibatch_index + 1, n_train_batches,
                         test_score * 100.))
            if patience <= iter:
                done_looping = True
                break
    end_time = time.clock()
    print(('Optimization complete with best validation score of %f %%,'
           'with test performance %f %%') %
          (best_validation_loss * 100., test_score * 100.))
    print 'The code run for %d epochs, with %f epochs/sec' % (
        epoch, 1. * epoch / (end_time - start_time))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.1fs' % ((end_time - start_time)))
# Script entry point: train a logistic regression classifier on MNIST.
if __name__ == '__main__':
    sgd_optimization_mnist()
| mattdelhey/kaggle-galaxy | Old/CNN/logisticSGD.py | Python | mit | 15,297 |
#***************************************************************************
#* *
#* Copyright (c) 2014 Daniel Falck <ddfalck@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
'''
This macro is used in conjunction with the toolpathparams script to create an object that represents a tool for use in a CNC program. Create a group and then select it- then run the macro.
You will have to edit the parameters inside the Data tab of the tool object.
'''
import PathScripts
import toolpathparams as tp

# Create a FeaturePython object and attach the ToolParams proxy plus its
# GUI view provider (FreeCAD/FreeCADGui/App are macro runtime globals).
tl = FreeCAD.ActiveDocument.addObject("App::FeaturePython","Tools")
tp.ToolParams(tl)
tp.ViewProviderToolParams(tl.ViewObject)

# Add the new tool object to the currently selected group.
# NOTE(review): assumes a group is selected; an empty selection raises
# IndexError on sel[0].
sel = FreeCADGui.Selection.getSelection()
g = sel[0]
g.addObject(tl)

# Recompute so the new object shows up in the document tree.
App.activeDocument().recompute()
| cypsun/FreeCAD | src/Mod/Path/PathScripts/Macros/create_tool.py | Python | lgpl-2.1 | 2,174 |
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp import models, fields, api
class clv_annotation(models.Model):
    """Extension of the ``clv_annotation`` model overriding a default value."""
    _inherit = 'clv_annotation'

    _defaults = {
        # Default 'active_history' to True for newly created records.
        # NOTE(review): presumably enables change-history tracking defined on
        # the base model — confirm against the clv_annotation definition.
        'active_history': True,
    }
| CLVsol/odoo_solutions | vfmng/clv_annotation.py | Python | agpl-3.0 | 1,575 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from wx.lib.ClickableHtmlWindow import PyClickableHtmlWindow
from robotide.version import VERSION
from robotide.pluginapi import ActionInfo
class ReleaseNotes(object):
    """Shows release notes of the current version.

    The release notes tab is opened automatically once per release, and can
    also be opened on demand via "Release Notes" in the Help menu.
    """

    def __init__(self, application):
        self.application = application
        self.version_shown = application.settings.get('version_shown', '')
        self._view = None
        self.enable()

    def enable(self):
        """Register the Help menu action and show the notes if outdated."""
        info = ActionInfo('Help', 'Release Notes', self.show,
                          doc='Show the release notes')
        self.application.frame.actions.register_action(info)
        self.show_if_updated()

    def show_if_updated(self):
        """Open the tab once after an upgrade, then remember the version."""
        if self.version_shown == VERSION:
            return
        self.show()
        self.application.settings['version_shown'] = VERSION

    def show(self, event=None):
        """Create the tab lazily and bring it to the front."""
        notebook = self.application.frame.notebook
        # Truthiness check (not "is None"): a destroyed wx window is falsy.
        if not self._view:
            self._view = self._create_view()
            notebook.AddPage(self._view, "Release Notes", select=False)
        notebook.show_tab(self._view)

    def bring_to_front(self):
        """Focus the tab if it has already been created."""
        if self._view:
            self.application.frame.notebook.show_tab(self._view)

    def _create_view(self):
        """Build the HTML panel holding the welcome text and release notes."""
        panel = wx.Panel(self.application.frame.notebook)
        html = PyClickableHtmlWindow(panel, -1)
        html.SetStandardFonts()
        html.SetPage(WELCOME_TEXT + RELEASE_NOTES)
        layout = wx.BoxSizer(wx.VERTICAL)
        layout.Add(html, 1, wx.EXPAND | wx.ALL, border=8)
        panel.SetSizer(layout)
        return panel
WELCOME_TEXT = """
<h2>Welcome to use RIDE version %s</h2>
<p>Thank you for using the Robot Framework IDE (RIDE).</p>
<p>Visit RIDE on the web:</p>
<ul>
<li><a href="https://github.com/robotframework/RIDE">
RIDE project page on github</a></li>
<li><a href="https://github.com/robotframework/RIDE/wiki/Installation-Instructions">
Installation instructions</a></li>
<li><a href="https://github.com/robotframework/RIDE/wiki/Release-notes">
Release notes</a></li>
</ul>
""" % VERSION
# *** DO NOT EDIT THE CODE BELOW MANUALLY ***
# Release notes are updated automatically by package.py script whenever
# a numbered distribution is created.
RELEASE_NOTES = """
<h2>Release notes for 1.5a2</h2>
<table border="1">
<tr>
<td><p><b>ID</b></p></td>
<td><p><b>Type</b></p></td>
<td><p><b>Priority</b></p></td>
<td><p><b>Summary</b></p></td>
</tr>
<tr>
<td><a href="http://github.com/robotframework/RIDE/issues/http://github.com/robotframework/RIDE/issues/">Issue http://github.com/robotframework/RIDE/issues/</a></td>
<td>bug</td>
<td>critical</td>
<td>Cannot import remote library in 1.4.1</td>
</tr>
<tr>
<td><a href="http://github.com/robotframework/RIDE/issues/http://github.com/robotframework/RIDE/issues/">Issue http://github.com/robotframework/RIDE/issues/</a></td>
<td>enhancement</td>
<td>critical</td>
<td>Support RF 2.9</td>
</tr>
<tr>
<td><a href="http://github.com/robotframework/RIDE/issues/http://github.com/robotframework/RIDE/issues/">Issue http://github.com/robotframework/RIDE/issues/</a></td>
<td>bug</td>
<td>high</td>
<td>'--monitorcolors' and '--monitorwidth' is deprecated WARN message</td>
</tr>
<tr>
<td><a href="http://github.com/robotframework/RIDE/issues/http://github.com/robotframework/RIDE/issues/">Issue http://github.com/robotframework/RIDE/issues/</a></td>
<td>bug</td>
<td>medium</td>
<td>Highlighting selected cell (and matches) does not work.</td>
</tr>
<tr>
<td><a href="http://github.com/robotframework/RIDE/issues/http://github.com/robotframework/RIDE/issues/">Issue http://github.com/robotframework/RIDE/issues/</a></td>
<td>bug</td>
<td>medium</td>
<td>RIDE Log shows "The C++ part of the VariablesListEditor object has been deleted, attribute access no longer allowed"</td>
</tr>
<tr>
<td><a href="http://github.com/robotframework/RIDE/issues/http://github.com/robotframework/RIDE/issues/">Issue http://github.com/robotframework/RIDE/issues/</a></td>
<td>bug</td>
<td>medium</td>
<td>"Find Where Used" in editor not working</td>
</tr>
</table>
<p>Altogether 6 issues.</p>
"""
| fingeronthebutton/RIDE | src/robotide/application/releasenotes.py | Python | apache-2.0 | 5,022 |
#!/usr/bin/env python3
# Read the puzzle input; answer groups are separated by blank lines.
groups = []
with open('input.txt', 'r') as f:
    groups = f.read().split('\n\n')
def get_group_answers(groups):
    """Return, for each group, how many distinct questions anyone answered.

    :param groups: list of newline-separated per-person answer strings.
    :returns: list of union sizes, one per group.
    """
    # len(set(...)) suffices; the original wrapped the set in a redundant
    # list() and built the result with a manual append loop.
    return [len(set(group_val.replace('\n', ''))) for group_val in groups]
# Part 2: count the questions that *everyone* in a group answered.
def get_consistent_group_answers(groups):
    """Return, for each group, how many questions every member answered.

    Rewritten with set intersection. The original implementation appended
    a common answer again for each additional member holding it (its
    "val not in consensus" guard skipped re-verification but not the
    append) and counted duplicate letters in single-person groups, which
    inflated the totals.

    :param groups: list of newline-separated per-person answer strings.
    :returns: list of intersection sizes, one per group.
    """
    counts = []
    for group in groups:
        # One answer set per non-empty line (person) in the group.
        answer_sets = [set(person) for person in group.split('\n') if person]
        common = set.intersection(*answer_sets) if answer_sets else set()
        counts.append(len(common))
    return counts
#--- challenge 1: total of per-group "anyone answered yes" counts
group_answers = get_group_answers(groups)
print("Solution to challenge 1: {}".format(sum(group_answers)))

#--- challenge 2: total of per-group "everyone answered yes" counts
group_yes = get_consistent_group_answers(groups)
print("Solution to challenge 2: {}".format(sum(group_yes)))
| jekhokie/scriptbox | python--advent-of-code/2020/6/solve.py | Python | mit | 1,211 |
#Copyright 2018 Tim Wentlau.
#Distributed under the MIT License. See LICENSE in root of project.
import os
import sys
from .default_config import get_default_config
class Application(object):
    """(u)kervi application bootstrap.

    Parses the command line for a configuration file, loads the layered
    configuration (user config over packaged defaults) and loads the
    platform HAL driver named in the configuration.
    """

    def __init__(self, user_config=None):
        print("Starting kervi (u)application")
        import getopt
        config_files = []
        # BUG FIX: the short-option spec was "c" (takes no argument), so
        # ``-c file`` always left ``arg`` empty and the file was never used;
        # "c:" makes -c consume a value, matching --config_file=.
        opts, args = getopt.getopt(sys.argv[1:], "c:", ["config_file=", "as-service", "install-service", "uninstall-service", "start-service", "stop-service", "restart-service", "status-service", "detect-devices"])
        for opt, arg in opts:
            if opt in ("-c", "--config_file"):
                if os.path.isfile(arg):
                    config_files += [arg]
                else:
                    print("Specified config file not found:", arg)

        # Use the first candidate config file that exists, if any.
        selected_config_file = None
        for config_file in config_files:
            if os.path.isfile(config_file):
                selected_config_file = config_file
                break

        from kervi.config import load
        self.config = load(
            config_file=selected_config_file,
            config_user=user_config,
            config_base=get_default_config()
        )

        # Load the hardware-abstraction-layer driver chosen in the config.
        import kervi.hal as hal
        hal_driver = hal._load(self.config.platform.driver)
        if hal_driver:
            print("platform driver:", hal_driver)
| kervi/kervi | ukervi/ukervi/application/__init__.py | Python | mit | 1,697 |
# -*- coding: utf-8 -*-
from content_plugin import ContentPlugin
from plugin_ckeditor import CKEditor
from gluon.tools import PluginManager
class ContentVideo(ContentPlugin):
    """Content plugin handling video items (metadata plus renditions)."""

    def get_item_url(self, item):
        """Return the URL of the video item's index page."""
        return URL('plugin_video', 'index.html', args=[item.unique_id])

    def create_content(self, item):
        """Create the initial, empty content record for *item*.

        The credit line defaults to the current user's full name.
        """
        self.db.plugin_video_content.insert(
            credit_line="{} {}".format(
                self.auth.user.first_name,
                self.auth.user.last_name
            ),
            description='',
            item_id=item.unique_id,
            renditions=[]
        )

    def get_icon(self):
        """Return the Font Awesome icon element used in listings."""
        return I(_class="fa fa-file-video-o")

    def get_name(self):
        """Return the translated, human-readable plugin name."""
        return self.T("Video")

    def preview(self, item):
        """
        Show the item preview on list's or in packages.
        """
        super(ContentVideo, self).preview(item)
        info = self.db.plugin_video_content(item_id=item.unique_id)
        return XML(
            self.response.render(
                'plugin_video/preview.html',
                dict(item=item, info=info)))

    def export(self, item, export_dir):
        """
        Export the video item: general info plus one directory per rendition.
        """
        import os
        import json
        # put the video general info
        db = self.db
        content = db.plugin_video_content(item_id=item.unique_id)
        with open(os.path.join(export_dir, 'video.json'), 'w') as f:
            f.write(content.as_json())
        for r_id in content.renditions:
            # for each rendition put the descriptive info and the video link
            rend = db.plugin_video_rendition(r_id)
            rend_dir = os.path.join(export_dir, str(r_id))
            os.mkdir(rend_dir)
            with open(os.path.join(rend_dir, 'rendition.json'), 'w') as f:
                f.write(json.dumps({
                    'id': rend.id,
                    'purpose': rend.purpose,
                    'video': URL(
                        'default', 'download', args=[rend.video],
                        scheme=True, host=True),
                }))
        # done
        return

    def get_changelog_url(self, item):
        """Return the URL of the item's changelog page."""
        return URL('plugin_video', 'changelog', args=[item.unique_id])

    def get_full_text(self, item):
        """Return the item's plain-text rendering (used for indexing).

        FIX: use ``self.db`` like every other method in this class; the
        original referenced a module-level ``db`` global, which only works
        when the class is defined inside the web2py model context.
        """
        content = self.db.plugin_video_content(item_id=item.unique_id)
        output = self.response.render(
            'plugin_video/full_text.txt',
            dict(content=content, item=item))
        return unicode(output.decode('utf-8'))
# define tables of this plugin
def _():
plugins = PluginManager('video', app=None)
if plugins.video.app is not None:
# this will register the content/type on the application
plugins.video.app.registerContentType('video', ContentVideo())
if not hasattr(db, 'plugin_video_content'):
# the video files
tbl = db.define_table(
'plugin_video_rendition',
Field('purpose', 'string', length=50, default='web'),
Field(
'video', 'upload', uploadseparate=True, autodelete=True
),
)
tbl.purpose.comment = T('''
Descrive the purpose of this rendition of the video, e.g.:
web, social networks, etc.
''')
tbl.purpose.label = T('Purpose')
tbl.video.label = T('Video')
tbl.video.requires = IS_NOT_EMPTY()
# configure ckeditor
editor = CKEditor(db=db)
# content description
tbl = db.define_table(
'plugin_video_content',
Field('credit_line', 'string', length=150, default=''),
Field(
'description', 'text',
label=T('Description'),
default=''
),
Field('renditions', 'list:reference plugin_video_rendition'),
Field('item_id', 'string', length=64),
auth.signature,
)
tbl.item_id.readable = False
tbl.item_id.writable = False
tbl.credit_line.label = T("Credit line")
tbl.description.label = T('Description')
tbl.description.widget = editor.widget
tbl.renditions.label = T("Renditions")
tbl.renditions.default = []
tbl.renditions.writable = False
tbl.renditions.readable = False
# enable record versioning
tbl._enable_record_versioning()
# add callback for item cleanup on delete.
def __plugin_video_item_on_delete(s):
item = s.select().first()
if item.item_type == 'video':
# cleanup here
cnt = db.plugin_video_content(item_id=item.unique_id)
db(
db.plugin_video_rendition.id.belongs(
cnt.renditions)).delete()
db(
db.plugin_video_content.item_id == item.unique_id
).delete()
return False # remember to procced
db.item._before_delete.insert(0, __plugin_video_item_on_delete)
return
_()
| ybenitezf/nstock | models/plugin_video.py | Python | mit | 5,245 |
from cebulany.models import Member
class MemberQuery:
    """Query-builder helpers for Member listings."""

    @classmethod
    def get_list_query(cls, name=None, limit=None, order="name"):
        """Return a Member query, optionally filtered by *name* and capped.

        *order* is "name" (alphabetical) or "table" (active members first,
        then by join date and name).
        """
        query = Member.query
        if order == "name":
            query = query.order_by(Member.name)
        elif order == "table":
            query = query.order_by(
                Member.is_active.desc(), Member.join_date, Member.name)
        filtered = cls._filter_by_name(query, name)
        return cls._limit(filtered, limit)

    @staticmethod
    def _filter_by_name(query, name):
        """Require every whitespace-separated word of *name* (ILIKE)."""
        stripped = name.strip() if name else ""
        if not stripped:
            return query
        conditions = [
            Member.name.ilike('%%%s%%' % word.replace('%', r'\%'))
            for word in stripped.split()
        ]
        return query.filter(*conditions)

    @staticmethod
    def _limit(query, limit):
        """Apply a LIMIT unless *limit* is None."""
        return query if limit is None else query.limit(limit)
| hackerspace-silesia/cebulany-manager | cebulany/queries/member.py | Python | mit | 916 |
import subprocess
import os
import signal
import ctypes
class ProcessManager(object):
    """Spawn a child process and collect its stdout/stderr for tests."""

    def __init__(self):
        self._process = None
        self._stdout = None
        self._stderr = None

    def start_process(self, *args):
        """Launch *args* as a child process, capturing output as text."""
        self._process = subprocess.Popen(
            args,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            # Text mode: pipes yield str (not bytes) on Python 3.
            universal_newlines=True,
        )
        self._stdout = None
        self._stderr = None

    def send_terminate(self, signal_name):
        """Deliver *signal_name* to the child (Ctrl-C event on Windows)."""
        if os.name == 'nt':
            self._set_handler_to_ignore_one_sigint()
            ctypes.windll.kernel32.GenerateConsoleCtrlEvent(0, 0)
        else:
            os.kill(self._process.pid, getattr(signal, signal_name))

    def _set_handler_to_ignore_one_sigint(self):
        """Swallow exactly one SIGINT, then restore the previous handler."""
        previous = signal.getsignal(signal.SIGINT)

        def restore(signum, frame):
            signal.signal(signal.SIGINT, previous)

        signal.signal(signal.SIGINT, restore)

    def get_stdout(self):
        self.wait_until_finished()
        return self._stdout

    def get_stderr(self):
        self.wait_until_finished()
        return self._stderr

    def log_stdout_and_stderr(self):
        """Print both captured streams, labelled."""
        self.wait_until_finished()
        for label, text in (('STDOUT:', self._stdout),
                            ('STDERR:', self._stderr)):
            print(label)
            print(text)

    def wait_until_finished(self):
        """Block until the child exits; cache its output (idempotent)."""
        if self._stdout is None:
            self._stdout, self._stderr = self._process.communicate()

    def get_runner(self, interpreter, robot_path):
        """Return the argv prefix that runs Robot Framework's run.py."""
        run = os.path.join(robot_path, 'run.py')
        if 'jython' not in interpreter:
            return [interpreter, run]
        jython_home = os.getenv('JYTHON_HOME')
        if not jython_home:
            raise RuntimeError('This test requires JYTHON_HOME environment variable to be set.')
        return [self._get_java(), '-Dpython.home=%s' % jython_home,
                '-classpath', self._get_classpath(jython_home),
                'org.python.util.jython', run]

    def _get_java(self):
        """Resolve the java binary, preferring JAVA_HOME when set."""
        java_home = os.getenv('JAVA_HOME')
        if not java_home:
            return 'java'
        # JAVA_HOME is sometimes quoted on Windows; strip one pair of quotes.
        if java_home[:1] == '"' and java_home[-1:] == '"':
            java_home = java_home[1:-1]
        return os.path.join(java_home, 'bin', 'java')

    def _get_classpath(self, jython_home):
        """Combine jython.jar with any existing CLASSPATH."""
        jython_jar = os.path.join(jython_home, 'jython.jar')
        combined = jython_jar + os.pathsep + os.getenv('CLASSPATH', '')
        return combined.strip(':;')
| userzimmermann/robotframework-python3 | atest/robot/running/ProcessManager.py | Python | apache-2.0 | 2,776 |
''' script to move users from old cognitive atlas mysql to postgres db with
schema based on a custom django users model'''
import os
import pymysql
import psycopg2
# postgres schema
# id | integer
# password | character varying(128)
# last_login | timestamp with time zone
# is_superuser | boolean
# username | character varying(150)
# first_name | character varying(30)
# last_name | character varying(30)
# email | character varying(254)
# is_staff | boolean
# is_active | boolean
# date_joined | timestamp with time zone
# obfuscate | boolean
# interest_tags | character varying(512)
# old_id | character varying(36)
# mysql schema
# id | varchar(36)
# user_first_name | varchar(255)
# user_last_name | varchar(255)
# user_rank | varchar(10)
# user_full_name | varchar(255)
# user_honorific_prefix | varchar(255)
# user_honorific_suffix | varchar(255)
# user_org_id | varchar(36)
# user_title | varchar(255)
# user_interest_tags | varchar(512)
# user_pass | varchar(512)
# user_telephone | varchar(255)
# user_email | varchar(255)
# event_stamp | timestamp
# user_gender | varchar(255)
# user_ethnicity | varchar(255)
# user_accepted | varchar(50)
# user_race | varchar(255)
# user_specialist_tags | varchar(2000)
# user_obfuscate | varchar(255)
# user_handle | varchar(255)
# add title, and specialist tags
def user_imports():
    """Migrate users from the legacy MySQL ``cogat`` DB into Postgres.

    Reads every row of ``table_user`` in MySQL and inserts those whose
    ``old_id`` is not yet present into the ``users_user`` table in
    Postgres. Postgres connection settings come from the POSTGRES_*
    environment variables; MySQL is assumed local.
    """
    post_conn = psycopg2.connect(
        dbname=os.environ.get('POSTGRES_NAME'),
        user=os.environ.get('POSTGRES_USER'),
        password=os.environ.get('POSTGRES_PASSWORD'),
        host=os.environ.get('POSTGRES_HOST')
    )
    post_cur = post_conn.cursor()

    my_conn = pymysql.connect(host='localhost', db='cogat', user='root')
    my_cur = my_conn.cursor()

    select_query = '''select id, user_first_name, user_last_name, user_interest_tags,
        user_pass, user_email, user_accepted, user_obfuscate, user_title, user_specialist_tags, user_handle, event_stamp, user_rank
        FROM table_user'''

    # SECURITY FIX: use parameterized queries instead of str.format so user
    # data (names with apostrophes, tags, ...) cannot break or inject SQL.
    insert_query = '''INSERT INTO users_user (password, email, is_active,
                      obfuscate, interest_tags, old_id, title, specialist_tags,
                      first_name, last_name, is_superuser, username, is_staff,
                      date_joined, rank)
                      VALUES(%s, %s, %s, %s, %s, %s, %s, %s,
                             %s, %s, False, %s, False, %s, %s)'''

    old_user_lookup = "SELECT old_id from users_user where old_id=%s"

    my_cur.execute(select_query)
    old_users = my_cur.fetchall()
    my_cur.close()
    my_conn.close()

    for old_user in old_users:
        # Skip users already migrated.
        post_cur.execute(old_user_lookup, (old_user[0],))
        if not post_cur.fetchone():
            print(old_user)
            # Real booleans instead of the strings 'True'/'False'.
            obfuscate = old_user[7] != 'show'
            is_active = old_user[6] == 'on'
            password = "sha1$${}".format(old_user[4])
            params = (
                password,
                old_user[5],    # email
                is_active,
                obfuscate,
                old_user[3],    # interest tags
                old_user[0],    # old_id
                old_user[8],    # title
                old_user[9],    # specialist tags
                old_user[1],    # first name
                old_user[2],    # last name
                old_user[5],    # username (the email, as before)
                old_user[11],   # date joined (event_stamp)
                old_user[12],   # rank
            )
            post_cur.execute(insert_query, params)
            post_conn.commit()

    # Close Postgres resources (the original left them open).
    post_cur.close()
    post_conn.close()


if __name__ == '__main__':
    user_imports()
| rwblair/cogat | scripts/user_import.py | Python | mit | 3,717 |
# coding: utf8
"""
cairocffi.surface
~~~~~~~~~~~~~~~~~
Bindings for the various types of surface objects.
:copyright: Copyright 2013 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
import io
import sys
import ctypes
import weakref
from . import ffi, cairo, _check_status, constants
from .fonts import FontOptions, _encode_string
# cairo user-data key: used in Surface.__init__ to attach a KeepAlive for the
# surface’s target object so it lives at least as long as the surface.
SURFACE_TARGET_KEY = ffi.new('cairo_user_data_key_t *')
def _make_read_func(file_obj):
    """Return a CFFI callback that reads from a file-like object.

    The callback copies exactly *length* bytes from *file_obj* into the
    cairo-provided buffer, or reports STATUS_READ_ERROR on short reads.
    """
    @ffi.callback("cairo_read_func_t", error=constants.STATUS_READ_ERROR)
    def read_func(_closure, data, length):
        string = file_obj.read(length)
        if len(string) < length:  # EOF too early
            # NOTE(review): a short read from a raw (unbuffered) stream would
            # also be reported as an error — confirm callers always pass
            # buffered file objects.
            return constants.STATUS_READ_ERROR
        ffi.buffer(data, length)[:len(string)] = string
        return constants.STATUS_SUCCESS
    return read_func
def _make_write_func(file_obj):
    """Return a CFFI callback that writes to a file-like object.

    Passing ``None`` yields ``ffi.NULL`` so cairo sees "no callback".
    """
    if file_obj is None:
        return ffi.NULL
    @ffi.callback("cairo_write_func_t", error=constants.STATUS_WRITE_ERROR)
    def write_func(_closure, data, length):
        file_obj.write(ffi.buffer(data, length))
        return constants.STATUS_SUCCESS
    return write_func
def _encode_filename(filename):
    """Encode *filename* with the filesystem encoding (if not already bytes)
    and return it as a NUL-terminated ``char[]`` cdata for cairo.
    """
    if not isinstance(filename, bytes):
        filename = filename.encode(sys.getfilesystemencoding())
    return ffi.new('char[]', filename)
def from_buffer(obj):
    """Return ``(pointer_address, length_in_bytes)`` for a buffer object."""
    buffer_info = getattr(obj, 'buffer_info', None)
    if buffer_info is not None:
        # array.array: it reports its address and *element* count directly,
        # so scale by the element size to get bytes.
        address, item_count = buffer_info()
        return address, item_count * obj.itemsize
    # Generic writable buffer: let ctypes locate the address.
    # XXX Unfortunately ctypes.c_char.from_buffer
    # does not have length information,
    # and we’re not sure that len(obj) is measured in bytes.
    # (It’s not for array.array, though that is taken care of.)
    return ctypes.addressof(ctypes.c_char.from_buffer(obj)), len(obj)
class KeepAlive(object):
    """
    Keep some objects alive until a callback is called.

    :attr:`closure` is a tuple of cairo_destroy_func_t and void* cdata objects,
    as expected by cairo_surface_set_mime_data().
    Either :meth:`save` must be called before the callback,
    or none of them must be called.
    """
    # Class-level registry: saved instances live here (and thus stay alive)
    # until cairo invokes the destroy callback.
    instances = set()

    def __init__(self, *objects):
        self.objects = objects
        # The callback closes over a *weak* reference so the C callback
        # itself never forces the instance to stay alive; lifetime is
        # controlled solely by the ``instances`` registry via save().
        weakself = weakref.ref(self)
        def closure(_):
            value = weakself()
            if value is not None:
                value.instances.remove(value)
        callback = ffi.callback(
            'cairo_destroy_func_t', closure)
        # cairo wants a non-NULL closure pointer.
        self.closure = (callback, callback)

    def save(self):
        """Start keeping a reference to the passed objects."""
        self.instances.add(self)
class Surface(object):
"""The base class for all surface types.
Should not be instantiated directly, but see :doc:`cffi_api`.
An instance may be returned for cairo surface types
that are not (yet) defined in cairocffi.
A :class:`Surface` represents an image,
either as the destination of a drawing operation
or as source when drawing onto another surface.
To draw to a :class:`Surface`,
create a cairo :class:`Context` with the surface as the target.
There are different sub-classes of :class:`Surface`
for different drawing backends;
for example, :class:`ImageSurface` is a bitmap image in memory.
The initial contents of a surface after creation
depend upon the manner of its creation.
If cairo creates the surface and backing storage for the user,
it will be initially cleared;
for example, :class:`ImageSurface` and :meth:`create_similar`.
Alternatively, if the user passes in a reference
to some backing storage and asks cairo to wrap that in a :class:`Surface`,
then the contents are not modified;
for example, :class:`ImageSurface` with a :obj:`data` argument.
"""
    def __init__(self, pointer, target_keep_alive=None):
        """Wrap a ``cairo_surface_t *`` cdata pointer, taking ownership of
        one reference (the surface is destroyed when the wrapper is GC'd).

        :param pointer: cdata pointer to the C surface.
        :param target_keep_alive:
            Optional object stored as cairo user data under
            ``SURFACE_TARGET_KEY`` so it outlives the surface.
        """
        self._pointer = ffi.gc(pointer, cairo.cairo_surface_destroy)
        self._check_status()
        if target_keep_alive not in (None, ffi.NULL):
            keep_alive = KeepAlive(target_keep_alive)
            _check_status(cairo.cairo_surface_set_user_data(
                self._pointer, SURFACE_TARGET_KEY, *keep_alive.closure))
            keep_alive.save()
    def _check_status(self):
        """Raise (via module-level ``_check_status``) if the underlying
        surface is in an error state."""
        _check_status(cairo.cairo_surface_status(self._pointer))
    @staticmethod
    def _from_pointer(pointer, incref):
        """Wrap an existing :c:type:`cairo_surface_t *` cdata pointer.

        :type incref: bool
        :param incref:
            Whether increase the :ref:`reference count <refcounting>` now.
        :return:
            A new instance of :class:`Surface` or one of its sub-classes,
            depending on the surface’s type.
        """
        if pointer == ffi.NULL:
            raise ValueError('Null pointer')
        if incref:
            cairo.cairo_surface_reference(pointer)
        # Instantiate the wrapper class matching the C surface type without
        # running that class’s __init__ (which may expect other arguments).
        self = object.__new__(SURFACE_TYPE_TO_CLASS.get(
            cairo.cairo_surface_get_type(pointer), Surface))
        Surface.__init__(self, pointer)  # Skip the subclass’s __init__
        return self
    def create_similar(self, content, width, height):
        """Create a new surface that is as compatible as possible
        for uploading to and the use in conjunction with this surface.
        For example the new surface will have the same fallback resolution
        and :class:`FontOptions`.
        Generally, the new surface will also use the same backend as other,
        unless that is not possible for some reason.

        Initially the surface contents are all 0
        (transparent if contents have transparency, black otherwise.)

        Use :meth:`create_similar_image` if you need an image surface
        which can be painted quickly to the target surface.

        :param content: the :ref:`CONTENT` string for the new surface.
        :param width: width of the new surface (in device-space units)
        :param height: height of the new surface (in device-space units)
        :type content: str
        :type width: int
        :type height: int
        :returns: A new instance of :class:`Surface` or one of its subclasses.
        """
        # incref=False: the wrapper takes ownership of the reference
        # returned by cairo.
        return Surface._from_pointer(
            cairo.cairo_surface_create_similar(
                self._pointer, content, width, height),
            incref=False)
    def create_similar_image(self, content, width, height):
        """
        Create a new image surface that is as compatible as possible
        for uploading to and the use in conjunction with this surface.
        However, this surface can still be used like any normal image surface.

        Initially the surface contents are all 0
        (transparent if contents have transparency, black otherwise.)

        Use :meth:`create_similar` if you don't need an image surface.

        :param format: the :ref:`FORMAT` string for the new surface
        :param width: width of the new surface, (in device-space units)
        :param height: height of the new surface (in device-space units)
        :type format: str
        :type width: int
        :type height: int
        :returns: A new :class:`ImageSurface` instance.
        """
        # NOTE(review): the parameter is named ``content`` but is passed as
        # the *format* argument of cairo_surface_create_similar_image (the
        # docstring above documents ``format``) — callers must pass a FORMAT
        # string; keeping the name for keyword-argument compatibility.
        return Surface._from_pointer(
            cairo.cairo_surface_create_similar_image(
                self._pointer, content, width, height),
            incref=False)
    def create_for_rectangle(self, x, y, width, height):
        """
        Create a new surface that is a rectangle within this surface.

        All operations drawn to this surface are then clipped and translated
        onto the target surface.
        Nothing drawn via this sub-surface outside of its bounds
        is drawn onto the target surface,
        making this a useful method for passing constrained child surfaces
        to library routines that draw directly onto the parent surface,
        i.e. with no further backend allocations,
        double buffering or copies.

        .. note::

            As of cairo 1.12,
            the semantics of subsurfaces have not been finalized yet
            unless the rectangle is in full device units,
            is contained within the extents of the target surface,
            and the target or subsurface's device transforms are not changed.

        :param x:
            The x-origin of the sub-surface
            from the top-left of the target surface (in device-space units)
        :param y:
            The y-origin of the sub-surface
            from the top-left of the target surface (in device-space units)
        :param width:
            Width of the sub-surface (in device-space units)
        :param height:
            Height of the sub-surface (in device-space units)
        :type x: float
        :type y: float
        :type width: float
        :type height: float
        :returns:
            A new :class:`Surface` object.

        *New in cairo 1.10.*
        """
        # incref=False: we own the new reference returned by cairo.
        return Surface._from_pointer(
            cairo.cairo_surface_create_for_rectangle(
                self._pointer, x, y, width, height),
            incref=False)
    def get_content(self):
        """Returns the :ref:`CONTENT` string of this surface,
        which indicates whether the surface contains color
        and/or alpha information.

        :returns: one of the :ref:`CONTENT` values.
        """
        # Thin pass-through to cairo_surface_get_content.
        return cairo.cairo_surface_get_content(self._pointer)
    def has_show_text_glyphs(self):
        """Returns whether the surface supports sophisticated
        :meth:`Context.show_text_glyphs` operations.
        That is, whether it actually uses the text and cluster data
        provided to a :meth:`Context.show_text_glyphs` call.

        .. note::

            Even if this method returns :obj:`False`,
            :meth:`Context.show_text_glyphs` operation targeted at surface
            will still succeed.
            It just will act like a :meth:`Context.show_glyphs` operation.
            Users can use this method to avoid computing UTF-8 text
            and cluster mapping if the target surface does not use it.
        """
        # Normalize cairo's C int (cairo_bool_t) to a Python bool.
        return bool(cairo.cairo_surface_has_show_text_glyphs(self._pointer))
    def set_device_offset(self, x_offset, y_offset):
        """ Sets an offset that is added to the device coordinates
        determined by the CTM when drawing to surface.

        One use case for this method is
        when we want to create a :class:`Surface` that redirects drawing
        for a portion of an onscreen surface
        to an offscreen surface in a way that is
        completely invisible to the user of the cairo API.
        Setting a transformation via :meth:`Context.translate`
        isn't sufficient to do this,
        since methods like :meth:`Context.device_to_user`
        will expose the hidden offset.

        Note that the offset affects drawing to the surface
        as well as using the surface in a source pattern.

        :param x_offset:
            The offset in the X direction, in device units
        :param y_offset:
            The offset in the Y direction, in device units
        :type x_offset: float
        :type y_offset: float
        """
        cairo.cairo_surface_set_device_offset(
            self._pointer, x_offset, y_offset)
        # Re-check: the call may put the surface in an error state.
        self._check_status()
    def get_device_offset(self):
        """Returns the previous device offset set by :meth:`set_device_offset`.

        :returns: ``(x_offset, y_offset)``
        """
        # Out-parameters: cairo writes both doubles into this 2-element array.
        offsets = ffi.new('double[2]')
        cairo.cairo_surface_get_device_offset(
            self._pointer, offsets + 0, offsets + 1)
        return tuple(offsets)
    def set_fallback_resolution(self, x_pixels_per_inch, y_pixels_per_inch):
        """
        Set the horizontal and vertical resolution for image fallbacks.

        When certain operations aren't supported natively by a backend,
        cairo will fallback by rendering operations to an image
        and then overlaying that image onto the output.
        For backends that are natively vector-oriented,
        this method can be used to set the resolution
        used for these image fallbacks,
        (larger values will result in more detailed images,
        but also larger file sizes).

        Some examples of natively vector-oriented backends are
        the ps, pdf, and svg backends.

        For backends that are natively raster-oriented,
        image fallbacks are still possible,
        but they are always performed at the native device resolution.
        So this method has no effect on those backends.

        .. note::

            The fallback resolution only takes effect
            at the time of completing a page
            (with :meth:`show_page` or :meth:`copy_page`)
            so there is currently no way to have
            more than one fallback resolution in effect on a single page.

        The default fallback resolution is
        300 pixels per inch in both dimensions.

        :param x_pixels_per_inch: horizontal resolution in pixels per inch
        :type x_pixels_per_inch: float
        :param y_pixels_per_inch: vertical resolution in pixels per inch
        :type y_pixels_per_inch: float
        """
        cairo.cairo_surface_set_fallback_resolution(
            self._pointer, x_pixels_per_inch, y_pixels_per_inch)
        # Re-check: the call may put the surface in an error state.
        self._check_status()
    def get_fallback_resolution(self):
        """Returns the previous fallback resolution
        set by :meth:`set_fallback_resolution`,
        or default fallback resolution if never set.

        :returns: ``(x_pixels_per_inch, y_pixels_per_inch)``
        """
        # Out-parameters: cairo writes both doubles into this 2-element array.
        ppi = ffi.new('double[2]')
        cairo.cairo_surface_get_fallback_resolution(
            self._pointer, ppi + 0, ppi + 1)
        return tuple(ppi)
    def get_font_options(self):
        """ Retrieves the default font rendering options for the surface.
        This allows display surfaces to report the correct subpixel order
        for rendering on them,
        print surfaces to disable hinting of metrics and so forth.
        The result can then be used with :class:`ScaledFont`.

        :returns: A new :class:`FontOptions` object.
        """
        font_options = FontOptions()
        # cairo fills the FontOptions object in place through its pointer.
        cairo.cairo_surface_get_font_options(
            self._pointer, font_options._pointer)
        return font_options
    def set_mime_data(self, mime_type, data):
        """
        Attach an image in the format :obj:`mime_type` to this surface.

        To remove the data from a surface,
        call this method with same mime type and :obj:`None` for data.

        The attached image (or filename) data can later
        be used by backends which support it
        (currently: PDF, PS, SVG and Win32 Printing surfaces)
        to emit this data instead of making a snapshot of the surface.
        This approach tends to be faster
        and requires less memory and disk space.

        The recognized MIME types are the following:

        ``"image/png"``
            The Portable Network Graphics image file format (ISO/IEC 15948).
        ``"image/jpeg"``
            The Joint Photographic Experts Group (JPEG)
            image coding standard (ISO/IEC 10918-1).
        ``"image/jp2"``
            The Joint Photographic Experts Group (JPEG) 2000
            image coding standard (ISO/IEC 15444-1).
        ``"text/x-uri"``
            URL for an image file (unofficial MIME type).

        See corresponding backend surface docs
        for details about which MIME types it can handle.
        Caution: the associated MIME data will be discarded
        if you draw on the surface afterwards.
        Use this method with care.

        :param mime_type: The MIME type of the image data.
        :type mime_type: ASCII string
        :param data: The image data to attach to the surface.
        :type data: bytes

        *New in cairo 1.10.*
        """
        mime_type = ffi.new('char[]', mime_type.encode('utf8'))
        if data is None:
            # NULL data with the same mime type removes any attached data.
            _check_status(cairo.cairo_surface_set_mime_data(
                self._pointer, mime_type, ffi.NULL, 0, ffi.NULL, ffi.NULL))
        else:
            # TODO: avoid making a copy here if possible.
            length = len(data)
            data = ffi.new('char[]', data)
            # KeepAlive ties the copied buffer (and mime type) to the
            # surface: cairo’s destroy callback releases them later.
            keep_alive = KeepAlive(data, mime_type)
            _check_status(cairo.cairo_surface_set_mime_data(
                self._pointer, mime_type, data, length,
                *keep_alive.closure))
            keep_alive.save()  # Only on success
def get_mime_data(self, mime_type):
    """Return mime data previously attached to surface
    using the specified mime type.

    :param mime_type: The MIME type of the image data.
    :type mime_type: ASCII string
    :returns:
        A CFFI buffer object, or :obj:`None`
        if no data has been attached with the given mime type.

    *New in cairo 1.10.*

    """
    buffer_address = ffi.new('unsigned char **')
    buffer_length = ffi.new('unsigned long *')
    mime_type = ffi.new('char[]', mime_type.encode('utf8'))
    # cairo fills in the address/length out-parameters; the returned
    # memory is owned by cairo (it is the data attached with
    # set_mime_data), so the buffer below is a view, not a copy.
    cairo.cairo_surface_get_mime_data(
        self._pointer, mime_type, buffer_address, buffer_length)
    return (ffi.buffer(buffer_address[0], buffer_length[0])
            if buffer_address[0] != ffi.NULL else None)
def supports_mime_type(self, mime_type):
    """Return whether surface supports :obj:`mime_type`.

    :param mime_type: The MIME type of the image data.
    :type mime_type: ASCII string
    :returns: :obj:`True` if the surface supports the MIME type.

    *New in cairo 1.12.*

    """
    mime_type = ffi.new('char[]', mime_type.encode('utf8'))
    return bool(cairo.cairo_surface_supports_mime_type(
        self._pointer, mime_type))
def mark_dirty(self):
    """Tells cairo that drawing has been done to surface
    using means other than cairo,
    and that cairo should reread any cached areas.
    Note that you must call :meth:`flush` before doing such drawing.

    """
    cairo.cairo_surface_mark_dirty(self._pointer)
    # Raise CairoError if the call left the surface in an error state.
    self._check_status()
def mark_dirty_rectangle(self, x, y, width, height):
    """Like :meth:`mark_dirty`,
    but drawing has been done only to the specified rectangle,
    so that cairo can retain cached contents
    for other parts of the surface.

    Any cached clip set on the surface will be reset by this method,
    to make sure that future cairo calls have the clip set
    that they expect.

    :param x: X coordinate of dirty rectangle.
    :param y: Y coordinate of dirty rectangle.
    :param width: Width of dirty rectangle.
    :param height: Height of dirty rectangle.
    :type x: float
    :type y: float
    :type width: float
    :type height: float

    """
    cairo.cairo_surface_mark_dirty_rectangle(
        self._pointer, x, y, width, height)
    self._check_status()
def show_page(self):
    """Emits and clears the current page
    for backends that support multiple pages.
    Use :meth:`copy_page` if you don't want to clear the page.

    :meth:`Context.show_page` is a convenience method for this.

    """
    cairo.cairo_surface_show_page(self._pointer)
    self._check_status()
def copy_page(self):
    """Emits the current page for backends that support multiple pages,
    but doesn't clear it,
    so that the contents of the current page will be retained
    for the next page.

    Use :meth:`show_page` if you want to get an empty page
    after the emission.

    """
    cairo.cairo_surface_copy_page(self._pointer)
    self._check_status()
def flush(self):
    """Do any pending drawing for the surface
    and also restore any temporary modifications
    cairo has made to the surface's state.
    This method must be called before switching
    from drawing on the surface with cairo
    to drawing on it directly with native APIs.
    If the surface doesn't support direct access,
    then this method does nothing.

    """
    cairo.cairo_surface_flush(self._pointer)
    self._check_status()
def finish(self):
    """This method finishes the surface
    and drops all references to external resources.
    For example, for the Xlib backend it means that
    cairo will no longer access the drawable, which can be freed.

    After calling :meth:`finish` the only valid operations on a surface
    are getting and setting user data, flushing and finishing it.
    Further drawing to the surface will not affect the surface
    but will instead trigger a :class:`CairoError`
    with a ``SURFACE_FINISHED`` status.

    When the surface is garbage-collected, cairo will call :meth:`finish()`
    if it hasn't been called already,
    before freeing the resources associated with the surface.

    """
    cairo.cairo_surface_finish(self._pointer)
    self._check_status()
def write_to_png(self, target=None):
    """Write the surface's contents out as a PNG image.

    :param target:
        A filename,
        a binary mode file-like object with a :meth:`~file.write` method,
        or :obj:`None`.
    :returns:
        The PNG contents as a byte string when :obj:`target`
        is :obj:`None`, otherwise :obj:`None`.

    """
    # With no explicit target, capture the PNG bytes in memory.
    wants_bytes = target is None
    if wants_bytes:
        target = io.BytesIO()
    if hasattr(target, 'write'):
        # File-like target: stream through a write callback.
        _check_status(cairo.cairo_surface_write_to_png_stream(
            self._pointer, _make_write_func(target), ffi.NULL))
    else:
        # Otherwise treat the target as a filename.
        _check_status(cairo.cairo_surface_write_to_png(
            self._pointer, _encode_filename(target)))
    return target.getvalue() if wants_bytes else None
class ImageSurface(Surface):
    """Creates an image surface of the specified format and dimensions.

    If :obj:`data` is not :obj:`None`
    its initial contents will be used as the initial image contents;
    you must explicitly clear the buffer,
    using, for example, :meth:`Context.rectangle` and :meth:`Context.fill`
    if you want it cleared.

    .. note::

        Currently only :class:`array.array` buffers are supported on PyPy.

    Otherwise, the surface contents are all initially 0.
    (Specifically, within each pixel, each color or alpha channel
    belonging to format will be 0.
    The contents of bits within a pixel,
    but not belonging to the given format are undefined).

    :param format: :ref:`FORMAT` string for the surface to create.
    :param width: Width of the surface, in pixels.
    :param height: Height of the surface, in pixels.
    :param data:
        Buffer supplied in which to write contents,
        or :obj:`None` to create a new buffer.
    :param stride:
        The number of bytes between the start of rows
        in the buffer as allocated.
        This value should always be computed by :meth:`format_stride_for_width`
        before allocating the data buffer.
        If omitted but :obj:`data` is given,
        :meth:`format_stride_for_width` is used.
    :type format: str
    :type width: int
    :type height: int
    :type stride: int

    """
    def __init__(self, format, width, height, data=None, stride=None):
        if data is None:
            pointer = cairo.cairo_image_surface_create(format, width, height)
        else:
            if stride is None:
                stride = self.format_stride_for_width(format, width)
            address, length = from_buffer(data)
            # Reject buffers too small for the requested geometry before
            # handing the raw pointer to cairo.
            if length < stride * height:
                raise ValueError('Got a %d bytes buffer, needs at least %d.'
                                 % (length, stride * height))
            pointer = cairo.cairo_image_surface_create_for_data(
                ffi.cast('char*', address), format, width, height, stride)
        # Keep the user-supplied buffer alive for the surface's lifetime,
        # since cairo writes into it directly.
        Surface.__init__(self, pointer, target_keep_alive=data)

    @classmethod
    def create_for_data(cls, data, format, width, height, stride=None):
        """Same as ``ImageSurface(format, width, height, data, stride)``.
        Exists for compatibility with pycairo.

        """
        return cls(format, width, height, data, stride)

    @staticmethod
    def format_stride_for_width(format, width):
        """This method provides a stride value (byte offset between rows)
        that will respect all alignment requirements
        of the accelerated image-rendering code within cairo.
        Typical usage will be of the form::

            from cairocffi import ImageSurface
            stride = ImageSurface.format_stride_for_width(format, width)
            data = bytearray(stride * height)
            surface = ImageSurface(format, width, height, data, stride)

        :param format: A :ref:`FORMAT` string.
        :param width: The desired width of the surface, in pixels.
        :type format: str
        :type width: int
        :returns:
            The appropriate stride to use given the desired format and width,
            or -1 if either the format is invalid or the width too large.

        """
        return cairo.cairo_format_stride_for_width(format, width)

    @classmethod
    def create_from_png(cls, source):
        """Decode a PNG file into a new image surface.

        :param source:
            A filename or
            a binary mode file-like object with a :meth:`~file.read` method.
            If you already have a byte string in memory,
            use :class:`io.BytesIO`.
        :returns: A new :class:`ImageSurface` instance.

        """
        if hasattr(source, 'read'):
            read_func = _make_read_func(source)
            pointer = cairo.cairo_image_surface_create_from_png_stream(
                read_func, ffi.NULL)
        else:
            pointer = cairo.cairo_image_surface_create_from_png(
                _encode_filename(source))
        # Bypass ImageSurface.__init__ (which would create a new surface):
        # wrap the pointer cairo just returned instead.
        self = object.__new__(cls)
        Surface.__init__(self, pointer)  # Skip ImageSurface.__init__
        return self

    def get_data(self):
        """Return the buffer pointing to the image’s pixel data,
        encoded according to the surface’s :ref:`FORMAT` string.

        A call to :meth:`flush` is required before accessing the pixel data
        to ensure that all pending drawing operations are finished.
        A call to :meth:`~Surface.mark_dirty` is required
        after the data is modified.

        :returns: A read-write CFFI buffer object.

        """
        return ffi.buffer(
            cairo.cairo_image_surface_get_data(self._pointer),
            # Buffer size is the full pixel data: stride bytes per row.
            self.get_stride() * self.get_height())

    def get_format(self):
        """Return the :ref:`FORMAT` string of the surface."""
        return cairo.cairo_image_surface_get_format(self._pointer)

    def get_width(self):
        """Return the width of the surface, in pixels."""
        return cairo.cairo_image_surface_get_width(self._pointer)

    def get_height(self):
        """Return the height of the surface, in pixels."""
        return cairo.cairo_image_surface_get_height(self._pointer)

    def get_stride(self):
        """Return the stride of the image surface in bytes
        (or 0 if surface is not an image surface).

        The stride is the distance in bytes
        from the beginning of one row of the image data
        to the beginning of the next row.

        """
        return cairo.cairo_image_surface_get_stride(self._pointer)
class PDFSurface(Surface):
    """Creates a PDF surface of the specified size in PostScript points
    to be written to :obj:`target`.

    Note that the size of individual pages of the PDF output can vary.
    See :meth:`set_size`.

    The PDF surface backend recognizes the following MIME types
    for the data attached to a surface (see :meth:`~Surface.set_mime_data`)
    when it is used as a source pattern for drawing on this surface:
    ``image/jpeg`` and
    ``image/jp2``.
    If any of them is specified, the PDF backend emits an image
    with the content of MIME data
    (with the ``/DCTDecode`` or ``/JPXDecode`` filter, respectively)
    instead of a surface snapshot
    (with the ``/FlateDecode`` filter),
    which typically produces PDF with a smaller file size.

    :obj:`target` can be :obj:`None` to specify no output.
    This will generate a surface that may be queried and used as a source,
    without generating a temporary file.

    :param target:
        A filename,
        a binary mode file-like object with a :meth:`~file.write` method,
        or :obj:`None`.
    :param width_in_points:
        Width of the surface, in points (1 point == 1/72.0 inch)
    :param height_in_points:
        Height of the surface, in points (1 point == 1/72.0 inch)
    :type width_in_points: float
    :type height_in_points: float

    """
    def __init__(self, target, width_in_points, height_in_points):
        if hasattr(target, 'write') or target is None:
            write_func = _make_write_func(target)
            pointer = cairo.cairo_pdf_surface_create_for_stream(
                write_func, ffi.NULL, width_in_points, height_in_points)
        else:
            write_func = None
            pointer = cairo.cairo_pdf_surface_create(
                _encode_filename(target), width_in_points, height_in_points)
        # Keep the stream callback alive as long as cairo may call it.
        Surface.__init__(self, pointer, target_keep_alive=write_func)

    def set_size(self, width_in_points, height_in_points):
        """Changes the size of a PDF surface
        for the current (and subsequent) pages.

        This method should only be called
        before any drawing operations have been performed on the current page.
        The simplest way to do this is to call this method
        immediately after creating the surface
        or immediately after completing a page with either
        :meth:`~Surface.show_page` or :meth:`~Surface.copy_page`.

        :param width_in_points:
            New width of the page, in points (1 point == 1/72.0 inch)
        :param height_in_points:
            New height of the page, in points (1 point == 1/72.0 inch)
        :type width_in_points: float
        :type height_in_points: float

        """
        cairo.cairo_pdf_surface_set_size(
            self._pointer, width_in_points, height_in_points)
        self._check_status()

    def restrict_to_version(self, version):
        """Restricts the generated PDF file to :obj:`version`.

        See :meth:`get_versions` for a list of available version values
        that can be used here.

        This method should only be called
        before any drawing operations have been performed on the given surface.
        The simplest way to do this is to call this method
        immediately after creating the surface.

        :param version: A :ref:`PDF_VERSION` string.

        *New in cairo 1.10.*

        """
        cairo.cairo_pdf_surface_restrict_to_version(self._pointer, version)
        self._check_status()

    @staticmethod
    def get_versions():
        """Return the list of supported PDF versions.
        See :meth:`restrict_to_version`.

        :return: A list of :ref:`PDF_VERSION` strings.

        *New in cairo 1.10.*

        """
        # cairo returns a pointer to a static array plus its length.
        versions = ffi.new('cairo_pdf_version_t const **')
        num_versions = ffi.new('int *')
        cairo.cairo_pdf_get_versions(versions, num_versions)
        versions = versions[0]
        return [versions[i] for i in range(num_versions[0])]

    @staticmethod
    def version_to_string(version):
        """Return the string representation of the given :ref:`PDF_VERSION`.
        See :meth:`get_versions` for a way to get
        the list of valid version ids.

        *New in cairo 1.10.*

        """
        c_string = cairo.cairo_pdf_version_to_string(version)
        # cairo returns NULL for an unknown version id.
        if c_string == ffi.NULL:
            raise ValueError(version)
        return ffi.string(c_string).decode('ascii')
class PSSurface(Surface):
    """Creates a PostScript surface of the specified size in PostScript points
    to be written to :obj:`target`.

    Note that the size of individual pages of the PostScript output can vary.
    See :meth:`set_size`.

    :obj:`target` can be :obj:`None` to specify no output.
    This will generate a surface that may be queried and used as a source,
    without generating a temporary file.

    The PostScript surface backend recognizes the ``image/jpeg`` MIME type
    for the data attached to a surface (see :meth:`~Surface.set_mime_data`)
    when it is used as a source pattern for drawing on this surface.
    If it is specified, the PostScript backend emits an image
    with the content of MIME data (with the ``/DCTDecode`` filter)
    instead of a surface snapshot (with the ``/FlateDecode`` filter),
    which typically produces PostScript with a smaller file size.

    :param target:
        A filename,
        a binary mode file-like object with a :meth:`~file.write` method,
        or :obj:`None`.
    :param width_in_points:
        Width of the surface, in points (1 point == 1/72.0 inch)
    :param height_in_points:
        Height of the surface, in points (1 point == 1/72.0 inch)
    :type width_in_points: float
    :type height_in_points: float

    """
    def __init__(self, target, width_in_points, height_in_points):
        if hasattr(target, 'write') or target is None:
            write_func = _make_write_func(target)
            pointer = cairo.cairo_ps_surface_create_for_stream(
                write_func, ffi.NULL, width_in_points, height_in_points)
        else:
            write_func = None
            pointer = cairo.cairo_ps_surface_create(
                _encode_filename(target), width_in_points, height_in_points)
        # Keep the stream callback alive as long as cairo may call it.
        Surface.__init__(self, pointer, target_keep_alive=write_func)

    def dsc_comment(self, comment):
        """Emit a comment into the PostScript output for the given surface.

        The comment is expected to conform to
        the PostScript Language Document Structuring Conventions (DSC).
        Please see that manual for details on the available comments
        and their meanings.
        In particular, the ``%%IncludeFeature`` comment allows
        a device-independent means of controlling printer device features.
        So the PostScript Printer Description Files Specification
        will also be a useful reference.

        The comment string must begin with a percent character (%)
        and the total length of the string
        (including any initial percent characters)
        must not exceed 255 bytes.
        Violating either of these conditions will
        place surface into an error state.
        But beyond these two conditions,
        this method will not enforce conformance of the comment
        with any particular specification.

        The comment string should not have a trailing newline.

        The DSC specifies different sections
        in which particular comments can appear.
        This method provides for comments to be emitted
        within three sections:
        the header, the Setup section, and the PageSetup section.
        Comments appearing in the first two sections
        apply to the entire document
        while comments in the BeginPageSetup section
        apply only to a single page.

        For comments to appear in the header section,
        this method should be called after the surface is created,
        but before a call to :meth:`dsc_begin_setup`.

        For comments to appear in the Setup section,
        this method should be called after a call to :meth:`dsc_begin_setup`
        but before a call to :meth:`dsc_begin_page_setup`.

        For comments to appear in the PageSetup section,
        this method should be called after a call to
        :meth:`dsc_begin_page_setup`.

        Note that it is only necessary to call :meth:`dsc_begin_page_setup`
        for the first page of any surface.
        After a call to :meth:`~Surface.show_page`
        or :meth:`~Surface.copy_page`
        comments are unambiguously directed
        to the PageSetup section of the current page.
        But it doesn't hurt to call this method
        at the beginning of every page
        as that consistency may make the calling code simpler.

        As a final note,
        cairo automatically generates several comments on its own.
        As such, applications must not manually generate
        any of the following comments:

        Header section: ``%!PS-Adobe-3.0``, ``%%Creator``, ``%%CreationDate``,
        ``%%Pages``, ``%%BoundingBox``, ``%%DocumentData``,
        ``%%LanguageLevel``, ``%%EndComments``.

        Setup section: ``%%BeginSetup``, ``%%EndSetup``.

        PageSetup section: ``%%BeginPageSetup``, ``%%PageBoundingBox``,
        ``%%EndPageSetup``.

        Other sections: ``%%BeginProlog``, ``%%EndProlog``, ``%%Page``,
        ``%%Trailer``, ``%%EOF``.

        :param comment: The DSC comment string, beginning with ``%``.

        """
        cairo.cairo_ps_surface_dsc_comment(
            self._pointer, _encode_string(comment))
        self._check_status()

    def dsc_begin_setup(self):
        """Indicate that subsequent calls to :meth:`dsc_comment` should
        direct comments to the Setup section of the PostScript output.

        This method should be called at most once per surface,
        and must be called before any call to :meth:`dsc_begin_page_setup`
        and before any drawing is performed to the surface.

        See :meth:`dsc_comment` for more details.

        """
        cairo.cairo_ps_surface_dsc_begin_setup(self._pointer)
        self._check_status()

    def dsc_begin_page_setup(self):
        """Indicate that subsequent calls to :meth:`dsc_comment` should
        direct comments to the PageSetup section of the PostScript output.

        This method is only needed for the first page of a surface.
        It must be called after any call to :meth:`dsc_begin_setup`
        and before any drawing is performed to the surface.

        See :meth:`dsc_comment` for more details.

        """
        cairo.cairo_ps_surface_dsc_begin_page_setup(self._pointer)
        self._check_status()

    def set_eps(self, eps):
        """If :obj:`eps` is True,
        the PostScript surface will output Encapsulated PostScript.

        This method should only be called
        before any drawing operations have been performed on the current page.
        The simplest way to do this is to call this method
        immediately after creating the surface.
        An Encapsulated PostScript file should never contain
        more than one page.

        :param eps: Whether to output Encapsulated PostScript.

        """
        cairo.cairo_ps_surface_set_eps(self._pointer, bool(eps))
        self._check_status()

    def get_eps(self):
        """Check whether the PostScript surface will output
        Encapsulated PostScript.

        """
        return bool(cairo.cairo_ps_surface_get_eps(self._pointer))

    def set_size(self, width_in_points, height_in_points):
        """Changes the size of a PostScript surface
        for the current (and subsequent) pages.

        This method should only be called
        before any drawing operations have been performed on the current page.
        The simplest way to do this is to call this method
        immediately after creating the surface
        or immediately after completing a page with either
        :meth:`~Surface.show_page` or :meth:`~Surface.copy_page`.

        :param width_in_points:
            New width of the page, in points (1 point == 1/72.0 inch)
        :param height_in_points:
            New height of the page, in points (1 point == 1/72.0 inch)
        :type width_in_points: float
        :type height_in_points: float

        """
        cairo.cairo_ps_surface_set_size(
            self._pointer, width_in_points, height_in_points)
        self._check_status()

    def restrict_to_level(self, level):
        """Restricts the generated PostScript file to :obj:`level`.

        See :meth:`get_levels` for a list of available level values
        that can be used here.

        This method should only be called
        before any drawing operations have been performed on the given surface.
        The simplest way to do this is to call this method
        immediately after creating the surface.

        :param level: A :ref:`PS_LEVEL` string.

        """
        cairo.cairo_ps_surface_restrict_to_level(self._pointer, level)
        self._check_status()

    @staticmethod
    def get_levels():
        """Return the list of supported PostScript levels.
        See :meth:`restrict_to_level`.

        :return: A list of :ref:`PS_LEVEL` strings.

        """
        # cairo returns a pointer to a static array plus its length.
        levels = ffi.new('cairo_ps_level_t const **')
        num_levels = ffi.new('int *')
        cairo.cairo_ps_get_levels(levels, num_levels)
        levels = levels[0]
        return [levels[i] for i in range(num_levels[0])]

    @staticmethod
    def ps_level_to_string(level):
        """Return the string representation of the given :ref:`PS_LEVEL`.
        See :meth:`get_levels` for a way to get
        the list of valid level ids.

        """
        c_string = cairo.cairo_ps_level_to_string(level)
        # cairo returns NULL for an unknown level id.
        if c_string == ffi.NULL:
            raise ValueError(level)
        return ffi.string(c_string).decode('ascii')
class SVGSurface(Surface):
    """Creates a SVG surface of the specified size in points
    to be written to :obj:`target`.

    :obj:`target` can be :obj:`None` to specify no output.
    This will generate a surface that may be queried and used as a source,
    without generating a temporary file.

    The SVG surface backend recognizes the following MIME types
    for the data attached to a surface (see :meth:`~Surface.set_mime_data`)
    when it is used as a source pattern for drawing on this surface:
    ``image/png``,
    ``image/jpeg`` and
    ``text/x-uri``.
    If any of them is specified, the SVG backend emits a href
    with the content of MIME data instead of a surface snapshot
    (PNG, Base64-encoded) in the corresponding image tag.

    The unofficial MIME type ``text/x-uri`` is examined first.
    If present, the URL is emitted as is:
    assuring the correctness of URL is left to the client code.
    If ``text/x-uri`` is not present,
    but ``image/jpeg`` or ``image/png`` is specified,
    the corresponding data is Base64-encoded and emitted.

    :param target:
        A filename,
        a binary mode file-like object with a :meth:`~file.write` method,
        or :obj:`None`.
    :param width_in_points:
        Width of the surface, in points (1 point == 1/72.0 inch)
    :param height_in_points:
        Height of the surface, in points (1 point == 1/72.0 inch)
    :type width_in_points: float
    :type height_in_points: float

    """
    def __init__(self, target, width_in_points, height_in_points):
        if hasattr(target, 'write') or target is None:
            write_func = _make_write_func(target)
            pointer = cairo.cairo_svg_surface_create_for_stream(
                write_func, ffi.NULL, width_in_points, height_in_points)
        else:
            write_func = None
            pointer = cairo.cairo_svg_surface_create(
                _encode_filename(target), width_in_points, height_in_points)
        # Keep the stream callback alive as long as cairo may call it.
        Surface.__init__(self, pointer, target_keep_alive=write_func)

    def restrict_to_version(self, version):
        """Restricts the generated SVG file to :obj:`version`.

        See :meth:`get_versions` for a list of available version values
        that can be used here.

        This method should only be called
        before any drawing operations have been performed on the given surface.
        The simplest way to do this is to call this method
        immediately after creating the surface.

        :param version: A :ref:`SVG_VERSION` string.

        """
        cairo.cairo_svg_surface_restrict_to_version(self._pointer, version)
        self._check_status()

    @staticmethod
    def get_versions():
        """Return the list of supported SVG versions.
        See :meth:`restrict_to_version`.

        :return: A list of :ref:`SVG_VERSION` strings.

        """
        # cairo returns a pointer to a static array plus its length.
        versions = ffi.new('cairo_svg_version_t const **')
        num_versions = ffi.new('int *')
        cairo.cairo_svg_get_versions(versions, num_versions)
        versions = versions[0]
        return [versions[i] for i in range(num_versions[0])]

    @staticmethod
    def version_to_string(version):
        """Return the string representation of the given :ref:`SVG_VERSION`.
        See :meth:`get_versions` for a way to get
        the list of valid version ids.

        """
        c_string = cairo.cairo_svg_version_to_string(version)
        # cairo returns NULL for an unknown version id.
        if c_string == ffi.NULL:
            raise ValueError(version)
        return ffi.string(c_string).decode('ascii')
class RecordingSurface(Surface):
    """A recording surface is a surface that records all drawing operations
    at the highest level of the surface backend interface,
    (that is, the level of paint, mask, stroke, fill, and show_text_glyphs).
    The recording surface can then be "replayed" against any target surface
    by using it as a source surface.

    If you want to replay a surface so that the results in :obj:`target`
    will be identical to the results that would have been obtained
    if the original operations applied to the recording surface
    had instead been applied to the target surface,
    you can use code like this::

        context = Context(target)
        context.set_source_surface(recording_surface, 0, 0)
        context.paint()

    A recording surface is logically unbounded,
    i.e. it has no implicit constraint on the size of the drawing surface.
    However, in practice this is rarely useful as you wish to replay
    against a particular target surface with known bounds.
    For this case, it is more efficient to specify the target extents
    to the recording surface upon creation.

    The recording phase of the recording surface is careful
    to snapshot all necessary objects (paths, patterns, etc.),
    in order to achieve accurate replay.

    :param content: The :ref:`CONTENT` string of the recording surface
    :param extents:
        The extents to record
        as a ``(x, y, width, height)`` tuple of floats in device units,
        or :obj:`None` to record unbounded operations.
        ``(x, y)`` are the coordinates of the top-left corner of the rectangle,
        ``(width, height)`` its dimensions.

    *New in cairo 1.10*

    *New in cairocffi 0.2*

    """
    def __init__(self, content, extents):
        # NULL extents means the recording surface is unbounded.
        extents = (ffi.new('cairo_rectangle_t *', extents)
                   if extents is not None else ffi.NULL)
        Surface.__init__(
            self, cairo.cairo_recording_surface_create(content, extents))

    def get_extents(self):
        """Return the extents of the recording-surface.

        :returns:
            A ``(x, y, width, height)`` tuple of floats,
            or :obj:`None` if the surface is unbounded.

        *New in cairo 1.12*

        """
        extents = ffi.new('cairo_rectangle_t *')
        # The C call returns false for an unbounded surface, in which
        # case we fall through and implicitly return None.
        if cairo.cairo_recording_surface_get_extents(self._pointer, extents):
            return (extents.x, extents.y, extents.width, extents.height)

    def ink_extents(self):
        """Measures the extents of the operations
        stored within the recording-surface.
        This is useful to compute the required size of an image surface
        (or equivalent) into which to replay the full sequence
        of drawing operations.

        :return: A ``(x, y, width, height)`` tuple of floats.

        """
        extents = ffi.new('double[4]')
        # Pointer arithmetic: pass the address of each of the four
        # doubles as a separate out-parameter.
        cairo.cairo_recording_surface_ink_extents(
            self._pointer, extents + 0, extents + 1, extents + 2, extents + 3)
        self._check_status()
        return tuple(extents)
class Win32PrintingSurface(Surface):
    """Creates a cairo surface that targets the given DC.

    The DC will be queried for its initial clip extents,
    and this will be used as the size of the cairo surface.
    The DC should be a printing DC; antialiasing will be ignored,
    and GDI will be used as much as possible to draw to the surface.

    The returned surface will be wrapped using the paginated surface
    to provide correct complex rendering behaviour;
    cairo_surface_show_page() and associated methods must be used
    for correct output.

    :param hdc:
        The DC to create a surface for,
        as obtained from :func:`win32gui.CreateDC`.

        **Note**: this unsafely interprets an integer as a pointer.
        Make sure it actually points to a valid DC!

    :type hdc: int

    *New in cairocffi 0.6*

    """
    def __init__(self, hdc):
        # The integer handle is reinterpreted as a raw pointer; no
        # validation is (or can be) performed here.
        pointer = cairo.cairo_win32_printing_surface_create(
            ffi.cast('void*', hdc))
        Surface.__init__(self, pointer)
# Maps cairo surface-type enum values to the wrapper class used when a
# surface pointer obtained from cairo is wrapped back into a Python object.
# Types not listed here fall back to the plain Surface base class.
SURFACE_TYPE_TO_CLASS = {
    constants.SURFACE_TYPE_IMAGE: ImageSurface,
    constants.SURFACE_TYPE_PDF: PDFSurface,
    # PSSurface is defined above but was missing from this registry, so
    # PostScript surfaces returned by cairo were wrapped as plain Surface
    # objects, losing their PS-specific methods.
    constants.SURFACE_TYPE_PS: PSSurface,
    constants.SURFACE_TYPE_SVG: SVGSurface,
    constants.SURFACE_TYPE_RECORDING: RecordingSurface,
    constants.SURFACE_TYPE_WIN32_PRINTING: Win32PrintingSurface,
}
| aESeguridad/GERE | venv/lib/python2.7/site-packages/cairocffi/surfaces.py | Python | gpl-3.0 | 49,514 |
#!/usr/bin/env python3
from modules import *
import configparser
import json
import re
#####################################################
## Database Connect
#####################################################
'''
Connects to the database using the connection string
'''
def database_connect():
    """Open a pg8000 connection using settings read from ``config.ini``.

    Expects a ``[DATABASE]`` section with ``user``, ``password`` and
    ``host`` keys; ``database`` defaults to the user name when absent.

    :returns: An open pg8000 connection, or None when connecting fails
        (an error message is printed in that case).
    """
    # Read the config file
    config = configparser.ConfigParser()
    config.read('config.ini')
    # Default the database name to the user name when not configured.
    if 'database' not in config['DATABASE']:
        config['DATABASE']['database'] = config['DATABASE']['user']
    # Create a connection to the database
    connection = None
    try:
        # Parses the config file and connects using the connect string
        connection = pg8000.connect(database=config['DATABASE']['database'],
                                    user=config['DATABASE']['user'],
                                    password=config['DATABASE']['password'],
                                    host=config['DATABASE']['host'])
    except pg8000.OperationalError as e:
        print("""Error, you haven't updated your config.ini or you have a bad
        connection, please try again. (Update your files first, then check
        internet connection)
        """)
        print(e)
    except pg8000.ProgrammingError as e:
        print(e)
    # NOTE(review): assigning to the DB-API module-level constant
    # `threadsafety` does not change pg8000's behaviour — presumably
    # intended as documentation of the assumed safety level; confirm
    # before removing.
    pg8000.threadsafety = 1
    # return the connection to use (None on failure)
    return connection
#####################################################
## Issue (new_issue, get all, get details)
#####################################################
# Add the details for a new issue to the database - details for the new
# issue are provided as parameters.
def new_issue(title, creator, resolver, verifier, description):
    """Insert a new issue row into ``a3_issue``.

    :param title: Issue title.
    :param creator: Id of the creating user.
    :param resolver: Id of the resolver, or "" for none (stored as NULL).
    :param verifier: Id of the verifier, or "" for none (stored as NULL).
    :param description: Free text; "" is replaced by a placeholder.
    :returns: True when the insert succeeded, False otherwise.
    """
    # Normalise optional fields: empty strings mean "not assigned".
    if resolver == "":
        resolver = None
    if verifier == "":
        verifier = None
    if description == "":
        description = "Description not provided"
    connection = database_connect()
    if connection is None:
        return False
    cursor = None
    try:
        cursor = connection.cursor()
        # Parameterized query: values are passed separately, never
        # interpolated into the SQL string.
        cursor.execute(
            "INSERT INTO a3_issue (title, creator, resolver, verifier, description) "
            "VALUES (%s, %s, %s, %s, %s);",
            (title, creator, resolver, verifier, description))
        connection.commit()
    except pg8000.ProgrammingError as e:
        print(e)
        return False
    finally:
        # Always release the cursor and connection, including on the
        # error path (the original code leaked both when execute failed).
        if cursor is not None:
            cursor.close()
        connection.close()
    return True
# Update the details of the issue having the provided issue_id with the
# values provided as parameters.
def update_issue(title, creator, resolver, verifier, description, issue_id):
    """Update the ``a3_issue`` row identified by ``issue_id``.

    :param title: New issue title.
    :param creator: Id of the creating user.
    :param resolver: Id of the resolver; "" or "None" stored as NULL.
    :param verifier: Id of the verifier; "" or "None" stored as NULL.
    :param description: Free text; "" is replaced by a placeholder.
    :param issue_id: Primary key of the row to update.
    :returns: True when the update succeeded, False otherwise.
    """
    # Normalise optional fields ("None" arrives as a literal string from
    # the web form when the field was previously unset).
    if resolver in ["", "None"]:
        resolver = None
    if verifier in ["", "None"]:
        verifier = None
    if description == "":
        description = "Description not provided"
    connection = database_connect()
    if connection is None:
        return False
    cursor = None
    try:
        cursor = connection.cursor()
        # Parameterized query: values are passed separately, never
        # interpolated into the SQL string.
        cursor.execute(
            "UPDATE a3_issue SET title=%s, creator=%s, resolver=%s, "
            "verifier=%s, description=%s WHERE id=%s;",
            (title, creator, resolver, verifier, description, issue_id))
        connection.commit()
    except pg8000.ProgrammingError as ex:
        print(ex)
        return False
    finally:
        # Always release the cursor and connection, including on the
        # error path (the original code leaked both when execute failed).
        if cursor is not None:
            cursor.close()
        connection.close()
    return True
'''
List all the user associated issues in the database for a given member
Issues which have the member_id parameter in the all_issue function below in any one or more of the creator, resolver, or verifier fields should be included in the result
'''
def all_issue(member_id):
    """List every issue associated with the given member.

    An issue is associated when ``member_id`` appears in any of its
    creator, resolver or verifier fields.

    :param member_id: Id of the member whose issues are listed.
    :returns: A list of dicts with keys ``title``, ``creator``,
        ``resolver``, ``verifier``, ``description`` and ``issue_id``
        (all values stringified); a single sentinel dict when the
        database connection fails; ``[{}]`` on a query error.
    """
    connection = database_connect()
    if connection is None:
        # Sentinel row shown when no DB connection could be made.
        # (The original sentinel used a typo'd key 'verifier:' — fixed so
        # consumers reading 'verifier' find the key.)
        return [{'title': 'You\'re not logged in',
                 'creator': '0',
                 'resolver': '0',
                 'verifier': '0',
                 'description': 'Maybe due to Authentication failure',
                 'issue_id': '0'}]
    cursor = None
    try:
        cursor = connection.cursor()
        cursor.execute(
            "SELECT title, creator, resolver, verifier, description, id "
            "FROM a3_issue "
            "WHERE creator=%s or resolver=%s or verifier=%s;",
            (member_id, member_id, member_id))
        result = cursor.fetchall()
    except pg8000.ProgrammingError:
        return [{}]
    finally:
        # Always release the cursor and connection, including on the
        # error path (the original code leaked both on a query failure).
        if cursor is not None:
            cursor.close()
        connection.close()
    # Stringify every field so the template layer gets uniform values.
    issue_db = [[str(field) for field in record] for record in result]
    return [{
        'title': row[0],
        'creator': row[1],
        'resolver': row[2],
        'verifier': row[3],
        'description': row[4],
        'issue_id': row[5],
    } for row in issue_db]
'''
Find the associated issues for the user with the given userId based on the searchString provided as the parameter, and based on the assignment description
'''
def all_issue_find(searchString, member_id):
    """Find the member's associated issues matching searchString.

    searchString may contain an optional "@First Last@" name component
    (matched against user first/last names) plus '|'-separated keywords
    (matched against issue title and description). Returns a list of
    issue dicts (title, creator, resolver, verifier, description,
    issue_id); [{}] when no DB connection is available.
    """
    # Escape regex metacharacters so user input is matched literally by
    # PostgreSQL's ~* operator.
    # Bug fix: backslashes must be doubled FIRST. The original doubled
    # them last, which also doubled the backslashes introduced by the
    # escapes below and corrupted every escape sequence.
    searchString = searchString.replace("\\", "\\\\").\
        replace("'", "''").\
        replace("*", "\\*").\
        replace("?", "\\?").\
        replace("[", "\\[").\
        replace("$", "\\$").\
        replace("^", "\\^").\
        replace("]", "\\]").\
        replace(".", "\\.").\
        replace("+", "\\+").\
        replace("{", "\\{").\
        replace("}", "\\}").\
        replace("%", "%%")
    connection = database_connect()
    if connection is None:
        return [{}]
    cursor = connection.cursor()
    # Split the query into an optional "@name@" part and a keyword list.
    name = None
    try:
        name = re.search(r"@([A-Za-z\\ ]+)@", searchString).group(1)
    except AttributeError:
        pass
    # Bug fix: the class was [a-zA-z], whose A-z range also matches the
    # punctuation between 'Z' and 'a'; now aligned with the extraction
    # pattern above.
    keyword = re.sub(r"@[A-Za-z\\ ]*@", "", searchString).split("|")
    keyword = [i.strip() for i in keyword]
    issue_db = []
    if not name and keyword[0] != '':  # only title and description search. Name is empty
        pattern = "|".join(keyword)
        cursor.execute("SELECT title, creator, resolver, verifier, description, id FROM a3_issue\
            WHERE (title ~* (%s) or description ~* (%s)) and\
            (verifier=(%s) or resolver=(%s) or creator=(%s));", (pattern, pattern, member_id, member_id, member_id))
        issue_db.extend(cursor.fetchall())
    elif name and keyword[0] == "":  # only name match. Title and description are empty
        cursor.execute("SELECT title, creator, resolver, verifier, description, his.id FROM \
            a3_user INNER JOIN \
            (SELECT * from a3_issue where creator =(%s) or verifier =(%s) or resolver=(%s)) as his\
            ON a3_user.id=his.creator or a3_user.id=his.resolver or a3_user.id=his.verifier\
            WHERE firstname ~* (%s) or lastname ~* (%s);", (member_id, member_id, member_id, name, name))
        # The join can yield the same issue several times; deduplicate.
        issue_db.extend(list(set([tuple(i) for i in cursor.fetchall()])))
    elif name and keyword[0] != "":  # name and keywords are not empty
        pattern = "|".join(keyword)
        cursor.execute("SELECT title, creator, resolver, verifier, description, his.id FROM\
            a3_user INNER JOIN\
            (SELECT * from a3_issue where creator =(%s) or verifier =(%s) or resolver=(%s)) as his\
            ON a3_user.id=his.creator or a3_user.id=his.resolver or a3_user.id=his.verifier\
            WHERE (firstname ~* (%s) or lastname ~* (%s)) AND (title ~* (%s) or description ~* (%s));"\
            , (member_id, member_id, member_id, name, name, pattern, pattern))
        issue_db.extend(list(set([tuple(i) for i in cursor.fetchall()])))
    else:  # name and keywords are both empty (searchString is empty)
        cursor.execute("SELECT title, creator, resolver, verifier, description, id FROM a3_issue \
            WHERE creator=(%s) or resolver=(%s) or verifier=(%s);", (member_id, member_id, member_id))
        issue_db.extend(cursor.fetchall())
    cursor.close()
    connection.close()
    return [{
        'title': row[0],
        'creator': row[1],
        'resolver': row[2],
        'verifier': row[3],
        'description': row[4],
        'issue_id': row[5]
    } for row in issue_db]
| wx-Yao/Didactic-DBMS | Database_Application_Development/database.py | Python | mit | 8,548 |
from fastapi.testclient import TestClient
from docs_src.metadata.tutorial001 import app
client = TestClient(app)
# Expected OpenAPI document served by the docs_src.metadata.tutorial001 app.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {
        "title": "ChimichangApp",
        "description": "\nChimichangApp API helps you do awesome stuff. 🚀\n\n## Items\n\nYou can **read items**.\n\n## Users\n\nYou will be able to:\n\n* **Create users** (_not implemented_).\n* **Read users** (_not implemented_).\n",
        "termsOfService": "http://example.com/terms/",
        "contact": {
            "name": "Deadpoolio the Amazing",
            "url": "http://x-force.example.com/contact/",
            "email": "dp@x-force.example.com",
        },
        "license": {
            "name": "Apache 2.0",
            "url": "https://www.apache.org/licenses/LICENSE-2.0.html",
        },
        "version": "0.0.1",
    },
    "paths": {
        "/items/": {
            "get": {
                "summary": "Read Items",
                "operationId": "read_items_items__get",
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    }
                },
            }
        }
    },
}
def test_openapi_schema():
    """The generated OpenAPI document must match the expected schema."""
    response = client.get("/openapi.json")
    assert response.status_code == 200, response.text
    body = response.json()
    assert body == openapi_schema
def test_items():
    """GET /items/ must return the single Katana item."""
    response = client.get("/items/")
    assert response.status_code == 200, response.text
    payload = response.json()
    assert payload == [{"name": "Katana"}]
| tiangolo/fastapi | tests/test_tutorial/test_metadata/test_tutorial001.py | Python | mit | 1,611 |
from django.db import models
from lino.api import dd
from lino.utils import join_elems
from etgen.html import E
from lino.mixins.polymorphic import Polymorphic
class Place(dd.Model):
    """A named location; a Member may optionally reference one."""
    name = models.CharField(max_length=200)
    def __str__(self):
        return self.name
class Member(Polymorphic):
    """Base model for participants; Customer and Supplier specialize it
    through lino's Polymorphic mixin."""
    name = models.CharField(max_length=200)
    place = dd.ForeignKey(Place, blank=True, null=True)
    email = models.EmailField(max_length=200, blank=True)
    def __str__(self):
        return self.name
class Customer(Member):
    """A member that wants products (linked via Demand)."""
    customer_remark = models.CharField(max_length=200, blank=True)
class Supplier(Member):
    """A member that offers products (linked via Offer)."""
    supplier_remark = models.CharField(max_length=200, blank=True)
class Product(dd.Model):
    """A product, linked to suppliers via Offer and customers via Demand."""
    name = models.CharField(max_length=200)
    suppliers = models.ManyToManyField(
        'Supplier', through='Offer',
        related_name='offered_products')
    customers = models.ManyToManyField(
        'Customer', through='Demand',
        related_name='wanted_products')
    def __str__(self):
        return self.name
    def _member_links(self, ar, members):
        # Render a comma-separated paragraph of links to the given members.
        # Shared by offered_by/demanded_by, which previously duplicated it.
        items = [ar.obj2html(o) for o in members]
        return E.p(*join_elems(items, sep=', '))
    @dd.displayfield("Offered by")
    def offered_by(self, ar):
        """Display field: links to all suppliers offering this product."""
        if ar is None:
            return ''
        return self._member_links(ar, self.suppliers.all())
    @dd.displayfield("Wanted by")
    def demanded_by(self, ar):
        """Display field: links to all customers wanting this product."""
        if ar is None:
            return ''
        return self._member_links(ar, self.customers.all())
class Offer(dd.Model):
    """Through-model: a Supplier's (optionally time-limited) offer of a
    Product."""
    supplier = dd.ForeignKey(Supplier)
    product = dd.ForeignKey(Product)
    valid_until = models.DateField(blank=True, null=True)
    def __str__(self):
        return "{0} offered by {1}".format(self.product, self.supplier)
class Demand(dd.Model):
    """Through-model: a Customer's demand for a Product."""
    customer = dd.ForeignKey(Customer)
    product = dd.ForeignKey(Product)
    def __str__(self):
        return "{0} ({1})".format(self.product, self.customer)
| lino-framework/book | lino_book/projects/lets2/lets/models.py | Python | bsd-2-clause | 1,995 |
import sys
# Refuse to run under Python 2: the client code requires Python 3.
if sys.version_info[0] < 3:
    print("This game is written in python 3.\nRun 'python3 "+sys.argv[0]+"' or './"+sys.argv[0]+"'")
    sys.exit(-1)
# Support both execution as a package module ("python3 -m asciifarm.client")
# and running this file directly; in the latter case the project root is
# appended to sys.path so the absolute import resolves.
if __package__ == "asciifarm.client":
    from . import main
else:
    import os.path
    sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
    from asciifarm.client import main
main.main()
| jmdejong/Asciifarm | asciifarm/client/__main__.py | Python | gpl-3.0 | 371 |
# Copyright (c) Siemens AG, 2013
#
# This file is part of MANTIS. MANTIS is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2
# of the License, or(at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from urlparse import urlparse
def extract_fqdn(uri):
""" Extract the FQDN from an URI."""
try:
parsed = urlparse(uri)
fqdn = parsed.netloc
return fqdn
except:
return None
| siemens/django-dingos | dingos/core/extractors.py | Python | gpl-2.0 | 981 |
from django.conf.urls import url
from . import views
urlpatterns = (
    # Group selection landing page.
    url(r'^$', views.group_selection, name='group-landing'),
    # Dashboard for a single group.
    url(r'^(?P<group_slug>[\w-]+)/$',
        views.group_dashboard, name='group-detail'),
    # Dashboard for a tenant within a group.
    url(r'^(?P<group_slug>[\w-]+)/(?P<tenant_slug>[\w-]+)/$',
        views.tenant_dashboard, name='tenant-detail'),
)
| theirc/rapidsms-multitenancy | multitenancy/urls.py | Python | bsd-3-clause | 343 |
"""Unit tests for the route authentication plugin."""
import logging
import unittest
from datetime import datetime, timezone
from unittest.mock import Mock
import bottle
from shared.routes.plugins import InjectionPlugin
from external.routes.plugins.auth_plugin import AuthPlugin, EDIT_REPORT_PERMISSION
class AuthPluginTest(unittest.TestCase):
    """Unit tests for the route authentication and authorization plugin."""
    def setUp(self):
        """Override to set up a mock database and install the plugins."""
        logging.disable()
        self.database = Mock()
        self.database.reports_overviews.find_one.return_value = dict(_id="id")
        # No session by default, i.e. the user is not logged in.
        self.database.sessions.find_one.return_value = None
        # JSON body the stub route returns on success.
        self.success = '{"ok": true}'
        # A session that never expires, for the authenticated-user tests.
        self.session = dict(
            user="jadoe",
            email="jadoe@example.org",
            session_expiration_datetime=datetime.max.replace(tzinfo=timezone.utc),
        )
        self.injection_plugin = bottle.install(InjectionPlugin(self.database, "database"))
        self.auth_plugin = bottle.install(AuthPlugin())
    def tearDown(self):
        """Override to remove the plugins and reset the logging."""
        bottle.uninstall(self.auth_plugin)
        bottle.uninstall(self.injection_plugin)
        logging.disable(logging.NOTSET)
    @staticmethod
    def route(database): # pylint: disable=unused-argument
        """Route handler with database parameter."""
        return dict(ok=True)
    def test_route_without_specified_auth(self):
        """Test that the auth plugin will crash."""
        route = bottle.Route(bottle.app(), "/", "POST", self.route)
        with self.assertRaises(AttributeError):
            route.call()
    def test_valid_session(self):
        """Test that session ids are authenticated."""
        self.database.sessions.find_one.return_value = dict(
            session_expiration_datetime=datetime.max.replace(tzinfo=timezone.utc)
        )
        route = bottle.Route(bottle.app(), "/", "POST", self.route, authentication_required=True)
        self.assertEqual(self.success, route.call())
    def test_expired_session(self):
        """Test that the session is invalid when it's expired."""
        self.database.sessions.find_one.return_value = dict(
            session_expiration_datetime=datetime.min.replace(tzinfo=timezone.utc)
        )
        route = bottle.Route(bottle.app(), "/", "POST", self.route, authentication_required=True)
        self.assertEqual(401, route.call().status_code)
    def test_missing_session(self):
        """Test that the session is invalid when it's missing."""
        route = bottle.Route(bottle.app(), "/", "POST", self.route, authentication_required=True)
        self.assertEqual(401, route.call().status_code)
    def test_unauthorized_session(self):
        """Test that an unauthorized user cannot post."""
        # Only 'jodoe' may edit, while the session user is 'jadoe'.
        self.database.reports_overviews.find_one.return_value = dict(
            _id="id", permissions={EDIT_REPORT_PERMISSION: ["jodoe"]}
        )
        self.database.sessions.find_one.return_value = self.session
        route = bottle.Route(bottle.app(), "/", "POST", self.route, permissions_required=[EDIT_REPORT_PERMISSION])
        self.assertEqual(403, route.call().status_code)
    def test_post_route_with_permissions_required_when_everyone_has_permission(self):
        """Test that an authenticated user can post if permissions have not been restricted."""
        self.database.reports_overviews.find_one.return_value = dict(_id="id", permissions={})
        self.database.sessions.find_one.return_value = self.session
        route = bottle.Route(bottle.app(), "/", "POST", self.route, permissions_required=[EDIT_REPORT_PERMISSION])
        self.assertEqual(self.success, route.call())
    def test_post_route_with_permissions_required(self):
        """Test that an authenticated user can post if they have the required permissions."""
        self.database.reports_overviews.find_one.return_value = dict(
            _id="id", permissions={EDIT_REPORT_PERMISSION: ["jadoe"]}
        )
        self.database.sessions.find_one.return_value = self.session
        route = bottle.Route(bottle.app(), "/", "POST", self.route, permissions_required=[EDIT_REPORT_PERMISSION])
        self.assertEqual(self.success, route.call())
    def test_post_route_without_authentication_required(self):
        """Test that unauthenticated users can POST if no authentication is required."""
        route = bottle.Route(bottle.app(), "/", "POST", self.route, authentication_required=False)
        self.assertEqual(self.success, route.call())
    def test_get_route_without_authentication_required(self):
        """Test that unauthenticated users can GET if no authentication is required."""
        route = bottle.Route(bottle.app(), "/", "GET", self.route, authentication_required=False)
        self.assertEqual(self.success, route.call())
| ICTU/quality-time | components/server/tests/external/routes/plugins/test_route_auth_plugin.py | Python | apache-2.0 | 4,927 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from datetime import datetime
import numpy as np
from numpy import ma
from pyWOA.woa import WOA
def test_import():
    """WOA must be importable straight from the pyWOA package."""
    from pyWOA import WOA
    # Constructing it must also succeed.
    WOA()
def test_available_vars():
    """Both temperature and salinity databases must be available."""
    db = WOA()
    available = db.keys()
    assert 'TEMP' in available
    assert 'PSAL' in available
# ==== Request points coincidents to the WOA gridpoints
def test_coincident_gridpoint():
    """Extractions at exact WOA grid points, with scalar and list arguments
    for doy, depth, lat and lon, must return the known grid values."""
    db = WOA()
    # Scalar request at a grid point.
    t = db['TEMP'].extract(var='mn', doy=136.875,
            depth=0, lat=17.5, lon=-37.5)
    assert np.allclose(t['mn'], [24.60449791])
    # Multiple days of year.
    t = db['TEMP'].extract(var='t_mn', doy=[136.875, 228.125],
            depth=0, lat=17.5, lon=-37.5)
    assert np.allclose(t['t_mn'], [24.60449791, 26.38446426])
    # Multiple depths.
    t = db['TEMP'].extract(var='t_mn', doy=136.875,
            depth=[0, 10], lat=17.5, lon=-37.5)
    assert np.allclose(t['t_mn'], [24.60449791, 24.62145996])
    # Multiple latitudes.
    t = db['TEMP'].extract(var='t_mn', doy=136.875,
            depth=0, lat=[17.5, 12.5], lon=-37.5)
    assert np.allclose(t['t_mn'], [25.17827606, 24.60449791])
    # Multiple longitudes.
    t = db['TEMP'].extract(var='t_mn', doy=136.875,
            depth=0, lat=17.5, lon=[-37.5, -32.5])
    assert np.allclose(t['t_mn'], [24.60449791, 23.98172188])
    # Depth x latitude grid.
    t = db['TEMP'].extract(var='t_mn', doy=136.875,
            depth=[0, 10], lat=[17.5, 12.5], lon=-37.5)
    assert np.allclose(t['t_mn'],
            [[ 25.17827606, 24.60449791], [ 25.25433731, 24.62145996]])
def test_lon_cyclic():
    """Longitude is cyclic: x and x-360 must address the same grid point."""
    db = WOA()
    t1 = db['TEMP'].extract(var='t_mn', doy=136.875,
            depth=0, lat=17.5, lon=182.5)
    t2 = db['TEMP'].extract(var='t_mn', doy=136.875,
            depth=0, lat=17.5, lon=-177.5)
    assert np.allclose(t1['t_mn'], t2['t_mn'])
    t1 = db['TEMP'].extract(var='t_mn', doy=136.875,
            depth=0, lat=17.5, lon=[-37.5, -32.5])
    t2 = db['TEMP'].extract(var='t_mn', doy=136.875,
            depth=0, lat=17.5, lon=[322.5, 327.5])
    assert np.allclose(t1['t_mn'], t2['t_mn'])
def test_no_data_available():
    """A position without valid data must return fully masked variables."""
    db = WOA()
    out = db['TEMP'].extract(doy=155, lat=48.1953, lon=-69.5855,
            depth=[2.0, 5.0, 6.0, 21.0, 44.0, 79.0, 5000])
    assert sorted(out.keys()) == [u't_dd', u't_mn', u't_sd', u't_se']
    for v in out:
        # Bug fix: the original computed .all() but never asserted it,
        # so the mask check did nothing.
        assert ma.getmaskarray(out[v]).all()
def test_extract_overlimit():
    """ Test a request over the limits of the database """
    db = WOA()
    # A depth below the deepest WOA level must come back masked.
    t = db['TEMP'].extract(var='t_mn', doy=136.875,
            depth=5502, lat=17.5, lon=-37.5)
    assert ma.is_masked(t['t_mn'])
    # Mixing valid and out-of-range depths masks only the invalid one.
    t = db['TEMP'].extract(var='t_mn', doy=136.875,
            depth=[10, 5502], lat=17.5, lon=-37.5)
    assert np.all(t['t_mn'].mask == [False, True])
    assert ma.allclose(t['t_mn'],
            ma.masked_array([24.62145996, 0], mask=[False, True]))
# ======
# NOTE: the 'notest_' prefix keeps the test runner from collecting this.
def notest_get_point():
    db = WOA()
    t = db['TEMP'].extract(var='t_mn', doy=90,
            depth=0, lat=17.5, lon=-37.5)
    #depth=0, lat=10, lon=330)
    assert np.allclose(t['mn'], [24.60449791])
# Disabled test ('notest_' prefix): profile extraction of all variables.
def notest_get_profile():
    db = WOA()
    t = db['TEMP'].extract(var='mn', doy=10,
            depth=[0,10], lat=10, lon=330)
    assert np.allclose(t['mn'], [ 28.09378815, 28.09343529])
    t = db['TEMP'].extract(doy=10,
            depth=[0,10], lat=10, lon=330)
    assert np.allclose(t['t_se'], [ 0.01893404, 0.0176903 ])
    assert np.allclose(t['t_sd'], [ 0.5348658, 0.4927946])
    assert np.allclose(t['t_mn'], [ 28.09378815, 28.09343529])
    assert np.allclose(t['t_dd'], [ 798, 776])
# Disabled test ('notest_' prefix): extraction along a track of positions.
def notest_get_track():
    db = WOA()
    db['TEMP'].get_track(doy=[datetime.now()], depth=0, lat=[10], lon=[330])
    db['TEMP'].get_track(doy=2*[datetime.now()], depth=0, lat=[10, 12], lon=[330, -35])
def test_dev():
    """Extraction without an explicit depth must still return the four
    temperature variables."""
    db = WOA()
    t = db['TEMP'].extract(doy=228.125, lat=12.5, lon=-37.5)
    # Bug fix: the original made the call but asserted nothing at all.
    # Expected keys per test_no_data_available above.
    for key in (u't_dd', u't_mn', u't_sd', u't_se'):
        assert key in t
| castelao/pyWOA | tests/test_WOA_from_nc.py | Python | bsd-3-clause | 3,882 |
# -*- coding: utf-8 -*-
# Copyright: 2009-2011 Gentoo Foundation
# Author(s): Petteri Räty (betelgeuse@gentoo.org)
# License: GPL2
__all__ = ['database']
import errno
import portage
from portage.cache import fs_template
from portage.versions import catsplit
from portage import cpv_getkey
from portage import os
from portage import _encodings
from portage import _unicode_decode
portage.proxy.lazyimport.lazyimport(globals(),
'xattr')
class NoValueException(Exception):
	"""Raised when a requested extended attribute has no stored value."""
	pass
class database(fs_template.FsBased):
	"""Metadata cache backend that stores each ebuild's cache entry in the
	extended attributes (xattrs) of the ebuild file itself.

	Values longer than the filesystem's xattr limit are split across
	numbered keys (key, key1, key2, ...); the first part carries a
	"<parts>:<data>" prefix with the total number of parts.
	"""
	autocommits = True
	def __init__(self, *args, **config):
		super(database,self).__init__(*args, **config)
		self.portdir = self.label
		self.ns = xattr.NS_USER + '.gentoo.cache'
		self.keys = set(self._known_keys)
		self.keys.add('_mtime_')
		self.keys.add('_eclasses_')
		# xattrs have an upper length
		self.max_len = self.__get_max()
	def __get_max(self):
		# Return the maximum xattr value length, probing the filesystem
		# once and caching the result in an xattr on profiles/repo_name.
		path = os.path.join(self.portdir,'profiles/repo_name')
		try:
			return int(self.__get(path,'value_max_len'))
		except NoValueException as e:
			max = self.__calc_max(path)
			self.__set(path,'value_max_len',str(max))
			return max
	def __calc_max(self,path):
		""" Find out max attribute length supported by the file system """
		hundred = ''
		for i in range(100):
			hundred+='a'
		s=hundred
		# Grow the test attribute in 100-byte steps until the kernel
		# refuses it; the previous length is the supported maximum.
		# Could use finally but needs python 2.5 then
		try:
			while True:
				self.__set(path,'test_max',s)
				s+=hundred
		except IOError as e:
			# ext based give wrong errno
			# http://bugzilla.kernel.org/show_bug.cgi?id=12793
			if e.errno in (errno.E2BIG, errno.ENOSPC):
				result = len(s)-100
			else:
				raise
		try:
			self.__remove(path,'test_max')
		except IOError as e:
			if e.errno != errno.ENODATA:
				raise
		return result
	def __get_path(self,cpv):
		# Map a cpv such as "cat/pkg-1.0" to the ebuild's file path.
		cat,pn = catsplit(cpv_getkey(cpv))
		return os.path.join(self.portdir,cat,pn,os.path.basename(cpv) + ".ebuild")
	def __has_cache(self,path):
		# An ebuild is considered cached iff its _mtime_ xattr exists.
		try:
			self.__get(path,'_mtime_')
		except NoValueException as e:
			return False
		return True
	def __get(self,path,key,default=None):
		# Read one xattr. When a default is given and the attribute is
		# missing (ENODATA), return the default; any other IOError is
		# reported as NoValueException.
		try:
			return xattr.get(path,key,namespace=self.ns)
		except IOError as e:
			if not default is None and errno.ENODATA == e.errno:
				return default
			else:
				raise NoValueException()
	def __remove(self,path,key):
		xattr.remove(path,key,namespace=self.ns)
	def __set(self,path,key,value):
		xattr.set(path,key,value,namespace=self.ns)
	def _getitem(self, cpv):
		# Reassemble the cache entry from the (possibly split) xattrs.
		values = {}
		path = self.__get_path(cpv)
		all = {}
		for tuple in xattr.get_all(path,namespace=self.ns):
			key,value = tuple
			all[key] = value
		if not '_mtime_' in all:
			raise KeyError(cpv)
		# We default to '' like other caches
		for key in self.keys:
			attr_value = all.get(key,'1:')
			parts,sep,value = attr_value.partition(':')
			parts = int(parts)
			if parts > 1:
				# Concatenate the continuation parts key1..key<n-1>.
				for i in range(1,parts):
					value += all.get(key+str(i))
			values[key] = value
		return values
	def _setitem(self, cpv, values):
		path = self.__get_path(cpv)
		max = self.max_len
		for key,value in values.items():
			# mtime comes in as long so need to convert to strings
			s = str(value)
			# We need to split long values
			value_len = len(s)
			parts = 0
			if value_len > max:
				# Find out how many parts we need
				# NOTE(review): "/" is integer division under Python 2;
				# under Python 3 it yields a float and range() below
				# would fail -- confirm the intended interpreter.
				parts = value_len/max
				if value_len % max > 0:
					parts += 1
				# Only the first entry carries the number of parts
				self.__set(path,key,'%s:%s'%(parts,s[0:max]))
				# Write out the rest
				for i in range(1,parts):
					start = i * max
					val = s[start:start+max]
					self.__set(path,key+str(i),val)
			else:
				self.__set(path,key,"%s:%s"%(1,s))
	def _delitem(self, cpv):
		pass # Will be gone with the ebuild
	def __contains__(self, cpv):
		return os.path.exists(self.__get_path(cpv))
	def __iter__(self):
		# Walk portdir and yield "cat/pkg/pkg-ver" for every ebuild that
		# has a cache entry stored in its xattrs.
		for root, dirs, files in os.walk(self.portdir):
			for file in files:
				try:
					file = _unicode_decode(file,
						encoding=_encodings['fs'], errors='strict')
				except UnicodeDecodeError:
					continue
				if file[-7:] == '.ebuild':
					cat = os.path.basename(os.path.dirname(root))
					pn_pv = file[:-7]
					path = os.path.join(root,file)
					if self.__has_cache(path):
						yield "%s/%s/%s" % (cat,os.path.basename(root),file[:-7])
| nullishzero/Portage | pym/portage/cache/ebuild_xattr.py | Python | gpl-2.0 | 4,204 |
#!/usr/bin/env python
text = open("time.txt", "r").read()
import re
def toMs(string):
    """Convert a time(1) duration such as "0m0.031s" to milliseconds."""
    parsed = re.search(r"(\d+)m(\d+)\.(\d+)s", string)
    minutes, seconds, millis = (int(group) for group in parsed.groups())
    return (minutes * 60 + seconds) * 1000 + millis
# "real" is wall-clock time; "user" + "sys" approximate the CPU time the
# program itself consumed.
real = toMs( re.search("real\s+(\S+)", text).group(1) )
user = toMs( re.search("user\s+(\S+)", text).group(1) )
sys = toMs( re.search("sys\s+(\S+)", text).group(1) )
print( "real {0}ms\nprogram {1}ms".format(real, user+sys) )
| ilyaigpetrov/report-graph-traversal-c-vs-java | Code/retime.py | Python | mit | 529 |
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the BadCoind-Qt.app contains the right plist (including the right version)
# fix made because of serval bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
BadCoindir = "./";
inFile = BadCoindir+"/share/qt/Info.plist"
outFile = "Badcoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = BadCoindir+"badcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created" | BadCoinBoss/BadCoin | share/qt/clean_mac_info_plist.py | Python | mit | 895 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.db import constants as db_const
from neutron_lib.db import model_base
from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy import event # noqa
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as se
from neutron._i18n import _
from neutron.db import sqlalchemytypes
class StandardAttribute(model_base.BASEV2):
    """Common table to associate all Neutron API resources.
    By having Neutron objects related to this table, we can associate new
    tables that apply to many Neutron objects (e.g. timestamps, rbac entries)
    to this table to avoid schema duplication while maintaining referential
    integrity.
    NOTE(kevinbenton): This table should not have more columns added to it
    unless we are absolutely certain the new column will have a value for
    every single type of Neutron resource. Otherwise this table will be filled
    with NULL entries for combinations that don't make sense. Additionally,
    by keeping this table small we can ensure that performance isn't adversely
    impacted for queries on objects.
    """
    # sqlite doesn't support auto increment on big integers so we use big int
    # for everything but sqlite
    id = sa.Column(sa.BigInteger().with_variant(sa.Integer(), 'sqlite'),
                   primary_key=True, autoincrement=True)
    # NOTE(kevinbenton): this column is redundant information, but it allows
    # operators/devs to look at the contents of this table and know which table
    # the corresponding object is in.
    # 255 was selected as a max just because it's the varchar ceiling in mysql
    # before a 2-byte prefix is required. We shouldn't get anywhere near this
    # limit with our table names...
    resource_type = sa.Column(sa.String(255), nullable=False)
    description = sa.Column(sa.String(db_const.DESCRIPTION_FIELD_SIZE))
    revision_number = sa.Column(
        sa.BigInteger().with_variant(sa.Integer(), 'sqlite'),
        server_default='0', nullable=False)
    # Timestamps stored via TruncatedDateTime.
    created_at = sa.Column(sqlalchemytypes.TruncatedDateTime,
                           default=timeutils.utcnow)
    updated_at = sa.Column(sqlalchemytypes.TruncatedDateTime,
                           onupdate=timeutils.utcnow)
    __mapper_args__ = {
        # see http://docs.sqlalchemy.org/en/latest/orm/versioning.html for
        # details about how this works
        "version_id_col": revision_number,
        "version_id_generator": False  # revision plugin increments manually
    }
    def bump_revision(self):
        """Increment the revision number, unless the object is brand new."""
        if self.revision_number is None:
            # this is a brand new object uncommitted so we don't bump now
            return
        self.revision_number += 1
class HasStandardAttributes(object):
    """Mixin giving a model the standard attributes (description,
    created_at/updated_at, revision_number) through an associated
    StandardAttribute record."""
    @classmethod
    def get_api_collections(cls):
        """Define the API collection this object will appear under.
        This should return a list of API collections that the object
        will be exposed under. Most should be exposed in just one
        collection (e.g. the network model is just exposed under
        'networks').
        This is used by the standard attr extensions to discover which
        resources need to be extended with the standard attr fields
        (e.g. created_at/updated_at/etc).
        """
        # NOTE(kevinbenton): can't use abc because the metaclass conflicts
        # with the declarative base others inherit from.
        if hasattr(cls, 'api_collections'):
            return cls.api_collections
        raise NotImplementedError(_("%s must define api_collections") % cls)
    @classmethod
    def get_api_sub_resources(cls):
        """Define the API sub-resources this object will appear under.
        This should return a list of API sub-resources that the object
        will be exposed under.
        This is used by the standard attr extensions to discover which
        sub-resources need to be extended with the standard attr fields
        (e.g. created_at/updated_at/etc).
        """
        try:
            return cls.api_sub_resources
        except AttributeError:
            return []
    @classmethod
    def get_collection_resource_map(cls):
        """Return the model's mapping of API collection name to resource."""
        try:
            return cls.collection_resource_map
        except AttributeError:
            raise NotImplementedError(_("%s must define "
                                        "collection_resource_map") % cls)
    @classmethod
    def validate_tag_support(cls):
        """Return True if the model declares tag support (tag_support)."""
        return getattr(cls, 'tag_support', False)
    @declarative.declared_attr
    def standard_attr_id(cls):
        # One-to-one FK to StandardAttribute; cascade-deleted with it.
        return sa.Column(
            sa.BigInteger().with_variant(sa.Integer(), 'sqlite'),
            sa.ForeignKey(StandardAttribute.id, ondelete="CASCADE"),
            unique=True,
            nullable=False
        )
    # NOTE(kevinbenton): we have to disable the following pylint check because
    # it thinks we are overriding this method in the __init__ method.
    # pylint: disable=method-hidden
    @declarative.declared_attr
    def standard_attr(cls):
        return sa.orm.relationship(StandardAttribute,
                                   lazy='joined',
                                   cascade='all, delete-orphan',
                                   single_parent=True,
                                   uselist=False)
    def __init__(self, *args, **kwargs):
        """Pop the standard-attr kwargs and create the associated
        StandardAttribute row for this object."""
        standard_attr_keys = ['description', 'created_at',
                              'updated_at', 'revision_number']
        standard_attr_kwargs = {}
        for key in standard_attr_keys:
            if key in kwargs:
                standard_attr_kwargs[key] = kwargs.pop(key)
        super(HasStandardAttributes, self).__init__(*args, **kwargs)
        # here we automatically create the related standard attribute object
        self.standard_attr = StandardAttribute(
            resource_type=self.__tablename__, **standard_attr_kwargs)
    # The following attributes proxy to the associated StandardAttribute row.
    @declarative.declared_attr
    def description(cls):
        return association_proxy('standard_attr', 'description')
    @declarative.declared_attr
    def created_at(cls):
        return association_proxy('standard_attr', 'created_at')
    @declarative.declared_attr
    def updated_at(cls):
        return association_proxy('standard_attr', 'updated_at')
    def update(self, new_dict):
        # ignore the timestamps if they were passed in. For example, this
        # happens if code calls update_port with modified results of get_port
        new_dict.pop('created_at', None)
        new_dict.pop('updated_at', None)
        super(HasStandardAttributes, self).update(new_dict)
    @declarative.declared_attr
    def revision_number(cls):
        return association_proxy('standard_attr', 'revision_number')
    def bump_revision(self):
        # SQLAlchemy will bump the version for us automatically if the
        # standard attr record is being modified, but we must call this
        # for all other modifications or when relevant children are being
        # modified (e.g. fixed_ips change should bump port revision)
        self.standard_attr.bump_revision()
def _resource_model_map_helper(rs_map, resource, subclass):
if resource in rs_map:
raise RuntimeError(_("Model %(sub)s tried to register for API "
"resource %(res)s which conflicts with model "
"%(other)s.") %
dict(sub=subclass,
other=rs_map[resource],
res=resource))
rs_map[resource] = subclass
def get_standard_attr_resource_model_map(include_resources=True,
                                         include_sub_resources=True):
    """Return a map of API (sub-)resource name to its model class."""
    rs_map = {}
    for subclass in HasStandardAttributes.__subclasses__():
        names = []
        if include_resources:
            names += list(subclass.get_api_collections())
        if include_sub_resources:
            names += list(subclass.get_api_sub_resources())
        for resource in names:
            _resource_model_map_helper(rs_map, resource, subclass)
    return rs_map
def get_tag_resource_parent_map():
    """Map API collection name to parent resource for tag-supporting models."""
    parent_map = {}
    for subclass in HasStandardAttributes.__subclasses__():
        if not subclass.validate_tag_support():
            continue
        for collection, resource in (subclass.get_collection_resource_map()
                                     .items()):
            if collection in parent_map:
                msg = (_("API parent %(collection)s/%(resource)s for "
                         "model %(subclass)s is already registered.") %
                       dict(collection=collection, resource=resource,
                            subclass=subclass))
                raise RuntimeError(msg)
            parent_map[collection] = resource
    return parent_map
@event.listens_for(se.Session, 'after_bulk_delete')
def throw_exception_on_bulk_delete_of_listened_for_objects(delete_context):
    """Reject bulk deletes of models marked with 'revises_on_change',
    since their revision bookkeeping relies on per-object events."""
    if hasattr(delete_context.mapper.class_, 'revises_on_change'):
        raise RuntimeError(_("%s may not be deleted in bulk because it "
                             "bumps the revision of other resources via "
                             "SQLAlchemy event handlers, which are not "
                             "compatible with bulk deletes.") %
                           delete_context.mapper.class_)
| noironetworks/neutron | neutron/db/standard_attr.py | Python | apache-2.0 | 9,979 |
'''
Created on Apr 21, 2014
@author: Borja
'''
import os.path
import xlrd
from data import __data__
class XslReader(object):
    """Reads a rectangular cell range from the first sheet of an Excel
    workbook located in the package data directory."""
    def __init__(self):
        # Make sure the data directory exists before any workbook is read.
        if not os.path.exists(__data__.path()):
            os.makedirs(__data__.path())
        self._data_path = __data__.path()
    def load_xsl(self, file_name, rows_range, cols_range):
        """Return the cells in rows_range x cols_range as a 2-D list.

        rows_range/cols_range are (first, last) pairs of 0-based,
        inclusive indices. Text cells are returned UTF-8 encoded; other
        cell types keep their xlrd value.
        """
        inpath = os.path.join(self._data_path, os.path.basename(file_name))
        workbook = xlrd.open_workbook(inpath)
        worksheet = workbook.sheet_by_index(0)
        # Consistency fix: convert the range bounds once and reuse them;
        # the original recomputed int(rows_range[...]) inside the loops
        # despite having first_row/first_col already.
        first_row, last_row = int(rows_range[0]), int(rows_range[1])
        first_col, last_col = int(cols_range[0]), int(cols_range[1])
        rows_number = last_row - first_row + 1
        cols_number = last_col - first_col + 1
        data_matrix = [[0 for x in xrange(cols_number)] for x in xrange(rows_number)]
        for curr_row in range(first_row, last_row + 1):
            for curr_cell in range(first_col, last_col + 1):
                value = worksheet.cell_value(curr_row, curr_cell)
                if worksheet.cell_type(curr_row, curr_cell) == 1:  # 1 == XL_CELL_TEXT
                    value = value.encode("UTF-8")
                data_matrix[curr_row - first_row][curr_cell - first_col] = value
        return data_matrix
| landportal/landbook-importers | FAOAgriculturalCensues_Importer/es/weso/fao/ExcelManagement/excel_reader.py | Python | mit | 1,436 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
# Exercise a single model combination through the shared ozone harness.
# NOTE(review): argument order presumably (transformations, trends,
# periodics, autoregressive models) — confirm against testmod.build_model.
testmod.build_model( ['RelativeDifference'] , ['MovingAverage'] , ['Seasonal_DayOfMonth'] , ['AR'] );
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prediction and evaluation-related utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import tensorflow.compat.v1 as tf
from schema_guided_dst import schema
from schema_guided_dst.baseline import data_utils
REQ_SLOT_THRESHOLD = 0.5
def get_predicted_dialog(dialog, all_predictions, schemas):
  """Update labels in a dialogue based on model predictions.

  Args:
    dialog: A json object containing dialogue whose labels are to be updated.
    all_predictions: A dict mapping prediction name to the predicted value. See
      SchemaGuidedDST class for the contents of this dict.
    schemas: A Schema object wrapping all the schemas for the dataset.

  Returns:
    A json object containing the dialogue with labels predicted by the model.
  """
  # Overwrite the labels in the turn with the predictions from the model. For
  # test set, these labels are missing from the data and hence they are added.
  dialog_id = dialog["dialogue_id"]
  # The slot values tracked for each service.  NOTE: this dict is carried
  # across turns, so a value predicted in an earlier turn stays in the state
  # of later turns unless overwritten.
  all_slot_values = collections.defaultdict(dict)
  for turn_idx, turn in enumerate(dialog["turns"]):
    # Only USER turns carry dialogue-state frames; SYSTEM turns are skipped.
    if turn["speaker"] == "USER":
      user_utterance = turn["utterance"]
      # The preceding SYSTEM utterance (empty for the very first turn); used
      # below to recover non-categorical slot values predicted as spans of it.
      system_utterance = (
          dialog["turns"][turn_idx - 1]["utterance"] if turn_idx else "")
      turn_id = "{:02d}".format(turn_idx)
      for frame in turn["frames"]:
        # Predictions are keyed per (dialogue, turn, service).
        predictions = all_predictions[(dialog_id, turn_id, frame["service"])]
        slot_values = all_slot_values[frame["service"]]
        service_schema = schemas.get_service_schema(frame["service"])
        # Remove the slot spans and state if present.
        frame.pop("slots", None)
        frame.pop("state", None)
        # The baseline model doesn't predict slot spans. Only state predictions
        # are added.
        state = {}
        # Add prediction for active intent. Offset is subtracted to account for
        # NONE intent.
        active_intent_id = predictions["intent_status"]
        state["active_intent"] = (
            service_schema.get_intent_from_id(active_intent_id - 1)
            if active_intent_id else "NONE")
        # Add prediction for requested slots: every slot whose predicted
        # probability exceeds REQ_SLOT_THRESHOLD.
        requested_slots = []
        for slot_idx, slot in enumerate(service_schema.slots):
          if predictions["req_slot_status"][slot_idx] > REQ_SLOT_THRESHOLD:
            requested_slots.append(slot)
        state["requested_slots"] = requested_slots
        # Add prediction for user goal (slot values).
        # Categorical slots.
        for slot_idx, slot in enumerate(service_schema.categorical_slots):
          slot_status = predictions["cat_slot_status"][slot_idx]
          if slot_status == data_utils.STATUS_DONTCARE:
            slot_values[slot] = data_utils.STR_DONTCARE
          elif slot_status == data_utils.STATUS_ACTIVE:
            value_idx = predictions["cat_slot_value"][slot_idx]
            slot_values[slot] = (
                service_schema.get_categorical_slot_values(slot)[value_idx])
        # Non-categorical slots: the model predicts token span indices, which
        # are mapped back to character offsets via the alignment arrays.
        for slot_idx, slot in enumerate(service_schema.non_categorical_slots):
          slot_status = predictions["noncat_slot_status"][slot_idx]
          if slot_status == data_utils.STATUS_DONTCARE:
            slot_values[slot] = data_utils.STR_DONTCARE
          elif slot_status == data_utils.STATUS_ACTIVE:
            tok_start_idx = predictions["noncat_slot_start"][slot_idx]
            tok_end_idx = predictions["noncat_slot_end"][slot_idx]
            ch_start_idx = predictions["noncat_alignment_start"][tok_start_idx]
            ch_end_idx = predictions["noncat_alignment_end"][tok_end_idx]
            # NOTE(review): alignment offsets appear to be 1-based with the
            # sign selecting the utterance (negative -> system, positive ->
            # user); an offset of 0 falls through both branches and leaves the
            # slot unchanged — presumably meaning "no span". Confirm against
            # the alignment encoding in data_utils.
            if ch_start_idx < 0 and ch_end_idx < 0:
              # Add span from the system utterance.
              slot_values[slot] = (
                  system_utterance[-ch_start_idx - 1:-ch_end_idx])
            elif ch_start_idx > 0 and ch_end_idx > 0:
              # Add span from the user utterance.
              slot_values[slot] = (user_utterance[ch_start_idx - 1:ch_end_idx])
        # Create a new dict to avoid overwriting the state in previous turns
        # because of use of same objects.
        state["slot_values"] = {s: [v] for s, v in slot_values.items()}
        frame["state"] = state
  return dialog
def write_predictions_to_file(predictions, input_json_files, schema_json_file,
                              output_dir):
  """Write the predicted dialogues as json files.

  For each input dialogue file, a file with the same basename is created in
  output_dir containing the dialogues with model-predicted states.

  Args:
    predictions: An iterator containing model predictions. This is the output of
      the predict method in the estimator.
    input_json_files: A list of json paths containing the dialogues to run
      inference on.
    schema_json_file: Path for the json file containing the schemas.
    output_dir: The directory where output json files will be created.
  """
  tf.compat.v1.logging.info("Writing predictions to %s.", output_dir)
  schemas = schema.Schema(schema_json_file)
  # Index all predictions by (dialogue_id, turn_id, service_name), skipping
  # the padding examples the estimator adds to fill the final batch.
  all_predictions = {}
  for idx, prediction in enumerate(predictions):
    if not prediction["is_real_example"]:
      continue
    tf.compat.v1.logging.log_every_n(
        tf.compat.v1.logging.INFO, "Processed %d examples.", 500, idx)
    # example_id has the form "<prefix>-<dialog_id>-<turn_id>-<service_name>".
    _, dialog_id, turn_id, service_name = (
        prediction["example_id"].decode("utf-8").split("-"))
    all_predictions[(dialog_id, turn_id, service_name)] = prediction
  # Read each input file and write its predictions.
  for input_file_path in input_json_files:
    with tf.io.gfile.GFile(input_file_path) as f:
      dialogs = json.load(f)
    pred_dialogs = []
    for d in dialogs:
      pred_dialogs.append(get_predicted_dialog(d, all_predictions, schemas))
    input_file_name = os.path.basename(input_file_path)
    output_file_path = os.path.join(output_dir, input_file_name)
    with tf.io.gfile.GFile(output_file_path, "w") as f:
      json.dump(
          pred_dialogs, f, indent=2, separators=(",", ": "), sort_keys=True)
| google-research/google-research | schema_guided_dst/baseline/pred_utils.py | Python | apache-2.0 | 6,649 |
#!/usr/bin/env python
##########################################################
# The setup.py for the Sage Notebook
##########################################################
import os
from setuptools import setup
def lremove(string, prefix):
    """Strip every leading repetition of ``prefix`` from ``string``.

    Unlike ``str.lstrip``, the whole prefix is removed (possibly several
    times), not a set of characters.

    An empty prefix is returned unchanged: ``"".startswith("")`` is always
    True and the slice removes nothing, so the original implementation
    looped forever in that case.
    """
    if not prefix:
        return string
    while string.startswith(prefix):
        string = string[len(prefix):]
    return string
def all_files(dir, prefix):
    """
    Recursively collect every filename under ``dir``, with ``prefix``
    stripped from the left of each path (via :func:`lremove`).
    """
    found = []
    for entry in os.listdir(dir):
        path = dir + '/' + entry
        if os.path.isdir(path):
            # Descend into subdirectories and merge their results.
            found.extend(all_files(path, prefix))
        elif os.path.isfile(path):
            found.append(lremove(path, prefix))
    return found
# Third-party runtime dependencies of the Sage Notebook.
# BUG FIX: the original list was missing a comma after 'flask-babel', so
# Python's implicit string concatenation merged it with the next entry into
# the bogus requirement 'flask-babelflask-themes2'.
install_requires = [
    'twisted>=11.0.0',
    'flask>=0.10.1',
    'flask-openid',
    'flask-autoindex',
    'flask-babel',
    'flask-themes2',
    'future',
    'smtpsend',
    'pexpect',
    'docutils',
    'jsmin',
    'pyopenssl',
    'service_identity',
    'appdirs',
    'tornado',  # this is optional
]
# Standard setuptools metadata for the sagenb/sagewui packages.  The
# package_data entry is computed at setup time by walking sagewui/static,
# sagewui/translations and sagewui/themes with all_files(), which strips the
# 'sagewui/' prefix so the paths are relative to the package directory.
setup(
    name='sagenb',
    version='0.13',
    description='The Sage Notebook',
    license='GNU General Public License (GPL) v3+',
    author='William Stein et al.',
    author_email='sage-notebook@googlegroups.com',
    url='http://github.com/sagemath/sagenb',
    install_requires=install_requires,
    dependency_links=[],
    test_suite='sagenb.testing.run_tests.all_tests',
    packages=[
        'sagewui',
        'sagewui.blueprints',
        'sagewui.gui',
        'sagewui.sage_server',
        'sagewui.storage',
        'sagewui.util',
        'sagewui.compress',
        'sagenb',
        'sagenb.notebook',
        'sagenb.testing',
        'sagenb.testing.tests',
        'sagenb.testing.selenium',
    ],
    scripts=['sagewui/static/sage3d/sage3d'],
    package_data={
        'sagewui': (all_files('sagewui/static', 'sagewui/') +
                    all_files('sagewui/translations', 'sagewui/') +
                    all_files('sagewui/themes', 'sagewui/'))
    },
    zip_safe=False,
)
| migeruhito/sagenb | setup.py | Python | gpl-3.0 | 2,069 |
def say_hello():
    """Print a greeting to stdout."""
    # print() as a function behaves identically under Python 2 (a call with
    # one argument) and Python 3; the original `print 'Hello'` statement is
    # a SyntaxError on Python 3.
    print('Hello')
# Script entry point: greet only when executed directly (no-op on import).
if __name__ == '__main__':
    say_hello()
| MagicForest/Python | src/training/Core2/Chapter14ExecutionEnvironment/hello.py | Python | apache-2.0 | 88 |
# -*- coding: UTF-8 -*-
# TcosMonitor version __VERSION__
#
# Copyright (c) 2006-2011 Mario Izquierdo <mariodebian@gmail.com>
#
# This package is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
#
# This package is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
import gtk
from gettext import gettext as _
import tcosmonitor.shared
# Column indices of the gtk.ListStore backing the host list view; the order
# here must match the ListStore column types created in TcosListView.__init__.
COL_HOST, COL_IP, COL_USERNAME, COL_ACTIVE, COL_LOGGED, COL_BLOCKED, COL_PROCESS, COL_TIME, COL_SEL, COL_SEL_ST = range(10)
# constant to font sizes
PANGO_SCALE=1024
def print_debug(txt):
    """Write *txt* to stderr, prefixed with this module's name, when
    tcosmonitor.shared.debug is enabled."""
    if tcosmonitor.shared.debug:
        # sys.stderr.write() works under both Python 2 and Python 3, unlike
        # the old Python-2-only `print >> sys.stderr` statement (the file
        # already carried a commented-out Python 3 variant of this line).
        sys.stderr.write("%s::%s\n" % (__name__, txt))
class TcosListView(object):
    """Table ("list") view of thin clients for TcosMonitor.

    Builds a gtk.TreeView backed by a gtk.ListStore whose column layout is
    given by the module-level COL_* indices, and wires up the selection and
    right-click handlers.  All application state (widgets, config, xmlrpc
    helpers) is reached through the ``main`` controller passed to __init__.
    """

    def __init__(self, main):
        """Create the columns and attach the model to the 'hostlist' widget."""
        print_debug("__init__()")
        self.main=main
        self.ui=self.main.ui
        self.main.updating=True
        self.searching=False # boolean True thread running False not running
        # define as None and register with info extension
        self.populate_datatxt=None
        # One row per client: hostname, ip, username, three status icons,
        # process count, login time and two "selected" booleans.
        self.model=gtk.ListStore(str, str, str, gtk.gdk.Pixbuf, gtk.gdk.Pixbuf, gtk.gdk.Pixbuf, str, str, bool,bool)
        self.main.tabla = self.ui.get_object('hostlist')
        self.main.tabla.set_model (self.model)
        cell1 = gtk.CellRendererText ()
        column1 = gtk.TreeViewColumn (_("Hostname"), cell1, text = COL_HOST)
        column1.set_resizable (True)
        column1.set_sort_column_id(COL_HOST)
        self.main.tabla.append_column (column1)
        cell2 = gtk.CellRendererText ()
        column2 = gtk.TreeViewColumn (_("IP address"), cell2, text = COL_IP)
        column2.set_resizable (True)
        column2.set_sort_column_id(COL_IP)
        self.main.tabla.append_column (column2)
        cell3 = gtk.CellRendererText ()
        column3 = gtk.TreeViewColumn (_("Username"), cell3, text = COL_USERNAME)
        column3.set_resizable (True)
        column3.set_sort_column_id(COL_USERNAME)
        self.main.tabla.append_column (column3)
        # Icon columns (active / logged / blocked) are not sortable.
        cell4 = gtk.CellRendererPixbuf()
        column4 = gtk.TreeViewColumn (_("Active"), cell4, pixbuf = COL_ACTIVE)
        #column4.set_sort_column_id(COL_ACTIVE)
        self.main.tabla.append_column (column4)
        cell5 = gtk.CellRendererPixbuf()
        column5 = gtk.TreeViewColumn (_("Logged"), cell5, pixbuf = COL_LOGGED)
        #column5.set_sort_column_id(COL_LOGGED)
        self.main.tabla.append_column (column5)
        cell6 = gtk.CellRendererPixbuf()
        column6 = gtk.TreeViewColumn (_("Screen Blocked"), cell6, pixbuf = COL_BLOCKED)
        #column6.set_sort_column_id(COL_BLOCKED)
        self.main.tabla.append_column (column6)
        cell7 = gtk.CellRendererText ()
        column7 = gtk.TreeViewColumn (_("Num of process"), cell7, text = COL_PROCESS)
        column7.set_resizable (True)
        column7.set_sort_column_id(COL_PROCESS)
        self.main.tabla.append_column (column7)
        cell8 = gtk.CellRendererText ()
        column8 = gtk.TreeViewColumn (_("Time log in"), cell8, text = COL_TIME)
        column8.set_resizable (True)
        column8.set_sort_column_id(COL_TIME)
        self.main.tabla.append_column (column8)
        # Optional checkbox column, shown only when "selectedhosts" is enabled
        # in the configuration.
        if self.main.config.GetVar("selectedhosts") == 1:
            cell9 = gtk.CellRendererToggle ()
            cell9.connect('toggled', self.on_sel_click, self.model, COL_SEL_ST)
            #column9 = gtk.TreeViewColumn(_("Sel"), cell9, active=COL_SEL_ST, activatable=1) # activatable make warnings , not needed
            column9 = gtk.TreeViewColumn(_("Sel"), cell9, active=COL_SEL_ST)
            self.main.tabla.append_column (column9)
        # print rows in alternate colors if theme allow
        self.main.tabla.set_rules_hint(True)
        self.main.tabla_file = self.main.tabla.get_selection()
        self.main.tabla_file.connect("changed", self.on_hostlist_click)
        # allow to work right click
        self.main.tabla.connect_object("button_press_event", self.on_hostlist_event, self.main.menu)
        return

    def on_hostlist_click(self, hostlist):
        """Selection-changed handler: query the selected host over XMLRPC
        and populate the info pane in a worker thread."""
        if self.main.worker_running:
            return
        if not self.populate_datatxt:
            # An info extension must register populate_datatxt first.
            print_debug ("on_hostlist_click() self.populate_datatxt() NOT DEFINED")
            return
        self.main.progressbar.hide()
        (model, iter) = hostlist.get_selected()
        if not iter:
            return
        self.main.selected_host=model.get_value(iter,COL_HOST)
        self.main.selected_ip=model.get_value(iter, COL_IP)
        print_debug ( "on_hostlist_clic() selectedhost=%s selectedip=%s" \
            %(self.main.selected_host, self.main.selected_ip) )
        # call to read remote info
        #self.main.localdata.newhost(self.main.selected_ip)
        self.main.xmlrpc.newhost(self.main.selected_ip)
        self.main.xmlrpc.ip=self.main.selected_ip
        if not self.main.xmlrpc.isPortListening(self.main.selected_ip, self.main.xmlrpc.lastport):
            print_debug ( "on_host_list_click() XMLRPC not running in %s" %(self.main.selected_ip) )
            self.main.write_into_statusbar ( _("Error connecting to tcosxmlrpc in %s") %(self.main.selected_ip) )
            return
        print_debug ( "on_host_list_click() AUTH OK" )
        self.main.write_into_statusbar ( "" )
        print_debug ( "on_hostlist_click() callig worker to populate in Thread" )
        self.main.worker=tcosmonitor.shared.Workers( self.main,\
            target=self.populate_datatxt, args=([self.main.selected_ip]) ).start()
        return

    def on_hostlist_event(self, widget, event):
        """Button-press handler: on right click (button 3) pop up the
        single-host menu when a row is hit, otherwise the all-hosts menu."""
        if event.button == 3:
            x = int(event.x)
            y = int(event.y)
            time = event.time
            pthinfo = self.main.tabla.get_path_at_pos(x, y)
            if pthinfo is not None:
                path, col, cellx, celly = pthinfo
                #generate menu
                self.main.menus.RightClickMenuOne( path )
                self.main.tabla.grab_focus()
                self.main.tabla.set_cursor( path, col, 0)
                self.main.menu.popup( None, None, None, event.button, time)
                # Returning 1 stops further handling of the event.
                return 1
            else:
                self.main.menus.RightClickMenuAll()
                self.main.allmenu.popup( None, None, None, event.button, time)
                print_debug ( "on_hostlist_event() NO row selected" )
        return

    def on_sel_click(self, cell, path, model, col=0):
        """Toggle handler for the 'Sel' checkbox column."""
        # reverse status of sel row (saved in COL_SEL_ST)
        iter = model.get_iter(path)
        self.model.set_value(iter, col, not model[path][col])
        print_debug("on_sel_click() ip=%s status=%s" %(model[path][COL_IP], model[path][col]))
        return True

    def isenabled(self):
        """
        return True if only configuration enable IconView
        prevent to work if ClassView is hidden
        """
        if self.main.config.GetVar("listmode") == 'list' or \
           self.main.config.GetVar("listmode") == 'both':
            return True
        return False

    def isactive(self):
        """
        Return True if IconView is enabled and is active (We click on it)
        know this getting tabindex of viewtabas widget.
        0 => active list view
        1 => active icon view
        """
        # BUG FIX: the original tested `not self.isenabled` — the bound
        # method object, which is always truthy — so the guard never fired.
        # The method must be called.
        if not self.isenabled():
            return False
        if self.main.viewtabs.get_current_page() != 0:
            return False
        return True

    def clear(self):
        """Remove every row from the list model."""
        self.model.clear()

    def generate_file(self, data):
        """Append one client row; `data` keys match the COL_* columns."""
        self.iter = self.model.append (None)
        self.model.set_value (self.iter, COL_HOST, data['hostname'] )
        self.model.set_value (self.iter, COL_IP, data['host'] )
        self.model.set_value (self.iter, COL_USERNAME, data['username'] )
        self.model.set_value (self.iter, COL_ACTIVE, data['image_active'] )
        self.model.set_value (self.iter, COL_LOGGED, data['image_logged'] )
        self.model.set_value (self.iter, COL_BLOCKED, data['image_blocked'] )
        self.model.set_value (self.iter, COL_PROCESS, data['num_process'] )
        self.model.set_value (self.iter, COL_TIME, data['time_logged'] )

    def getmultiple(self):
        """Return the IPs of all rows whose 'Sel' checkbox is ticked."""
        allclients=[]
        #model=self.main.tabla.get_model()
        rows = []
        self.model.foreach(lambda model, path, iter: rows.append(path))
        for host in rows:
            iter=self.model.get_iter(host)
            if self.model.get_value(iter, COL_SEL_ST):
                allclients.append(self.model.get_value(iter, COL_IP))
        return allclients

    def get_selected(self):
        """Return the IP of the currently selected row, or None."""
        (model, iter) = self.main.tabla.get_selection().get_selected()
        if iter == None:
            print_debug( "get_selected() not selected thin client !!!" )
            return
        return model.get_value(iter, COL_IP)

    def get_host(self, ip):
        """Return the hostname of the currently selected row, or None.

        NOTE(review): the `ip` argument is ignored — the hostname always
        comes from the current selection; confirm callers rely on that.
        """
        (model, iter) = self.main.tabla.get_selection().get_selected()
        if iter == None:
            print_debug( "get_selected() not selected thin client !!!" )
            return
        return model.get_value(iter, COL_HOST)

    def change_lockscreen(self, ip, image):
        """Set the 'Screen Blocked' icon of the row matching `ip`."""
        self.model.foreach(self.__lockscreen_changer, [ip, image])

    def __lockscreen_changer(self, model, path, iter, args):
        """foreach callback for change_lockscreen()."""
        ip, image = args
        # change image if ip is the same.
        if model.get_value(iter, COL_IP) == ip:
            model.set_value(iter, COL_BLOCKED, image)

    def refresh_client_info(self, ip, data):
        """Refresh every column of the row matching `ip` from `data`."""
        self.model.foreach(self.__refresh_client_info, [ip, data] )

    def __refresh_client_info(self, model, path, iter, args):
        """foreach callback for refresh_client_info()."""
        ip, data = args
        print_debug ( "__refresh_client_info() ip=%s model_ip=%s" %(ip, model.get_value(iter, COL_IP)) )
        # update data if ip is the same.
        # NOTE(review): this reads data['ip'] whereas generate_file() reads
        # data['host'] for the same column — verify the two callers really
        # supply differently-keyed dicts.
        if model.get_value(iter, COL_IP) == ip:
            #self.set_client_data(ip, model, iter)
            model.set_value (iter, COL_HOST, data['hostname'] )
            model.set_value (iter, COL_IP, data['ip'] )
            model.set_value (iter, COL_USERNAME, data['username'] )
            model.set_value (iter, COL_ACTIVE, data['image_active'] )
            model.set_value (iter, COL_LOGGED, data['image_logged'] )
            model.set_value (iter, COL_BLOCKED, data['image_blocked'] )
            model.set_value (iter, COL_PROCESS, data['num_process'] )
            model.set_value (iter, COL_TIME, data['time_logged'] )
| mariodebian/tcosmonitor | tcosmonitor/TcosListView.py | Python | gpl-2.0 | 11,195 |
"""advanced.py
Illustrate usage of Query combined with the FromCache option,
including front-end loading, cache invalidation, namespace techniques
and collection caching.
"""
import environment
from model import Person, Address, cache_address_bits
from meta import Session, FromCache, RelationCache
from sqlalchemy.orm import eagerload
def load_name_range(start, end, invalidate=False):
    """Load Person objects whose names fall in a numbered range.

    start/end are integers; the range covered is
    "person <start>" - "person <end>".

    The query is cached under the "name_range" namespace via FromCache,
    and each Person's `addresses` collection is cached separately under
    "by_person" via RelationCache — so lazyloads of Person.addresses are
    served from the cache.  (Eagerly loading the collection instead would
    cache each Person together with his/her addresses as one larger value;
    see the commented-out eagerload line.)

    If `invalidate` is True, the cache entry for the current criterion is
    dropped before loading, forcing fresh SQL.
    """
    lower_bound = "person %.2d" % start
    upper_bound = "person %.2d" % end

    query = Session.query(Person).filter(
        Person.name.between(lower_bound, upper_bound))

    # Caching options, applied in the same order as always:
    # address-bit options, then the query-level cache, then the separate
    # cache for the Person.addresses collection.
    query = query.options(cache_address_bits)
    query = query.options(FromCache("default", "name_range"))
    query = query.options(RelationCache("default", "by_person", Person.addresses))

    # alternatively, eagerly load the "addresses" collection so each Person
    # is cached together with its addresses as a single, larger value:
    #query = query.options(eagerload(Person.addresses))

    if invalidate:
        query.invalidate()

    return query.all()
# --- Demo script body ---------------------------------------------------
# The first run of this script populates the Beaker cache; subsequent runs
# are served from the cache and emit no SQL, except where invalidate=True
# explicitly refreshes an entry.  (Python 2 print statements, matching the
# rest of this example.)

print "two through twelve, possibly from cache:\n"
print ", ".join([p.name for p in load_name_range(2, 12)])

print "\ntwenty five through forty, possibly from cache:\n"
print ", ".join([p.name for p in load_name_range(25, 40)])

# loading them again, no SQL is emitted
print "\ntwo through twelve, from the cache:\n"
print ", ".join([p.name for p in load_name_range(2, 12)])

# but with invalidate, they are
print "\ntwenty five through forty, invalidate first:\n"
print ", ".join([p.name for p in load_name_range(25, 40, True)])

# illustrate the address loading from either cache/already
# on the Person
print "\n\nPeople plus addresses, two through twelve, addresses possibly from cache"
for p in load_name_range(2, 12):
    print p.format_full()

# illustrate the address loading from either cache/already
# on the Person
print "\n\nPeople plus addresses, two through twelve, addresses from cache"
for p in load_name_range(2, 12):
    print p.format_full()

print "\n\nIf this was the first run of advanced.py, try "\
    "a second run. Only one SQL statement will be emitted."
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.