repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
onezens/python | basic/15_regex_getimage.py | Python | mit | 386 | 0.03886 | #encoding=utf8
import re
import urllib
def getUrl(url):
page = urllib.urlopen(url)
return page.read()
def getImages(html):
images = re.findall(r'src="(.*?\.(jpg|png))"', html)
x = 1
for imageurl in images :
print('downloading %s'%imageurl[0])
urllib.urlretrieve(imageurl[0], './images/%d.jpg'%x)
x += | 1
html = getUrl('http://www.tooopen.com/im | g/87.aspx')
getImages(html)
|
morelab/weblabdeusto | server/src/test/deployments/federated_basic_sql/provider1/core_config.py | Python | bsd-2-clause | 1,626 | 0.014145 | from __future__ import print_function, unicode_literals
core_store_students_programs = False
core_store_students_programs_path = 'files_stored'
core_experiment_poll_time = 350 # seconds
# Ports
core_facade_port = 28345
core_facade_server_route = 'provider1-route'
# Will only work in JSON in this config file :-(
core_server_url = 'http://127.0.0.1:%s/weblab/' % core_facade_port
# Scheduling
core_coordinator_db_name = 'WebLabCoordination2'
core_coordinator_db_username = 'weblab'
core_coordinator_db_password = 'weblab'
core_coordinator_laboratory_servers = {
"laboratory:main_instance@provider1_machine" : {
"exp1|dummy1|Dummy experiments" : "dummy1@dummy1_local",
" | exp1|dummy3_with_other_name|Dummy experiments" : "dummy3_with_other_name@dummy3_with_other_name",
}
}
core_coordinator_external_servers = {
'dummy1@Dummy | experiments' : [ 'dummy1_external' ],
'dummy4@Dummy experiments' : [ 'dummy4' ],
}
_provider2_scheduling_config = ("EXTERNAL_WEBLAB_DEUSTO", {
'baseurl' : 'http://127.0.0.1:38345/weblab/',
'username' : 'provider1',
'password' : 'password',
})
core_scheduling_systems = {
"dummy1_local" : ("PRIORITY_QUEUE", {}),
"dummy3_with_other_name" : ("PRIORITY_QUEUE", {}),
"dummy4" : _provider2_scheduling_config,
"dummy1_external" : _provider2_scheduling_config,
}
core_weblabdeusto_federation_retrieval_period = 0.1
|
joyhuang-web/flaskbb | tests/unit/utils/test_permissions.py | Python | bsd-3-clause | 3,474 | 0.000576 | """
This test will use the default permissions found in
flaskbb.utils.populate
"""
from flaskbb.utils.permissions import *
def test_moderator_permissions_in_forum(
forum, moderator_user, topic, topic_moderator):
"""Test the moderator permissions in a forum where the user is a
moderator.
"""
assert moderator_user in forum.moderators
assert can_post_reply(moderator_user, forum)
assert can_post_topic(moderator_user, forum)
assert can_edit_post(moderator_user, topic.user_id, forum)
assert can_moderate(moderator_user, forum)
assert can_delete_post(moderator_user, topic.user_id, forum)
assert can_delete_topic(moderator_user, topic.user_id, forum)
def test_moderator_permissions_without_forum(
forum, moderator_user, topic, topic_moderator):
"""Test the moderator permissions in a forum where the user is not a
moderator.
"""
forum.moderators.remove(moderator_user)
assert not moderator_user in forum.moderators
assert not can_moderate(moderator_user, forum)
assert can_post_reply(moderator_user, forum)
assert can_post_topic(moderator_user, forum)
assert not can_edit_post(moderator_user, topic.user_id, forum)
assert not can_delete_post(moderator_user, topic.user_id, forum)
assert not can_delete_topic(moderator_user, topic.user_id, forum)
# Test with own topic
assert can_delete_post(moderator_user, topic_moderator.user_id, forum)
assert can_delete_topic(moderator_user, topic_moderator.user_id, forum)
assert can_edit_post(moderator_user, topic_moderator.user_id, forum)
# Test moderator permissions
assert can_edit_user(moderator_user)
assert can_ban_user(moderator_user)
def test_normal_permissions(forum, user, topic):
"""Test the permissions for a normal user."""
assert not can_moderate(user, forum)
assert can_post_reply(user, forum)
assert can_post_topic(user, forum)
assert can_edit_post(user, topic.user_id, forum)
assert not can_delete_post(user, topic.user_id, forum)
assert not can_delete_topic(user, topic.user_id, forum)
assert not can_edit_user(user)
assert not can_ban_user(user)
def test_admin_permissions(forum, admin_user, topic):
"""Test the permissions for a admin user."""
assert can_moderate(admin_user, forum)
assert can_post_reply(admin_user, forum)
assert can_post_top | ic(admin_user, forum)
assert can_edit_post(admin_user, topic.user_id, forum)
assert can_delete_post(admin_user, topic.user_id, forum)
assert can_delete_topic(admin_user, topic.user_id, forum)
assert can_edit_user(admin_user)
assert can_ban_user(admin_user)
def test_super_moderator_permissions(forum, super_moderator_user, topic):
"""Test the permissions for a super moderator use | r."""
assert can_moderate(super_moderator_user, forum)
assert can_post_reply(super_moderator_user, forum)
assert can_post_topic(super_moderator_user, forum)
assert can_edit_post(super_moderator_user, topic.user_id, forum)
assert can_delete_post(super_moderator_user, topic.user_id, forum)
assert can_delete_topic(super_moderator_user, topic.user_id, forum)
assert can_edit_user(super_moderator_user)
assert can_ban_user(super_moderator_user)
def test_can_moderate_without_permission(moderator_user):
"""Test can moderate for a moderator_user without a permission."""
assert can_moderate(moderator_user) == False
|
ethantang95/DIGITS | digits/extensions/data/imageProcessing/forms.py | Python | bsd-3-clause | 3,146 | 0.001907 | # Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from flask.ext.wtf import Form
from wtforms import validators
from digits import utils
from digits.utils import subclass
from digits.utils.forms import validate_required_iff
@subclass
class DatasetForm(Form):
"""
A form used to create an image processing dataset
"""
def validate_folder_path(form, field):
if not field.data:
pass
else:
# make sure the filesystem path exists
if not os.path.exists(field.data) or not os.path.isdir(field.data):
raise validators.ValidationError(
'Folder does not exist or is not reachable')
else:
return True
feature_folder = utils.forms.StringField(
u'Feature image folder',
validators=[
validators.DataRequired(),
validate_folder_path,
],
tooltip="Indicate a folder full of images."
)
label_folder = utils.forms.StringField(
u'Label image folder',
validators=[
validators.DataRequired(),
validate_folder_path,
],
tooltip="Indicate a folder full of images. For each image in the feature"
" image folder there must be one corresponding image in the label"
" image folder. The label image must have the same filename except"
" for the extension, which may differ."
)
folder_pct_val = utils.forms.IntegerField(
u'% for validation',
default=10,
validators=[
validators.NumberRange(min=0, max=100)
],
tooltip="You can choose to set apart a certain percentage of images "
"from the training images for the validation set."
)
has_val_folder = utils.forms.BooleanField('Separate validation images',
default=False,
)
validation_feature_folder = utils.forms.StringField(
u'Validation feature image folder',
validators=[
validate_required_iff(has_val_folder=True),
validate_folder_path,
],
tooltip="Indicate a folder full of images."
)
validation_label_folder = utils.forms.StringField(
u'Validation label image folder',
validators=[
validate_required_iff(has_val_folder=True),
validate_folder_path,
],
tooltip="Indicate a folder full of images. For each image in the feature"
" image folder there must be one corresponding image in the label"
" image folder. The label image must have the same filename except"
" for the extension, which may differ."
)
channel_conversion = utils.forms.SelectField(
'Channel conversion',
choices=[
('RGB', 'RGB'),
('L', 'Grayscale'),
| ('non | e', 'None'),
],
default='none',
tooltip="Perform selected channel conversion."
)
|
pidpawel/komiksowiec | docs/source/conf.py | Python | mit | 9,830 | 0.000102 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Komiksowiec documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 3 23:11:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Komiksowiec'
copyright = '2017, Paweł pid Kozubal'
author = 'Paweł pid Kozubal'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Komiksowiec v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# | Language to be used for generating the HTML full-text | search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Komiksowiecdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Komiksowiec.tex', 'Komiksowiec Documentation',
'Paweł pid Kozubal', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, sh |
antoinecarme/pyaf | tests/artificial/transf_RelativeDifference/trend_Lag1Trend/cycle_5/ar_/test_artificial_32_RelativeDifference_Lag1Trend_5__0.py | Python | bsd-3-clause | 271 | 0.084871 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "RelativeDifference", sigma = 0.0, exog_count = 0, a | r_order = 0); | |
0x0mar/MITMf | sslstrip/SSLServerConnectionHSTS.py | Python | gpl-3.0 | 4,966 | 0.015304 | # Copyright (c) 2004-2009 Moxie Marlinspike
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import logging, re, string
from ServerConnectionHSTS import ServerConnection
class SSLServerConnection(ServerConnection):
'''
For SSL connections to a server, we need to do some additional stripping. First we need
to make note of any relative links, as the server will be expecting those to be requested
via SSL as well. We also want to slip our favicon in here and kill the secure bit on cookies.
'''
cookieExpression = re.compile(r"([ \w\d:#@%/;$()~_?\+-=\\\.&]+); ?Secure", re.IGNORECASE)
cssExpression = re.compile(r"url\(([\w\d:#@%/;$~_?\+-=\\\.&]+)\)", re.IGNORECASE)
iconExpression = re.compile(r"<link rel=\"shortcut icon\" .*href=\"([\w\d:#@%/;$()~_?\+-=\\\.&]+)\".*>", re.IGNORECASE)
linkExpression = re.compile(r"<((a)|(link)|(img)|(script)|(frame)) .*((href)|(src))=\"([\w\d:#@%/;$()~_?\+-=\\\.&]+)\".*>", re.IGNORECASE)
headExpression = re.compile(r"<head>", re.IGNORECASE)
def __init__(self, command, uri, postData, headers, client):
ServerConnection.__init__(self, command, uri, postData, headers, client)
def getLogLevel(self):
return logging.INFO
def getPostPrefix(self):
return "SECURE POST"
def handleHeader(self, key, value):
if (key.lower() == 'set-cookie'):
newvalues =[]
value = SSLServerConnection.cookieExpression.sub("\g<1>", value)
values = value.split(';')
for v in values:
if v[:7].lower()==' domain':
dominio=v.split("=")[1]
logging.debug("LEO Parsing cookie domain parameter: %s"%v)
real = self.urlMonitor.sustitucion
if dominio in real:
v=" Domain=%s"%real[dominio]
logging.debug("LEO New cookie domain parameter: %s"%v)
newvalues.append(v)
value = ';'.join(new | values)
if (key.lower() == 'access-control-allow-origin'):
value='*'
ServerConnection.handleHeader(self, key, value)
def stripFileFromPath(self, path):
(strippedPath, lastSlash, file) = path.rpartitio | n('/')
return strippedPath
def buildAbsoluteLink(self, link):
absoluteLink = ""
if ((not link.startswith('http')) and (not link.startswith('/'))):
absoluteLink = "http://"+self.headers['host']+self.stripFileFromPath(self.uri)+'/'+link
logging.debug("Found path-relative link in secure transmission: " + link)
logging.debug("New Absolute path-relative link: " + absoluteLink)
elif not link.startswith('http'):
absoluteLink = "http://"+self.headers['host']+link
logging.debug("Found relative link in secure transmission: " + link)
logging.debug("New Absolute link: " + absoluteLink)
if not absoluteLink == "":
absoluteLink = absoluteLink.replace('&', '&')
self.urlMonitor.addSecureLink(self.client.getClientIP(), absoluteLink);
def replaceCssLinks(self, data):
iterator = re.finditer(SSLServerConnection.cssExpression, data)
for match in iterator:
self.buildAbsoluteLink(match.group(1))
return data
def replaceFavicon(self, data):
match = re.search(SSLServerConnection.iconExpression, data)
if (match != None):
data = re.sub(SSLServerConnection.iconExpression,
"<link rel=\"SHORTCUT ICON\" href=\"/favicon-x-favicon-x.ico\">", data)
else:
data = re.sub(SSLServerConnection.headExpression,
"<head><link rel=\"SHORTCUT ICON\" href=\"/favicon-x-favicon-x.ico\">", data)
return data
def replaceSecureLinks(self, data):
data = ServerConnection.replaceSecureLinks(self, data)
data = self.replaceCssLinks(data)
if (self.urlMonitor.isFaviconSpoofing()):
data = self.replaceFavicon(data)
iterator = re.finditer(SSLServerConnection.linkExpression, data)
for match in iterator:
self.buildAbsoluteLink(match.group(10))
return data
|
MartinHjelmare/home-assistant | homeassistant/components/futurenow/light.py | Python | apache-2.0 | 3,852 | 0 | """Support for FutureNow Ethernet unit outputs as Lights."""
import logging
import voluptuous as vol
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_PORT, CONF_DEVICES)
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light,
PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_DRIVER = 'driver'
CONF_DRIVER_FNIP6X10AD = 'FNIP6x10ad'
CONF_DRIVER_FNIP8X10A = 'FNIP8x10a'
CONF_DRIVER_TYPES = [CONF_DRIVER_FNIP6X10AD, CONF_DRIVER_FNIP8X10A]
DEVICE_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Optional('dimmable', default=False): cv.boolean,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DRIVER): vol.In(CONF_DRIVER_TYPES),
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Required(CONF_DEVICES): {cv.string: DEVICE_SCHEMA},
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the light platform for each FutureNow unit."""
lights = []
for channel, device_config in config[CONF_DEVICES].items():
device = {}
device['name'] = device_config[CONF_NAME]
device['dimmable'] = device_config['dimmable']
device['channel'] = channel
device['driver'] = config[CONF_DRIVER]
device['host'] = config[CONF_HOST]
device['port'] = config[CONF_PORT]
lights.append(FutureNowLight(device))
add_entities(lights, True)
def to_futurenow_level(level):
"""Convert the given HASS light level (0-255) to FutureNow (0-100)."""
return int((level * 100) / 255)
def to_hass_level(level):
"""Convert the given FutureNow (0-100) light level to HASS (0-255)."""
return int((level * 255) / 100)
class FutureNowLight(Light):
"""Representation of an FutureNow light."""
def __init__(self, device):
"""Initialize the light."""
import pyfnip
self._name = device['name']
self._dimmable = device['dimmable']
self._channel = device['channel']
self._brightness = None
self._last_brightness = 255
self._state = None
if device['driver'] == CONF_DRIVER_FNIP6X10AD:
self._light = pyfnip.FNIP6x2adOutput(device['host'],
device['port'],
self._channel)
if device['driver'] == CONF_DRIVER_FNIP8X10A:
self._light = pyfnip.FNIP8x10aOutput(device['host'],
device['port'],
self._channel)
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property |
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def supported_features(self):
"""Flag supported features."""
if self._dimmable:
return SUPPORT_BRIGHTNESS
return | 0
def turn_on(self, **kwargs):
"""Turn the light on."""
if self._dimmable:
level = kwargs.get(ATTR_BRIGHTNESS, self._last_brightness)
else:
level = 255
self._light.turn_on(to_futurenow_level(level))
def turn_off(self, **kwargs):
"""Turn the light off."""
self._light.turn_off()
if self._brightness:
self._last_brightness = self._brightness
def update(self):
"""Fetch new state data for this light."""
state = int(self._light.is_on())
self._state = bool(state)
self._brightness = to_hass_level(state)
|
fsimkovic/cptbx | conkit/io/fasta.py | Python | gpl-3.0 | 4,452 | 0.000674 | # BSD 3-Clause License
#
# Copyright (c) 2016-19, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Parser module specific to FASTA sequence files
"""
__author__ = "Felix Simkovic"
__date__ = "09 Sep 2016"
__version__ = "0.1"
from conkit.io._parser import SequenceFileParser
from conkit.core.sequence import Sequence
from conkit.core.sequencefile import SequenceFile
class FastaParser(SequenceFileParser):
"""Parser class for FASTA sequence files
"""
def __init__(self):
super(FastaParser, self).__init__()
def read(self, f_handle, f_id="fasta"):
"""Read a sequence file
Parameters
----------
f_handle
Open file handle [read permissions]
f_id : str, optional
Unique sequence file identifier
Returns
-------
:obj:`~conkit.core.sequencefile.SequenceFile`
Raises
------
:exc:`ValueError`
FASTA record needs to start with >
"""
hierarchy = SequenceFile(f_id)
while True:
line = f_handle.readline().rstrip()
if not line:
continue
elif line.startswith("#"):
hierarchy.remark = line[1:]
elif line.startswith(">"):
break
while True:
if not line.startswith(">"):
raise ValueError("Fasta record needs to start with '>'")
id = line[1:] # Header without '>'
chunks = []
line = f_handle.readline().rstrip()
while True:
if not line:
break
| elif line.startswith(">"):
break
chunks.append(line)
line = f_handle.readline().rstrip()
_seq_string = "".join(chunks) # Sequence from chunks
sequence_entry = Sequence(id, _seq_string)
hierarchy.add(sequence_entry)
if not line:
| break
return hierarchy
def write(self, f_handle, hierarchy):
"""Write a sequence file instance to to file
Parameters
----------
f_handle
Open file handle [write permissions]
hierarchy : :obj:`~conkit.core.sequencefile.SequenceFile`, :obj:`~conkit.core.sequence.Sequence`
"""
hierarchy = self._reconstruct(hierarchy)
content = ""
for remark in hierarchy.remark:
content += "#{}\n".format(remark)
for sequence_entry in hierarchy:
header = ">{}".format(sequence_entry.id)
if len(sequence_entry.remark) > 0:
header = "|".join([header] + sequence_entry.remark)
content += header + "\n"
sequence_string = sequence_entry.seq.upper() # UPPER CASE !!!
for i in range(0, sequence_entry.seq_len, 60):
content += sequence_string[i : i + 60] + "\n"
f_handle.write(content)
|
jakubbrindza/gtg | GTG/backends/__init__.py | Python | gpl-3.0 | 8,418 | 0 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""
Backends are a way to permanently store a project on a medium
(like on the hard disk or on the internet)
and to read projects from this medium
"""
import sys
import uuid
import os.path
from GTG.tools.logger import Log
from GTG.tools.borg import Borg
from GTG.backends.genericbackend import GenericBackend
from GTG.core import firstrun_tasks
from GTG.core.dirs import PROJECTS_XMLFILE
from GTG.tools import cleanxml
class BackendFactory(Borg):
'''
This class holds the information about the backend types.
Since it's about types, all information is static. The instantiated
backends are handled in the Datastore.
It is a Borg for what matters its only state (_backend_modules),
since it makes no sense of keeping multiple instances of this.
'''
BACKEND_PREFIX = "backend_"
def __init__(self):
"""
Creates a dictionary of the currently available backend modules
"""
Borg.__init__(self)
if hasattr(self, "backend_modules"):
# This object has already been constructed
return
self.backend_modules = {}
backend_files = self._find_backend_files()
# Create module names
module_names = [f.replace(".py", "") for f in backend_files]
Log.debug("Backends found: " + str(module_names))
# Load backend modules
for module_name in module_names:
extended_module_name = "GTG.backends." + module_name
try:
__import__(extended_module_name)
except ImportError as exception:
# Something is wrong with this backend, skipping
Log.warning("Backend %s could not be loaded: %s" %
(module_name, str(exception)))
continue
except Exception as exception:
# Other exception log as errors
Log.error("Malformated backend %s: %s" %
(module_name, str(exception)))
continue
self.backend_modules[module_name] = \
sys.modules[extended_module_name]
def _find_backend_files(self):
# Look for backends in the GTG/backends dir
this_dir = os.path.dirname(__file__)
for filename in os.listdir(this_dir):
is_python = filename.endswith(".py")
has_prefix = filename.startswith(self.BACKEND_PREFIX)
if is_python and has_prefix:
yield filename
def get_backend(self, backend_name):
'''
Returns the backend module for the backend matching
backend_name. Else, returns none
'''
if backend_name in self.backend_modules:
return self.backend_modules[backend_name]
else:
Log.debug("Trying to load backend %s, but failed!" % backend_name)
return None
def get_all_backends(self):
'''
Returns a dictionary containing all the backends types
'''
return self.backend_modules
def get_new_backend_dict(self, backend_name, additional_parameters= | {}):
'''
Constructs a new backend initialization dictionary. In more
exact terms, creates a dictionary, containing all the necessary
entries to initialize a backend.
'''
if backend_name not in self.backend_modules:
return None
dic = {}
module = self.get_backend(backend_name)
# Different pids are necessary to discern between backends of the same
# type
paramet | ers = module.Backend.get_static_parameters()
# we all the parameters and their default values in dic
for param_name, param_dic in parameters.items():
dic[param_name] = param_dic[GenericBackend.PARAM_DEFAULT_VALUE]
dic["pid"] = str(uuid.uuid4())
dic["module"] = module.Backend.get_name()
for param_name, param_value in additional_parameters.items():
dic[param_name] = param_value
dic["backend"] = module.Backend(dic)
return dic
def restore_backend_from_xml(self, dic):
'''
Function restoring a backend from its xml description.
dic should be a dictionary containing at least the key
- "module", with the module name
- "xmlobject", with its xml description.
Every other key is passed as-is to the backend, as parameter.
Returns the backend instance, or None is something goes wrong
'''
if "module" not in dic or "xmlobject" not in dic:
Log.debug("Malformed backend configuration found! %s" %
dic)
module = self.get_backend(dic["module"])
if module is None:
Log.debug("could not load module for backend %s" %
dic["module"])
return None
# we pop the xml object, as it will be redundant when the parameters
# are set directly in the dict
xp = dic.pop("xmlobject")
# Building the dictionary
parameters_specs = module.Backend.get_static_parameters()
dic["pid"] = str(xp.getAttribute("pid"))
for param_name, param_dic in parameters_specs.items():
if xp.hasAttribute(param_name):
# we need to convert the parameter to the right format.
# we fetch the format from the static_parameters
param_type = param_dic[GenericBackend.PARAM_TYPE]
param_value = GenericBackend.cast_param_type_from_string(
xp.getAttribute(param_name), param_type)
dic[param_name] = param_value
# We put the backend itself in the dict
dic["backend"] = module.Backend(dic)
return dic["backend"]
def get_saved_backends_list(self):
backends_dic = self._read_backend_configuration_file()
# Retrocompatibility: default backend has changed name
for dic in backends_dic:
if dic["module"] == "localfile":
dic["module"] = "backend_localfile"
dic["pid"] = str(uuid.uuid4())
dic["need_conversion"] = \
dic["xmlobject"].getAttribute("filename")
# Now that the backend list is build, we will construct them
for dic in backends_dic:
self.restore_backend_from_xml(dic)
# If no backend available, we create a new using localfile. Xmlobject
# will be filled in by the backend
if len(backends_dic) == 0:
dic = BackendFactory().get_new_backend_dict(
"backend_localfile")
dic["backend"].this_is_the_first_run(firstrun_tasks.populate())
backends_dic.append(dic)
return backends_dic
def _read_backend_configuration_file(self):
'''
Reads the file describing the current backend configuration
(project.xml) and returns a list of dictionaries, each containing:
- the xml object defining the backend characteristics under
"xmlobject"
- the name of the backend under "module"
'''
# Read configuration file, if it does not exist, create one
doc, configxml = cleanxml.openxml |
Elchi3/kuma | kuma/users/providers/google/views.py | Python | mpl-2.0 | 1,558 | 0.003209 | from urllib.parse import urlparse
from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
from allauth.socialaccount.providers.oauth2.views import (
OAuth2CallbackView,
OAuth2LoginView,
)
from kuma.core.decorators import redirect_in_maintenance_mode
from kuma.core.ga_tracking import ACTION_AUTH_STARTED, CATEGORY_SIGNUP_FLOW, track_event
class KumaOAuth2LoginView(OAuth2LoginView):
def dispatch(self, request):
# TODO: Figure out a way to NOT trigger the "ACTION_AUTH_STARTED" when
# simply following the link. We've seen far too many submissions when
# curl or some browser extensions follow the link but not actually being
# users who proceed "earnestly".
# For now, to make a simple distinction between uses of `curl` and normal
# browser clicks we check that a HTTP_REFERER is actually set and comes
# from the same host as the request.
# Note! This is the same in kuma.users.providers.github.KumaOAuth2LoginView |
# See https://github.com/mdn/kuma/issues/6759
ht | tp_referer = request.META.get("HTTP_REFERER")
if http_referer:
if urlparse(http_referer).netloc == request.get_host():
track_event(CATEGORY_SIGNUP_FLOW, ACTION_AUTH_STARTED, "google")
return super().dispatch(request)
oauth2_login = redirect_in_maintenance_mode(
KumaOAuth2LoginView.adapter_view(GoogleOAuth2Adapter)
)
oauth2_callback = redirect_in_maintenance_mode(
OAuth2CallbackView.adapter_view(GoogleOAuth2Adapter)
)
|
mac389/inuit | src/get-facebook-posts.py | Python | apache-2.0 | 248 | 0.020161 | import requests, json
tokens = json.load(open('tokens.json','rb'))
APP_ID = token | s['id']
APP_SECRET = tokens['secret']
ACCESS_TOKEN = tokens['long_access_token']
base = 'http://graph.facebook/com'
payload = {''}
r | = requsts.get(base,payload)
|
NUKnightLab/cityhallmonitor | cityhallmonitor/migrations/0003_matterattachment_link_obtained_at.py | Python | mit | 440 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django | .db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cityhallmonitor', '0002_matter_attachments_obtained_at'),
]
operations = [
migrations.AddField(
model_name='matterattachment',
name='l | ink_obtained_at',
field=models.DateTimeField(null=True),
),
]
|
oridgar/PersonaLab | personalab/drivers/objects/libvirt.py | Python | gpl-3.0 | 583 | 0.006861 | from personalab.drivers.objects.base import Vm
class LibVirtVm(Vm):
"""
:type base_object: libvirt.virDomain
"""
def __init__(self,domain):
#self.base_object = libv | irt.virDomain()
self.base_object = domain
#super(self.__class__, self).__init__(domain)
def start(self):
self.base_object.create()
def stop(self):
self.base_object.destroy()
def restart(self):
self.base_object.reboot()
def reset(self):
self.base_object.reset()
def get_name(self):
| return self.base_object.name() |
free-free/pyblog | pyblog/cache/cache_abstract.py | Python | mit | 1,626 | 0.000615 | #-*- coding:utf-8 -*-
class CacheAbstractDriver:
def __init__(self, *args, **kw):
pass
def put(self, key, value, expires, key_prefix):
r"""
@put a value to cache system
@parameters:
| key :must be 'str' type or 'dict ' type
when 'key' is 'dict' type,value must be None value
value:list,tuple,dict,None,one of them
expires:must be 'int' type,units is seconds
key_prefix: 'str' type
"""
raise NotImplementedError
def get(self, key, key_prefix):
r"""
@get a cache value from cache sys | tem
"""
raise NotImplementedError
def get_delete(self, key, key_prefix):
r"""
@get and delete a cache value from cache system
"""
raise NotImplementedError
def delete(self, key, key_prefix):
r"""
@delete a cache value from cache system
"""
raise NotImplementedError
def update(self, key, value, expires, key_prefix):
r"""
@update a cache value to cache system
"""
raise NotImplementedError
def exists(self, key, key_prefix):
r"""
@check a cache value whether existen
"""
raise NotImplementedError
def increment(self, key, delta, key_prefix):
r"""
@increment a cahce value
"""
raise NotImplementedError
def decrement(self, key, delta, key_prefix):
r"""
@decrement a cache value
"""
raise NotImplementedError
|
zmeadows/bloch_fit | plot/plot_filter_fft.py | Python | mit | 702 | 0.007123 | import sys
sys.path.append('/Users/zac/Research/muon_g2_2015/nmr/bloch_fit')
import matplotlib.pyplot as plt
from pulse import NMRPulseFiltered
import util as u
impor | t numpy as np
import math
p = NMRPulseFiltered(u.get_pulse_path())
d_hz = p.raw_freqs[1] - p.raw_freqs[0]
ei = (p.w_ref * 8 / (2*math.pi)) / d_hz
plt.figure(facecolor=u.LIGHTGREY)
plt.plot(p.raw_freqs[:ei], np.abs(p.raw_fft)[:ei], color=u.BLUE)
plt.plot(p.raw_freqs[:ei], np.abs(p.filter_fft)[:ei], color=u.RED, alpha=0.7)
plt.grid()
plt.gca().set_axis_bgcolor(u.GREY)
plt.yscale('log')
plt.title("FILTERED PULSE FFT")
plt.xlabel("Frequency (Hz)")
plt.ylim(n | p.min(0.99*np.abs(p.raw_fft)), 1.1*np.max(np.abs(p.raw_fft)))
plt.show()
|
msullivan/advent-of-code | 2015/A8b.py | Python | mit | 340 | 0.005882 | #!/usr/bin/env python3
import sys |
def main(args):
cmds = [s.strip() for s in sys.stdin]
sum = 0
for i in cmds:
a = i
a = a.replace('\\', '\\\\')
a = a.replace("\"", '\\"')
a = '"' + a + '"'
sum += len(a) - len(i)
print(sum)
if __ | name__ == '__main__':
sys.exit(main(sys.argv))
|
MichaelAquilina/pytest | testing/test_terminal.py | Python | mit | 31,303 | 0.000096 | """
terminal reporting of the full testing process.
"""
from __future__ import absolute_import, division, print_function
import collections
import sys
import _pytest._pluggy as pluggy
import _pytest._code
import py
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.terminal import TerminalReporter, repr_pythonversion, getreportopt
from _pytest.terminal import build_summary_stats_line, _plugin_nameversions
DistInfo = collections.namedtuple('DistInfo', ['project_name', 'version'])
class Option(object):
def __init__(self, verbose=False, fulltrace=False):
self.verbose = verbose
self.fulltrace = fulltrace
@property
def args(self):
l = []
if self.verbose:
l.append('-v')
if self.fulltrace:
l.append('--fulltrace')
return l
def pytest_generate_tests(metafunc):
if "option" in metafunc.fixturenames:
metafunc.addcall(id="default",
funcargs={'option': Option(verbose=False)})
metafunc.addcall(id="verbose",
funcargs={'option': Option(verbose=True)})
metafunc.addcall(id="quiet",
funcargs={'option': Option(verbose=-1)})
metafunc.addcall(id="fulltrace",
funcargs={'option': Option(fulltrace=True)})
@pytest.mark.parametrize('input,expected', [
([DistInfo(project_name='test', version=1)], ['test-1']),
([DistInfo(project_name='pytest-test', version=1)], ['test-1']),
([
DistInfo(project_name='test', version=1),
DistInfo(project_name='test', version=1)
], ['test-1']),
], ids=['normal', 'prefix-strip', 'deduplicate'])
def test_plugin_nameversion(input, expected):
pluginlist = [(None, x) for x in input]
result = _plugin_nameversions(pluginlist)
assert result == expected
class TestTerminal(object):
def test_pass_skip_fail(self, testdir, option):
testdir.makepyfile("""
import pytest
def test_ok():
pass
def test_skip():
pytest.skip("xx")
def test_func():
assert 0
""")
result = testdir.runpytest(*option.args)
if option.verbose:
result.stdout.fnmatch_lines([
"*test_pass_skip_fail.py::test_ok PASS*",
"*test_pass_skip_fail.py::test_skip SKIP*",
"*test_pass_skip_fail.py::test_func FAIL*",
])
else:
result.stdout.fnmatch_lines([
"*test_pass_skip_fail.py .sF"
])
result.stdout.fnmatch_lines([
" def test_func():",
"> assert 0",
"E assert 0",
])
def test_internalerror(self, testdir, linecomp):
modcol = testdir.getmodulecol("def test_one(): pass")
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
excinfo = pytest.raises(ValueError, "raise ValueError('hello')")
rep.pytest_internalerror(excinfo.getrepr())
linecomp.assert_contains_lines([
"INTERNALERROR> *ValueError*hello*"
])
def test_writeline(self, testdir, linecomp):
modcol = testdir.getmodulecol("def test_one(): pass")
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
rep.write_fspath_result(modcol.nodeid, ".")
rep.write_line("hello world")
lines = linecomp.stringio.getvalue().split('\n')
assert not lines[0]
assert lines[1].endswith(modcol.name + " .")
assert lines[2] == "hello world"
def test_show_runtest_logstart(self, testdir, linecomp):
item = testdir.getitem("def test_func(): pass")
tr = TerminalReporter(item.config, file=linecomp.stringio)
item.config.pluginmanager.register(tr)
location = item.reportinfo()
tr.config.hook.pytest_runtest_logstart(nodeid=item.nodeid,
location=location, fspath=str(item.fspath))
linecomp.assert_contains_lines([
"*test_show_runtest_logstart.py*"
])
def test_runtest_location_shown_before_test_starts(self, testdir):
testdir. | makepyfile("""
def test_1():
import time
time.sleep(20)
""")
child = testdir.spawn_pytest("")
child.expect(".*test_runtest_location.*py")
child.sendeof()
child.kill(15)
def test_itemreport_subclasses_show_subc | lassed_file(self, testdir):
testdir.makepyfile(test_p1="""
class BaseTests(object):
def test_p1(self):
pass
class TestClass(BaseTests):
pass
""")
p2 = testdir.makepyfile(test_p2="""
from test_p1 import BaseTests
class TestMore(BaseTests):
pass
""")
result = testdir.runpytest(p2)
result.stdout.fnmatch_lines([
"*test_p2.py .",
"*1 passed*",
])
result = testdir.runpytest("-v", p2)
result.stdout.fnmatch_lines([
"*test_p2.py::TestMore::test_p1* <- *test_p1.py*PASSED",
])
def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir):
a = testdir.mkpydir("a123")
a.join("test_hello123.py").write(_pytest._code.Source("""
class TestClass(object):
def test_method(self):
pass
"""))
result = testdir.runpytest("-v")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*a123/test_hello123.py*PASS*",
])
assert " <- " not in result.stdout.str()
def test_keyboard_interrupt(self, testdir, option):
testdir.makepyfile("""
def test_foobar():
assert 0
def test_spamegg():
import py; pytest.skip('skip me please!')
def test_interrupt_me():
raise KeyboardInterrupt # simulating the user
""")
result = testdir.runpytest(*option.args, no_reraise_ctrlc=True)
result.stdout.fnmatch_lines([
" def test_foobar():",
"> assert 0",
"E assert 0",
"*_keyboard_interrupt.py:6: KeyboardInterrupt*",
])
if option.fulltrace:
result.stdout.fnmatch_lines([
"*raise KeyboardInterrupt # simulating the user*",
])
else:
result.stdout.fnmatch_lines([
"to show a full traceback on KeyboardInterrupt use --fulltrace"
])
result.stdout.fnmatch_lines(['*KeyboardInterrupt*'])
def test_keyboard_in_sessionstart(self, testdir):
testdir.makeconftest("""
def pytest_sessionstart():
raise KeyboardInterrupt
""")
testdir.makepyfile("""
def test_foobar():
pass
""")
result = testdir.runpytest(no_reraise_ctrlc=True)
assert result.ret == 2
result.stdout.fnmatch_lines(['*KeyboardInterrupt*'])
def test_collect_single_item(self, testdir):
"""Use singular 'item' when reporting a single test item"""
testdir.makepyfile("""
def test_foobar():
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(['collected 1 item'])
def test_rewrite(self, testdir, monkeypatch):
config = testdir.parseconfig()
f = py.io.TextIO()
monkeypatch.setattr(f, 'isatty', lambda *args: True)
tr = TerminalReporter(config, f)
tr.writer.fullwidth = 10
tr.write('hello')
tr.rewrite('hey', erase=True)
assert f.getvalue() == 'hello' + '\r' + 'hey' + (7 * ' ')
class TestCollectonly(object):
def test_collectonly_basic(self, testdir):
testdir.makepyfile("""
def test_func():
pass
""")
result = testdir.runpytest("--collect-only",)
result.stdout.fnmatch_lines([
"<Module 'test_collectonly_basic.py' |
hatbot-team/hatbot | lang_utils/morphology/word_forms.py | Python | mit | 2,577 | 0.00369 | from pymorphy2.analyzer import Parse
__author__ = 'moskupols'
from lang_utils.morphology import morph
# def get_forms(initial):
# """
# Get all possible forms of a given word in initial form.
#
# >>> get_forms('мама')
# ['мам', 'мама', 'мамам', 'мамами', 'мамах', 'маме', 'мамой', 'мамою', 'маму', 'мамы']
# >>> get_forms('мыла')
#
# :param initial: a russian word in its initial form, in lower case
# :return: a list of forms if the word is found in the dictionary, None otherwise
# """
# parsed = morph.parse(initial)
# ans = []
# for p in parsed:
# | ans.extend(p.inflect(...))
def get_initial_forms(form: str, part_filter=None)->list:
"""
Gets all possible initial forms (there are several of them sometimes) of a given word.
Optional argument part_filter allows to prune unnecessary ambiguity with part of speech.
>>> get_initial_forms('Дядя')
['дядя']
>>> get_initial_forms('самых' | )
['самый']
>>> get_initial_forms('честных')
['честной', 'честный']
>>> get_initial_forms('правил')
['правило', 'править']
>>> get_initial_forms('правил', 'NOUN')
['правило']
>>> get_initial_forms('правил', ['VERB'])
['править']
:param form: a russian word
:param part_filter: something that supports `in' operator: str, list, set etc. If it is a container,
it should contain only Part-of-speech names according to pymorphy2 enumerations
:return: a list of possible initial forms of the given word in lowercase.
It's guaranteed that there are no repetitions.
Variants are generated in the order of descending certainty.
"""
met = set()
ret = []
for p in morph.parse(form):
# if p.score < .1:
# continue
if part_filter is None or p.tag.POS in part_filter:
norm = p.normal_form
if norm not in met:
ret.append(norm)
met.add(norm)
return ret
def _is_valid_noun(parsed: Parse)->bool:
# TODO: add surname and all, see http://opencorpora.org/dict.php?act=gram
# even Init!
tag = parsed.tag
return tag.POS == 'NOUN' and 'Name' not in str(tag) # Dirty but so cool!
def get_valid_noun_initial_form(word: str)->str:
possible_forms = [p for p in morph.parse(word) if _is_valid_noun(p)]
if len(possible_forms) == 0:
return None
else:
return possible_forms[0].normal_form
|
MKridler/pyxley | pyxley/charts/nvd3/two_axis_focus.py | Python | mit | 2,870 | 0.003833 |
from .nvd3 import NVD3
from flask import jsonify, request
import numpy as np
class TwoAxisFocus(NVD3):
_allowed_axes = ["sigma", "minmax"]
def __init__(self, x, y1, y2, data_source, init_params={},
chart_id="new_chart", url="/new_chart/", colors=[], auto_scale="sigma",
y1_axis_range=[], y2_axis_range=[], sigma=3,
x_label="", y1_label="", y2_label="",
margin={"top": 30, "right": 60, "bottom": 50, "left": 70}):
self.x = x
self.y1 = y1
self.y2 = y2
self.auto_scale = auto_scale if auto_scale in self._allowed_axes else "sigma"
self.sigma = 3
self.y1_axis_range = y1_axis_range
self.y2_axis_range = y2_axis_range
self.options = {
"type": "TwoAxisFocus",
"chartid": chart_id,
"url": url,
"colors": colors,
"init_params": init_params,
"labels": {
"xAxis": x_label,
"yAxis1": y1_label,
"yAxis2": y2_label
},
"margin": margin,
"type": "TwoAxisFocus"
}
def get_data():
args = {}
for c in init_params:
if request.args.get(c):
args[c] = request.args[c]
else:
args[c] = init_params[c]
return jsonify(self.to_json(
self.apply_filters(data_source, args)
))
super(TwoAxisFocus, self).__init__(self.options, get_d | ata)
def get_bounds(self, y, method="sigma"):
if self.auto_scale == "sigma":
m_, s_ = y.mean(), y.std()
l = m_ - self.sigma*s_
u = m_ + sel | f.sigma*s_
else:
l = y.min()
u = y.max()
return [l, u]
def to_json(self, df):
if df.empty:
return {
"data": [],
"yAxis1": {"lower": 0, "upper": 1},
"yAxis2": {"lower": 0, "upper": 1}
}
if not self.y1_axis_range:
bounds1 = self.get_bounds(df[self.y1], method=self.auto_scale)
else:
bounds1 = self.y1_axis_range
if not self.y2_axis_range:
bounds2 = self.get_bounds(df[self.y2], method=self.auto_scale)
else:
bounds2 = self.y2_axis_range
records = [
{"key": self.y1, "values": [], "yAxis": 1, "type": "line"},
{"key": self.y2, "values": [], "yAxis": 2, "type": "line"}
]
for n, r in df.iterrows():
records[0]["values"].append({"x": r[self.x], "y": r[self.y1]})
records[1]["values"].append({"x": r[self.x], "y": r[self.y2]})
return {
"data": records,
"yAxis1": {"bounds": bounds1},
"yAxis2": {"bounds": bounds2}
}
|
kpj/PySpaMo | automata.py | Python | mit | 3,756 | 0.000532 | """
Implementation of several spatial models running on a lattice
"""
import numpy as np
from tqdm import tqdm
class CellularAutomaton(object):
""" General CA class
"""
def __init__(self, lattice):
self.lattice = lattice
def setup(self):
""" This function is called before any iteration step takes place
"""
pass
| def apply_ru | le(self, mat):
""" Return the new lattice
"""
raise NotImplementedError
def iterate(self, steps, **kwargs):
""" Yield each step up to `steps` of the current CA
"""
self.setup(**kwargs)
yield self.lattice
for _ in tqdm(range(steps)):
self.lattice = self.apply_rule(self.lattice)
yield self.lattice
raise StopIteration
def get_neighbours(self, pos, mat=None):
""" Get elements in Moore neighborhood of given position.
If no particular matrix is specified,
the current lattice will be used.
"""
if mat is None:
mat = self.lattice
r, c = pos
nr = r+1 if r+1 < mat.shape[0] else 0
nc = c+1 if c+1 < mat.shape[1] else 0
positions = [
(r-1, c-1), (r-1, c), (r-1, nc),
(r, c-1), (r, nc),
(nr, c-1), (nr, c), (nr, nc)
]
return [(mat[row, col], (row, col)) for row, col in positions]
class GameOfLife(CellularAutomaton):
""" Simulate Conway's Game of Life
"""
def apply_rule(self, mat):
""" Possibly the worst implementation of Game of life ever
"""
next_mat = np.zeros(mat.shape)
for row in range(mat.shape[0]):
for col in range(mat.shape[1]):
num = sum([e for e, p in self.get_neighbours((row, col))])
if mat[row, col] == 1:
if num >= 2 and num <= 3:
next_mat[row, col] = 1
else:
if num == 3:
next_mat[row, col] = 1
return next_mat
class SnowDrift(CellularAutomaton):
""" Simulate game according to some payoff matrix
"""
def setup(self, benefit=0.6, cost=0.2):
""" Generate payoff matrix. It's of the form
[
[<C-C>, <C-D>],
[<D-C>, <D-D>]
]
C -> cooperate
D -> defect
"""
self.payoff_mat = np.array([
[benefit - cost / 2, benefit - cost],
[benefit, 0]
])
def get_payoff(self, own_strat, other_strat):
""" Get corresponding payoff.
Assume existance of only two strategies:
0 -> cooperate
1 -> defect
"""
return self.payoff_mat[own_strat, other_strat]
def apply_rule(self, mat):
""" Some stuff
"""
next_mat = np.zeros(mat.shape)
fitness_mat = np.zeros(self.lattice.shape)
# compute fitness matrix
for row in range(mat.shape[0]):
for col in range(mat.shape[1]):
cur = mat[row, col]
ns = self.get_neighbours((row, col))
fitness = 0
for n, p in ns:
fitness += self.get_payoff(cur, n)
fitness /= len(ns)
fitness_mat[row, col] += fitness
# let the games begin
for row in range(mat.shape[0]):
for col in range(mat.shape[1]):
curf = fitness_mat[row, col]
nsf = self.get_neighbours((row, col), mat=fitness_mat)
for nf, p in nsf:
if nf > curf:
next_mat[row, col] = mat[p[0], p[1]]
return next_mat
|
cshzc/X1Tool | src/server/apps/utils/__init__.py | Python | gpl-2.0 | 25 | 0 | __author__ = 'zh | ichengh'
| |
rsignell-usgs/notebook | tri_tide_movie/python/tri_tide_movie.py | Python | mit | 5,982 | 0.004681 |
# coding: utf-8
# In[7]:
import pytz
from datetime import datetime
from pandas import date_range
import iris
import warnings
import pyugrid
# In[8]:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# ncfile = ('http://geoport.whoi.edu/thredds/dodsC/usgs/vault0/models/tides/'
# 'vdatum_gulf_of_maine/adcirc54_38_orig.nc')
url = ('http://geoport.whoi.edu/thredds/dodsC/usgs/vault0/models/tides/'
'vdatum_fl_sab/adcirc54.nc')
cubes = iris.load_raw(url)
print(cubes)
# In[9]:
units = dict({'knots': 1.9438, 'm/s': 1.0})
consts = ['STEADY', 'M2', 'S2', 'N2', 'K1', 'O1', 'P1', 'M4', 'M6']
bbox = [-70.7234, -70.4532, 41.4258, 41.5643] # Vineyard sound 2.
bbox = [-85.25, -84.75, 29.58, 29.83] # Apalachicola Bay
halo = 0.1
ax2 = [bbox[0] - halo * (bbox[1] - bbox[0]),
bbox[1] + halo * (bbox[1] - bbox[0]),
bbox[2] - halo * (bbox[3] - bbox[2]),
bbox[3] + halo * (bbox[3] - bbox[2])]
# In[10]:
start = datetime.strptime('18-Sep-2015 05:00',
'%d-%b-%Y %H:%M').replace(tzinfo=pytz.utc)
stop = datetime.strptime('19-Sep-2015 05:00', # '18-Sep-2015 18:00'
'%d-%b-%Y %H:%M').replace(tzinfo=pytz.utc)
dt = 1.0 # Hours.
glocals = date_range(start, stop, freq='1H').to_pydatetime()
ntimes = len(glocals)
# In[11]:
def parse_string(name):
return ''.join(name.tolist()).strip()
names = []
data = cubes.extract_strict('Tide Constituent').data
for name in data:
names.append(parse_string(name))
# In[12]:
ug = pyugrid.UGrid.from_ncfile(url)
lonf = ug.nodes[:,0]
latf = ug.nodes[:,1]
nv = ug.faces[:]
frequency = cubes.extract_strict('Tide Frequency').data
# In[ ]:
# Find indices in box.
import numpy as np
inbox = np.logical_and(np.logical_and(lonf >= ax2[0],
lonf <= ax2[1]),
np.logical_and(latf >= ax2[2],
latf <= ax2[3]))
lon = lonf[inbox]
lat = latf[inbox]
# In[ ]:
import os.path
from scipy.io import loadmat
mat = os.path.join('..', 't_tide_v1.3beta', 't_constituents.mat')
con_info = loadmat(mat, squeeze_me=True)
con_info = con_info['const'] # I am ignore shallow water and sat constants!
# In[ ]:
from utide import _ut_constants_fname
from utide.utilities import loadmatbunch
con_info = loadmatbunch(_ut_constants_fname)['const']
# In[ ]:
# Find the indices of the tidal constituents.
k = 0
ind_nc, ind_ttide = [], []
const_name = [e.strip() for e in con_info['name'].tolist()]
for name in consts:
try:
if name == 'STEADY':
indx = const_name.index('Z0')
else:
indx = const_name.index(name)
k += 1
ind_ttide.append(indx)
ind_nc.append(names.index(name))
except ValueError:
pass # `const` not found.
# In[ ]:
ua = cubes.extract_strict('Eastward Water Velocity Amplitude')
up = cubes.extract_strict('Eastward Water Velocity Phase')
va = cubes.extract_strict('Northward Water Velocity Amplitude')
vp = cubes.extract_strict('Northward Water Velocity Phase')
# In[ ]:
ua.shape
# In[ ]:
uamp = ua.data[0, inbox, :][:, ind_nc]
vamp = va.data[0, inbox, :][:, ind_nc]
upha = up.data[0, inbox, :][:, ind_nc]
vpha = vp.data[0, inbox, :][:, ind_nc]
# In[ ]:
freq_nc = frequency[ind_nc]
# In[ ]:
print uamp.shape
print freq_nc.shape
# In[ ]:
freq_ttide = con_info['freq'][ind_ttide]
# In[ ]:
t_tide_names = np.array(const_name)[ind_ttide]
# In[ ]:
omega_ttide = 2*np.pi * freq_ttide # Convert from radians/s to radians/hour.
omega = freq_nc * 3600
rllat = 55 # Reference latitude for 3rd order satellites (degrees) (55 is fine always)
# In[ ]:
from matplotlib.dates import date2num
# Convert to Matlab datenum.
# (Soon UTide will take python datetime objects.)
jd_start = date2num(start) + 366.1667
# In[ ]:
from utide.harmonics import FUV
# NB: I am not a 100% sure if this is identical to what we had with t_tide.
# ngflgs -> [NodsatLint NodsatNone GwchLint GwchNone]
v, u, f = FUV(t=np.array([jd_start]), tref=np.array([0]),
lind=np.array([ind_ttide]),
lat=55, ngflgs=[0, 0, 0, 0])
# In[ ]:
# Convert phase in radians.
v, u, f = map(np.squeeze, (v, u, f))
v = v * 2 * np.pi
u = u * 2 * np.pi
thours = np.array([d.total_seconds() for d in
(glocals - glocals[0])]) / 60 / 60.
# In[ ]:
get_ipython().magic(u'matplotlib inline')
import matplotlib.pyplot as plt
from JSAnimation import IPython_display
from matplotlib.animation import FuncAnimation
def update_figure(k):
global ax, fig
ax.cla()
U = (f * uamp * np.cos(v + thours[k] * omega + u - upha * np.pi/180)).sum(axis=1)
V = (f * vamp * np.cos(v + thours[k] * omega + u - vpha * np.pi/180)).sum(axis=1)
|
w = units['knots'] * (U + 1j * V)
wf = np.NaN * np.ones_like(lonf, dtype=w.dtype)
wf[inbox] = w
# FIXME: Cannot use masked arrays and tricontour!
# wf = ma.masked_invalid(wf)
# cs = ax.tricontour(lonf, la | tf, trif, np.abs(wf).filled(fill_value=0))
# fig.colorbar(cs)
q = ax.quiver(lon, lat, U, V, scale=40)
ax.axis(bbox) # Vineyard sound 2.
ax.set_title('{}'.format(glocals[k]))
fig, ax = plt.subplots(figsize=(7, 5))
FuncAnimation(fig, update_figure, interval=100, frames=ntimes)
# In[ ]:
plt.figure(figsize=(12,12))
U = (f * uamp * np.cos(v + thours[k] * omega + u - upha * np.pi/180)).sum(axis=1)
V = (f * vamp * np.cos(v + thours[k] * omega + u - vpha * np.pi/180)).sum(axis=1)
w = units['knots'] * (U + 1j * V)
wf = np.NaN * np.ones_like(lonf, dtype=w.dtype)
wf[inbox] = w
# FIXME: Cannot use masked arrays and tricontour!
# wf = ma.masked_invalid(wf)
# cs = ax.tricontour(lonf, latf, trif, np.abs(wf).filled(fill_value=0))
# fig.colorbar(cs)
q = plt.quiver(lon, lat, U, V, scale=40)
plt.axis(bbox) # Vineyard sound 2.
#q.set_title('{}'.format(glocals[k]))
# In[ ]:
iris.__version__
# In[ ]:
|
qrkourier/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_subnet.py | Python | gpl-3.0 | 9,267 | 0.002806 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: ec2_vpc_subnet
short_description: Manage subnets in AWS virtual private clouds
description:
- Manage subnets in AWS virtual private clouds
version_added: "2.0"
author: Robert Estelle (@erydo), Brad Davidson (@brandond)
requirements: [ boto3 ]
options:
az:
description:
- "The availability zone for the subnet. Only required when state=present."
required: false
default: null
cidr:
description:
- "The CIDR block for the subnet. E.g. 192.0.2.0/24. Only required when state=present."
required: false
default: null
tags:
description:
- "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
required: false
default: null
aliases: [ 'resource_tags' ]
state:
description:
- "Create or remove the subnet"
required: false
default: present
choices: [ 'present', 'absent' ]
vpc_id:
description:
- "VPC ID of the VPC in which to create the subnet."
required: false
default: null
map_public:
description:
- "Specify true to indicate that instances launched into the subnet should be assigned public IP address by default."
required: false
default: false
version_added: "2.4"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create subnet for database servers
ec2_vpc_subnet:
state: present
vpc_id: vpc-123456
cidr: 10.0.1.16/28
resource_tags:
Name: Database Subnet
register: database_subnet
- name: Remove subnet for database servers
ec2_vpc_subnet:
state: absent
vpc_id: vpc-123456
cidr: 10.0.1.16/28
'''
import time
import traceback
try:
import botocore
except ImportError:
pass # caught by imported boto3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, ansible_dict_to_boto3_tag_list,
ec2_argument_spec, camel_dict_to_snake_dict, get_aws_connection_info,
boto3_conn, boto3_tag_list_to_ansible_dict, HAS_BOTO3)
def get_subnet_info(subnet):
if 'Subnets' in subnet:
return [get_subnet_info(s) for s in subnet['Subnets']]
elif 'Subnet' in subnet:
subnet = camel_dict_to_snake_dict(subnet['Subnet'])
else:
subnet = camel_dict_to_snake_dict(subnet)
if 'tags' in subnet:
subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags'])
else:
subnet['tags'] = dict()
if 'subnet_id' in subnet:
subnet['id'] = subnet['subnet_id']
del subnet['subnet_id']
return subnet
def subnet_exists(conn, subnet_id):
filters = ansible_dict_to_boto3_filter_list({'subnet-id': subnet_id})
subnets = get_subnet_info(conn.describe_subnets(Filters=filters))
if len(subnets) > 0 and 'state' in subnets[0] and subnets[0]['state'] == "available":
return subnets[0]
else:
return False
def create_subnet(conn, module, vpc_id, cidr, az, check_mode):
try:
new_subnet = get_subnet_info(conn.create_subnet(VpcId=vpc_id, CidrBlock=cidr, AvailabilityZone=az))
# Sometimes AWS takes its time to create a subnet and so using
# new subnets's id to do things like create tags results in
# exception. boto doesn't seem to refresh 'state' of the newly
# created subnet, i.e.: it's always 'pending'.
subnet = False
while subnet is False:
subnet = subnet_exists(conn, new_subnet['id'])
time.sleep(0.1)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "DryRunOperation":
subnet = None
else:
module.fail_json(msg=e.message, exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
return subnet
def ensure_tags(conn, module, subnet, tags, add_only, check_mode):
try:
cur_tags = subnet['tags']
to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
if to_delete and not add_only and not check_mode:
conn.delete_tags(Resources=[subnet['id']], Tags=ansible_dict_to_boto3_tag_list(to_delet | e))
to_add = dict((k, tags[k]) for k in tags if k not in cur_tags or cur_tags[k] != tags[k])
if to_add and not check_mode:
conn.create_tags(Resources=[subnet['id']], Tags=ansible_dict_to_boto3_tag_list(to_add))
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != "DryRunOperation":
module.fail_json(msg=e.message, exception=traceback.format_exc(),
**camel | _dict_to_snake_dict(e.response))
def ensure_map_public(conn, module, subnet, map_public, check_mode):
if check_mode:
return
try:
conn.modify_subnet_attribute(SubnetId=subnet['id'], MapPublicIpOnLaunch={'Value': map_public})
except botocore.exceptions.ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_matching_subnet(conn, vpc_id, cidr):
filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr})
subnets = get_subnet_info(conn.describe_subnets(Filters=filters))
if len(subnets) > 0:
return subnets[0]
else:
return None
def ensure_subnet_present(conn, module, vpc_id, cidr, az, tags, map_public, check_mode):
subnet = get_matching_subnet(conn, vpc_id, cidr)
changed = False
if subnet is None:
if not check_mode:
subnet = create_subnet(conn, module, vpc_id, cidr, az, check_mode)
changed = True
# Subnet will be None when check_mode is true
if subnet is None:
return {
'changed': changed,
'subnet': {}
}
if map_public != subnet['map_public_ip_on_launch']:
ensure_map_public(conn, module, subnet, map_public, check_mode)
subnet['map_public_ip_on_launch'] = map_public
changed = True
if tags != subnet['tags']:
ensure_tags(conn, module, subnet, tags, False, check_mode)
subnet['tags'] = tags
changed = True
return {
'changed': changed,
'subnet': subnet
}
def ensure_subnet_absent(conn, module, vpc_id, cidr, check_mode):
subnet = get_matching_subnet(conn, vpc_id, cidr)
if subnet is None:
return {'changed': False}
try:
if not check_mode:
conn.delete_subnet(SubnetId=subnet['id'], DryRun=check_mode)
return {'changed': True}
except botocore.exceptions.ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
az=dict(default=None, required=False),
cidr=dict(default=None, required=True),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']),
vpc_id=dict(default=None, required=True),
map_public=dict(default=False, required=False, type='bool')
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO3:
module.fail_json(msg='boto3 and botocore are required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
|
jayvdb/coala | coalib/misc/BuildManPage.py | Python | agpl-3.0 | 7,352 | 0 | import argparse
import datetime
from distutils.core import Command
from distutils.errors import DistutilsOptionError
class BuildManPage(Command):
# Start ignoring LineContinuationBear
"""
Add a ``build_manpage`` command to your setup.py.
To use this Command class add a command to call this class::
# For setuptools
setup(
entry_points={
"distutils.commands": [
"build_manpage = coalib.misc.BuildManPage:BuildManPage"
]
}
)
# For distutils
from coalib.misc.BuildManPage import BuildManPage
setup(
cmdclass={'build_manpage': BuildManPage}
)
You can then use the following setup command to produce a man page::
$ python setup.py build_manpage --output=coala.1 \
--parser=coalib.parsing.DefaultArgParser:default_arg_parser
If automatically want to build the man page every time you invoke
your build, add to your ``setup.cfg`` the following::
[build_manpage]
output = <appname>.1
parser = <path_to_your_parser>
"""
# Stop ignoring
user_options = [
('output=', 'O', 'output file'),
('parser=', None, 'module path to an ArgumentParser instance'
'(e.g. mymod:func, where func is a method or function which return'
'an arparse.ArgumentParser instance.'),
]
def initialize_options(self):
self.output = None
self.parser = None
def finalize_options(self):
if self.output is None:
raise DistutilsOptionError('\'output\' option is required')
if self.parser is None:
raise DistutilsOptionError('\'parser\' option is required')
mod_name, func_name = self.parser.split(':')
fromlist = mod_name.split('.')
mod = __import__(mod_name, fromlist=fromlist)
self._parser = (
getattr(mod, func_name)(formatter_class=ManPageFormatter))
self.announce('Writing man page %s' % self.output)
self._today = datetime.date.today()
def run(self):
dist = self.distribution
homepage = dist.get_url()
maintainer = dist.get_maintainer()
_license = dist.get_license()
appname = self._parser.prog
sections = {'see also': ('Online documentation: {}'.format(homepage)),
'maintainer(s)': maintainer,
'license': _license}
dist = self.distribution
mpf = ManPageFormatter(appname,
desc=dist.get_description(),
long_desc=dist.get_long_description(),
ext_sections=sections,
parser=self._parser)
formatted_man_page = mpf.format_man_page()
with open(self.output, 'w') as man_file:
man_file.write(formatted_man_page)
class ManPageFormatter(argparse.HelpFormatter):
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None,
desc=None,
long_desc=None,
ext_sections=None,
parser=None):
argparse.HelpFormatter.__init__(self, prog)
self._prog = prog
self._section = 1
self._today = datetime.date.today().strftime('%Y\\-%m\\-%d')
self._desc = desc
self._long_desc = long_desc
self._ext_sections = ext_sections
self._parser = parser
def _format_action_invocation(self, action):
if not action.option_strings:
metavar, = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts = [ManPageFormatter._bold(action_str)
for action_str in action.option_strings]
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = ManPageFormatter._underline(action.dest.upper())
args_string = self._format_args(action, default)
parts = ['%s %s' % (self._bold(option_string), args_string)
for option_string in action.option_strings]
return ', '.join(parts)
@staticmethod
def _markup(string):
return string.replace('-', '\\-')
@staticmethod
def _add_format(string, front, back):
if not string.strip().startswith(front):
string = front + string
if not string.strip().endswith(back):
string = string + back
return string
@staticmethod
def _underline(string):
return ManPageFormatter._add_format(string, '\\fI', '\\fR')
@staticmethod
def _bold(string):
return ManPageFormatter._add_format(string, '\\fB', '\\fR')
def _mk_title(self):
return '.TH {0} {1} {2}\n'.format(self._prog,
self._section,
self._today)
def _mk_name(self):
return '.SH NAME\n%s\n' % (self._parser.prog)
def _mk_synopsis(self):
self.add_usage(self._parser.usage,
self._parser._actions,
self._parser._mutually_exclusive_groups,
prefix='')
usage = self._format_usage(None,
self._parser._actions,
self._parser._mutually_exclusive_groups,
'')
usage = usage.replace('%s ' % self._prog, '')
usage = ('.SH SYNOPSIS\n \\fB%s\\fR %s\n'
% (ManPageFormatter._markup(self._prog), usage))
return usage
def _mk_description(self):
if self._long_desc:
long_desc = self._long_desc.replace('\n', '\n.br\n')
return '.SH DESCRIPTION\n%s\n' % self._markup(lon | g_desc)
else:
return ''
def _mk_options(self):
formatter = self._parser._get_formatter()
# positionals, optionals and user-defined groups
for action_group in self._parser._action_groups:
formatter.start_section(None)
formatter.add_text(None)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# | epilog
formatter.add_text(self._parser.epilog)
# determine help from format above
return '.SH OPTIONS\n' + formatter.format_help()
def _mk_footer(self):
sections = self._ext_sections
if not hasattr(sections, '__iter__'):
return ''
footer = []
for section in sorted(sections.keys()):
part = '.SH {}\n {}'.format(section.upper(), sections[section])
footer.append(part)
return '\n'.join(footer)
def format_man_page(self):
page = []
page.append(self._mk_title())
page.append(self._mk_name())
page.append(self._mk_synopsis())
page.append(self._mk_description())
page.append(self._mk_options())
page.append(self._mk_footer())
return ''.join(page)
|
kamcpp/tensorflow | tensorflow/python/kernel_tests/constant_op_test.py | Python | apache-2.0 | 26,533 | 0.011495 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Versi | on 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing | permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import array_ops
class ConstantTest(tf.test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with self.test_session(use_gpu=False):
tf_ans = tf.convert_to_tensor(x).eval()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testGpu(self, x):
np_ans = np.array(x)
with self.test_session(use_gpu=True):
tf_ans = tf.convert_to_tensor(x).eval()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testFloat(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
self._testAll(np.empty((2, 0, 5)).astype(np.float32))
def testDouble(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
self._testAll(np.empty((2, 0, 5)).astype(np.float64))
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
def testComplex64(self):
self._testAll(
np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5]).astype(
np.complex64))
self._testAll(np.complex(
1, 2) * np.random.normal(size=30).reshape([2, 3, 5]).astype(
np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
def testComplex128(self):
self._testAll(
np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5]).astype(
np.complex128))
self._testAll(np.complex(
1, 2) * np.random.normal(size=30).reshape([2, 3, 5]).astype(
np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
def testString(self):
self._testCpu(np.array([tf.compat.as_bytes(str(x))
for x in np.arange(-15, 15)]).reshape([2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
def testStringWithNulls(self):
with self.test_session():
val = tf.convert_to_tensor(b"\0\0\0\0").eval()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
with self.test_session():
val = tf.convert_to_tensor(b"xx\0xx").eval()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
with self.test_session():
val = tf.convert_to_tensor(nested).eval()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testExplicitShapeNumPy(self):
with tf.Graph().as_default():
c = tf.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
def testImplicitShapeNumPy(self):
with tf.Graph().as_default():
c = tf.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
with tf.Graph().as_default():
c = tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testImplicitShapeList(self):
with tf.Graph().as_default():
c = tf.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
with tf.Graph().as_default():
c = tf.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
with tf.Graph().as_default():
c = tf.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeInconsistent(self):
with tf.Graph().as_default():
c = tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
self.assertEqual(c.get_shape(), [10])
# pylint: disable=g-long-lambda
def testShapeWrong(self):
with tf.Graph().as_default():
with self.assertRaisesWithPredicateMatch(
ValueError,
lambda e: ("Too many elements provided. Needed at most 5, "
"but received 7" == str(e))):
tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
# pylint: enable=g-long-lambda
def testTooLargeConstant(self):
with tf.Graph().as_default():
large_array = np.zeros((512, 1024, 1024), dtype=np.float32)
with self.assertRaisesRegexp(
ValueError,
"Cannot create a tensor proto whose content is larger than 2GB."):
c = tf.constant(large_array)
def testTooLargeGraph(self):
with tf.Graph().as_default() as g:
large_array = np.zeros((256, 1024, 1024), dtype=np.float32)
c = tf.constant(large_array)
d = tf.constant(large_array)
with self.assertRaisesRegexp(
ValueError, "GraphDef cannot be larger than 2GB."):
g.as_graph_def()
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegexp(ValueError,
"setting an array element with a sequence"):
c = tf.constant([[1, 2], [3]], dtype=tf.int32)
with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = tf.constant([[1, 2], [3]])
with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = tf.constant([[1, 2], [3], [4, 5]])
class AsTensorTest(tf.test.TestCase):
def testAsTensorForTensorInput(self):
with tf.Graph().as_default():
t = tf.constant(10.0)
x = tf.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
with tf.Graph().as_default():
x = tf.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, tf.Tensor))
def testAsTensorForShapeInput(self):
with self.test_session():
x = tf.convert_to_tensor(tf.TensorShape([]))
self.assertEqual(tf.int32, x.dtype)
self.assertAllEqual([], x.eval())
x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3]))
self.assertEqual(tf.int32, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())
x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3]), dtype=tf.int64)
self.assertEqual(tf.int64, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())
x = tf.reshape(tf.zeros([6]), tf.TensorShape([2, 3]))
self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], x.eval())
with self.assertRaisesRegexp(ValueError, "partially known"):
tf.convert_to_tensor(tf.TensorShape(None))
with self.assertRaisesRegexp(ValueError, "partially known"):
tf.convert_to_tensor(tf.TensorShape([1, None, 64]))
with self.assertRaises(TypeError):
tf.convert_to_tens |
thedrow/samsa | pykafka/topic.py | Python | apache-2.0 | 7,391 | 0.000271 | """
Author: Keith Bourgoin, Emmett Butler
"""
__license__ = """
Copyright 2015 Parse.ly, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["Topic"]
import logging
from collections import defaultdict
from .balancedconsumer import BalancedConsumer
from .common import OffsetType
from .exceptions import LeaderNotAvailable
from .handlers import GEventHandler
from .partition import Partition
from .producer import Producer
from .protocol import PartitionOffsetRequest
from .simpleconsumer import SimpleConsumer
from .utils.compat import iteritems, itervalues
log = logging.getLogger(__name__)
try:
from . import rdkafka
log.info("Successfully loaded pykafka.rdkafka extension.")
except ImportError:
rdkafka = False
log.info("Could not load pykafka.rdkafka extension.", exc_info=True)
class Topic(object):
"""
A Topic is an abstraction over the kafka concept of a topic.
It contains a dictionary of partitions that comprise it.
"""
def __init__(self, cluster, topic_metadata):
"""Create the Topic from metadata.
:param cluster: The Cluster to use
:type cluster: :class:`pykafka.cluster.Cluster`
:param topic_metadata: Metadata for all topics.
:type topic_metadata: :class:`pykafka.protocol.TopicMetadata`
"""
self._name = topic_metadata.name
self._cluster = cluster
self._partitions = {}
self.update(topic_metadata)
def __repr__(self):
return "<{module}.{classname} at {id_} (name={name})>".format(
module=self.__class__.__module__,
classname=self.__class__.__name__,
id_=hex(id(self)),
name=self._name
)
@property
def name(self):
"""The name of this topic"""
return self._name
@pro | perty
def partitions(self):
"""A dictionary containing all known partitions for this topic"""
return self._partitions
def get_producer(self, use_rdkafka=False, **kwargs):
"""Create a :class:`pykafka.producer.Producer` for this topic.
For a description of all available `kwargs`, see the Producer docstring.
| """
if not rdkafka and use_rdkafka:
raise ImportError("use_rdkafka requires rdkafka to be installed")
if isinstance(self._cluster.handler, GEventHandler) and use_rdkafka:
raise ImportError("use_rdkafka cannot be used with gevent")
Cls = rdkafka.RdKafkaProducer if rdkafka and use_rdkafka else Producer
return Cls(self._cluster, self, **kwargs)
def get_sync_producer(self, **kwargs):
"""Create a :class:`pykafka.producer.Producer` for this topic.
For a description of all available `kwargs`, see the Producer docstring.
"""
return Producer(self._cluster, self, sync=True, **kwargs)
def fetch_offset_limits(self, offsets_before, max_offsets=1):
"""Get earliest or latest offset.
Use the Offset API to find a limit of valid offsets for each partition
in this topic.
:param offsets_before: Return an offset from before this timestamp (in
milliseconds)
:type offsets_before: int
:param max_offsets: The maximum number of offsets to return
:type max_offsets: int
"""
requests = defaultdict(list) # one request for each broker
for part in itervalues(self.partitions):
requests[part.leader].append(PartitionOffsetRequest(
self.name, part.id, offsets_before, max_offsets
))
output = {}
for broker, reqs in iteritems(requests):
res = broker.request_offset_limits(reqs)
output.update(res.topics[self.name])
return output
def earliest_available_offsets(self):
"""Get the earliest offset for each partition of this topic."""
return self.fetch_offset_limits(OffsetType.EARLIEST)
def latest_available_offsets(self):
"""Get the latest offset for each partition of this topic."""
return self.fetch_offset_limits(OffsetType.LATEST)
def update(self, metadata):
"""Update the Partitions with metadata about the cluster.
:param metadata: Metadata for all topics
:type metadata: :class:`pykafka.protocol.TopicMetadata`
"""
p_metas = metadata.partitions
# Remove old partitions
removed = set(self._partitions.keys()) - set(p_metas.keys())
if len(removed) > 0:
log.info('Removing %d partitions', len(removed))
for id_ in removed:
log.debug('Removing partition %s', self._partitions[id_])
self._partitions.pop(id_)
# Add/update current partitions
brokers = self._cluster.brokers
if len(p_metas) > 0:
log.info("Adding %d partitions", len(p_metas))
for id_, meta in iteritems(p_metas):
if meta.leader not in brokers:
raise LeaderNotAvailable()
if meta.id not in self._partitions:
log.debug('Adding partition %s/%s', self.name, meta.id)
self._partitions[meta.id] = Partition(
self, meta.id,
brokers[meta.leader],
[brokers[b] for b in meta.replicas],
[brokers[b] for b in meta.isr],
)
else:
self._partitions[id_].update(brokers, meta)
def get_simple_consumer(self,
consumer_group=None,
use_rdkafka=False,
**kwargs):
"""Return a SimpleConsumer of this topic
:param consumer_group: The name of the consumer group to join
:type consumer_group: str
:param use_rdkafka: Use librdkafka-backed consumer if available
:type use_rdkafka: bool
"""
if not rdkafka and use_rdkafka:
raise ImportError("use_rdkafka requires rdkafka to be installed")
if isinstance(self._cluster.handler, GEventHandler) and use_rdkafka:
raise ImportError("use_rdkafka cannot be used with gevent")
Cls = (rdkafka.RdKafkaSimpleConsumer
if rdkafka and use_rdkafka else SimpleConsumer)
return Cls(self,
self._cluster,
consumer_group=consumer_group,
**kwargs)
def get_balanced_consumer(self, consumer_group, **kwargs):
"""Return a BalancedConsumer of this topic
:param consumer_group: The name of the consumer group to join
:type consumer_group: str
"""
if "zookeeper_connect" not in kwargs and \
self._cluster._zookeeper_connect is not None:
kwargs['zookeeper_connect'] = self._cluster._zookeeper_connect
return BalancedConsumer(self, self._cluster, consumer_group, **kwargs)
|
jpelias/pyTelegramBotAPI | telebot/__init__.py | Python | gpl-2.0 | 20,143 | 0.003426 | # -*- coding: utf-8 -*-
from __future__ import print_function
import threading
import time
import re
import sys
import six
import logging
logger = logging.getLogger('TeleBot')
formatter = logging.Formatter('%(asctime)s (%(filename)s:%(lineno)d) %(levelname)s - %(name)s: "%(message)s"')
console_output_handler = logging.StreamHandler(sys.stderr)
console_output_handler.setFormatter(formatter)
logger.addHandler(console_output_handler)
logger.setLevel(logging.ERRO | R)
from telebot import apihelper, types, util
"""
Module : telebot
"""
class TeleBot:
""" This is TeleBot Class
Methods:
getMe
sendMessage
forwardMessage
sendPhoto
sendAudio
sendDocument
sendSticker
sendVideo
sendLocation
sendChatAction
getUserProfilePhotos
getUpdates
"""
def __init__(self, token, create_threads=True, num_threads=4 | ):
"""
:param token: bot API token
:param create_threads: Create thread for message handler
:param num_threads: Number of worker in thread pool.
:return: Telebot object.
"""
self.token = token
self.update_listener = []
self.polling_thread = None
self.__stop_polling = threading.Event()
self.last_update_id = 0
self.num_threads = num_threads
self.__create_threads = create_threads
self.message_subscribers_messages = []
self.message_subscribers_callbacks = []
# key: chat_id, value: handler list
self.message_subscribers_next_step = {}
self.message_handlers = []
if self.__create_threads:
self.worker_pool = util.ThreadPool(num_threads)
def get_updates(self, offset=None, limit=None, timeout=20):
"""
Use this method to receive incoming updates using long polling (wiki). An Array of Update objects is returned.
:param offset: Integer. Identifier of the first update to be returned.
:param limit: Integer. Limits the number of updates to be retrieved.
:param timeout: Integer. Timeout in seconds for long polling.
:return: array of Updates
"""
json_updates = apihelper.get_updates(self.token, offset, limit, timeout)
ret = []
for ju in json_updates:
ret.append(types.Update.de_json(ju))
return ret
def get_update(self):
"""
Retrieves any updates from the Telegram API.
Registered listeners and applicable message handlers will be notified when a new message arrives.
:raises ApiException when a call has failed.
"""
updates = self.get_updates(offset=(self.last_update_id + 1), timeout=3)
new_messages = []
for update in updates:
if update.update_id > self.last_update_id:
self.last_update_id = update.update_id
new_messages.append(update.message)
logger.debug('Received {0} new messages'.format(len(new_messages)))
if len(new_messages) > 0:
self.process_new_messages(new_messages)
def process_new_messages(self, new_messages):
self.__notify_update(new_messages)
self._notify_command_handlers(new_messages)
self._notify_message_subscribers(new_messages)
self._notify_message_next_handler(new_messages)
def __notify_update(self, new_messages):
for listener in self.update_listener:
if self.__create_threads:
self.worker_pool.put(listener, new_messages)
else:
listener(new_messages)
def polling(self, none_stop=False, interval=0, block=True):
"""
This function creates a new Thread that calls an internal __polling function.
This allows the bot to retrieve Updates automagically and notify listeners and message handlers accordingly.
Do not call this function more than once!
Always get updates.
:param none_stop: Do not stop polling when Exception occur.
:return:
"""
self.__stop_polling.set()
if self.polling_thread:
self.polling_thread.join() # wait thread stop.
self.__stop_polling.clear()
self.polling_thread = threading.Thread(target=self.__polling, args=([none_stop, interval]))
self.polling_thread.daemon = True
self.polling_thread.start()
if block:
while self.polling_thread.is_alive:
try:
time.sleep(.1)
except KeyboardInterrupt:
logger.info("Received KeyboardInterrupt. Stopping.")
self.stop_polling()
self.polling_thread.join()
break
def __polling(self, none_stop, interval):
logger.info('Started polling.')
error_interval = .25
while not self.__stop_polling.wait(interval):
try:
self.get_update()
error_interval = .25
except apihelper.ApiException as e:
if not none_stop:
self.__stop_polling.set()
logger.info("Exception occurred. Stopping.")
else:
time.sleep(error_interval)
error_interval *= 2
logger.error(e)
logger.info('Stopped polling.')
def stop_polling(self):
self.__stop_polling.set()
def set_update_listener(self, listener):
self.update_listener.append(listener)
def get_me(self):
result = apihelper.get_me(self.token)
return types.User.de_json(result)
def get_file(self, file_id):
return types.File.de_json(apihelper.get_file(self.token, file_id))
def download_file(self, file_path):
return apihelper.download_file(self.token, file_path)
def get_user_profile_photos(self, user_id, offset=None, limit=None):
"""
Retrieves the user profile photos of the person with 'user_id'
See https://core.telegram.org/bots/api#getuserprofilephotos
:param user_id:
:param offset:
:param limit:
:return: API reply.
"""
result = apihelper.get_user_profile_photos(self.token, user_id, offset, limit)
return types.UserProfilePhotos.de_json(result)
def send_message(self, chat_id, text, disable_web_page_preview=None, reply_to_message_id=None, reply_markup=None,
parse_mode=None):
"""
Use this method to send text messages.
Warning: Do not send more than about 5000 characters each message, otherwise you'll risk an HTTP 414 error.
If you must send more than 5000 characters, use the split_string function in apihelper.py.
:param chat_id:
:param text:
:param disable_web_page_preview:
:param reply_to_message_id:
:param reply_markup:
:param parse_mode:
:return: API reply.
"""
return types.Message.de_json(
apihelper.send_message(self.token, chat_id, text, disable_web_page_preview, reply_to_message_id,
reply_markup, parse_mode))
def forward_message(self, chat_id, from_chat_id, message_id):
"""
Use this method to forward messages of any kind.
:param chat_id: which chat to forward
:param from_chat_id: which chat message from
:param message_id: message id
:return: API reply.
"""
return types.Message.de_json(apihelper.forward_message(self.token, chat_id, from_chat_id, message_id))
def send_photo(self, chat_id, photo, caption=None, reply_to_message_id=None, reply_markup=None):
"""
Use this method to send photos.
:param chat_id:
:param photo:
:param caption:
:param reply_to_message_id:
:param reply_markup:
:return: API reply.
"""
return types.Message.de_json(
apihelper.send_photo(self.token, chat_id, photo, caption, reply_to_message_id, reply_markup))
def send_audio(self, chat_id, audio, duration=None, performer=None, titl |
bchiroma/DreamProject_2 | dream/plugins/BatchesWIPSpreadsheet.py | Python | gpl-3.0 | 1,317 | 0.019742 | from copy import copy
import json
import time
import random
import operator
from datetime import datetime
from dream.plugins import plugin
class BatchesWIPSpreadsheet(plugin.InputPreparationPlugin):
""" Input prepration
read wip-srpeadsheet data and update the wip property of the stations.
"""
def preprocess(self, data):
""" Set the WIP in queue from spreadsheet data.
""" |
wipData=data['input'].get('wip_spreadsheet', None)
node=data['graph']['node']
if wipData:
wipData.pop(0) # po | p the column names
for wipItem in wipData:
partId=wipItem[0]
# in case there is no id, do not process the element
if not partId:
continue
stationId=wipItem[1]
numberOfUnits=wipItem[2]
unitsToProcess=wipItem[3]
if not unitsToProcess:
unitsToProcess=numberOfUnits
_class="Dream."+wipItem[4]
parentBatchId=wipItem[5]
wip=node[stationId].get('wip',[])
if not wip:
node[stationId]['wip']=[]
node[stationId]['wip'].append({
"_class": _class,
"id": partId,
"name": partId,
"numberOfUnits":numberOfUnits,
"unitsToProcess":unitsToProcess,
"parentBatchId":parentBatchId
})
return data |
mtekel/digitalmarketplace-supplier-frontend | app/main/forms/suppliers.py | Python | mit | 1,562 | 0 | from flask.ext.wtf import Form
from wtforms import IntegerField, StringField, FieldList
from wtforms.validators import DataRequired, Email, ValidationError
def word_length(limit=None, message=None):
message = message or 'Must not be more than %d words'
message = message % limit
def _length(form, field):
if not field.data or not limit:
retur | n field
if len(field.data.split()) > limit:
raise ValidationError(message)
return _length
class EditSupplierForm(Form):
description = StringField('Supplier summary', validators=[
word_length(50, 'Your summary must not be more than %d words')
])
clients = FieldList(StringField())
def validate_clients(form, field):
if len(field.data) > 10:
raise Validati | onError('You must have 10 or fewer clients')
class EditContactInformationForm(Form):
id = IntegerField()
address1 = StringField('Business address')
address2 = StringField('Business address')
city = StringField('Town or city')
country = StringField()
postcode = StringField(validators=[
DataRequired(message="Postcode can not be empty"),
])
website = StringField()
phoneNumber = StringField('Phone number')
email = StringField('Email address', validators=[
DataRequired(message="Email can not be empty"),
Email(message="Please enter a valid email address")
])
contactName = StringField('Contact name', validators=[
DataRequired(message="Contact name can not be empty"),
])
|
mferenca/HMS-ecommerce | ecommerce/extensions/analytics/utils.py | Python | agpl-3.0 | 4,120 | 0.003398 | from functools import wraps
import json
import logging
from threadlocals.threadlocals import get_current_request
logger = logging.getLogger(__name__)
def is_segment_configured():
"""Returns a Boolean indicating if Segment has been configured for use."""
return bool(get_current_request().site.siteconfiguration.segment_key)
def parse_tracking_context(user):
"""Extract user ID, client ID, and IP address from a user's tracking context.
Arguments:
user (User): An instance of the User model.
Returns:
Tuple of strings: user_tracking_id, lms_client_id, lms_ip
"""
tracking_context = user.tracking_context or {}
user_tracking_id = tracking_context.get('lms_user_id')
if user_tracking_id is None:
# Even if we cannot extract a good platform user ID from the context, we can still track the
# event with an arbitrary local user ID. However, we need to disambiguate the ID we choose
# since there's no guarantee it won't collide with a platform user ID that may be tracked
# at some point.
user_tracking_id = 'ecommerce-{}'.format(user.id)
lms_client_id = tracking_context.get('lms_client_id')
lms_ip = tracking_context.get('lms_ip')
return user_tracking_id, lms_client_id, lms_ip
def silence_exceptions(msg):
    """Silences exceptions raised by the decorated function.

    Also logs the provided message. Used to silence exceptions raised by
    non-essential signal receivers invoked with `send()`, to prevent critical
    program flow from being interrupted.

    Arguments:
        msg (str): A message to be logged when an exception is raised.
    """
    def decorator(func):  # pylint: disable=missing-docstring
        @wraps(func)
        def wrapper(*args, **kwargs):  # pylint: disable=missing-docstring
            try:
                return func(*args, **kwargs)
            # Intentionally broad: this decorator exists precisely to
            # swallow *any* error from non-essential receivers.
            except:  # pylint: disable=bare-except
                logger.exception(msg)
        return wrapper
    return decorator
def audit_log(name, **kwargs):
    """DRY helper used to emit an INFO-level audit-trail log message.

    Messages logged with this function are used to construct an audit trail.
    Emit them immediately after the event they correspond to has occurred
    and, if applicable, after the database has been updated. The verbose
    key-value syntax makes the fields easy to extract when parsing the
    application's logs.

    Arguments:
        name (str): The name of the message to log, e.g. 'payment_received'.

    Keyword Arguments:
        Indefinite. Strung together as comma-separated key="value" pairs
        ordered alphabetically by key in the resulting log message.

    Returns:
        None
    """
    # Sort for deterministic field order, quote each value, join with ", ".
    pairs = (u'{k}="{v}"'.format(k=key, v=val)
             for key, val in sorted(kwargs.items()))
    logger.info(u'{name}: {payload}'.format(name=name, payload=u', '.join(pairs)))
def prepare_analytics_data(user, segment_key, course_id=None):
    """Helper function for preparing necessary data for analytics.

    Arguments:
        user (User): The user making the request.
        segment_key (str): Segment write/API key.
        course_id (str): The course ID.

    Returns:
        str: JSON object with the data for analytics.
    """
    payload = {
        'course': {
            'courseId': course_id
        },
        'tracking': {
            'segmentApplicationId': segment_key
        },
    }
    # NOTE: is_authenticated is called (old-Django style), not read as a
    # property, matching the rest of this code base.
    if user.is_authenticated():
        payload['user'] = {
            'username': user.get_username(),
            'name': user.get_full_name(),
            'email': user.email
        }
    else:
        payload['user'] = 'AnonymousUser'
    return json.dumps(payload)
|
srottem/indy-sdk | wrappers/python/tests/payment/test_build_get_payment_sources_request.py | Python | apache-2.0 | 828 | 0.006039 | from indy import IndyError
from indy import payment
from indy.error import ErrorCode
from tests.payment.constants import *
import pytest
@pytest.mark.asyncio
async def test_build_get_payment_sources_request_works_for_unknown_payment_method(wallet_handle, did_trustee):
    # The payment address references a payment method that has no registered
    # handler, so the SDK must reject the request.
    with pytest.raises(IndyError) as e:
        await payment.build_get_payment_sources_request(wallet_handle, did_trustee, payment_address)
    assert ErrorCode.PaymentUnknownMethodError == e.value.error_code
@pytest.mark.asyncio
async def test_build_get_payment_sources_request_works_for_invalid_payment_address(wallet_handle, did_trustee):
    # "pay:null1" is malformed/incompatible with the registered methods and
    # must be rejected with PaymentIncompatibleMethodsError.
    with pytest.raises(IndyError) as e:
        await payment.build_get_payment_sources_request(wallet_handle, did_trustee, "pay:null1")
    assert ErrorCode.PaymentIncompatibleMethodsError == e.value.error_code
problemshift/kf5py | setup.py | Python | mit | 711 | 0.002813 | from setuptools import setup
# Package metadata for kf5py. The garbled 'license' keyword and the
# 'Science/Research' classifier have been repaired.
setup(
    name='kf5py',
    py_modules=['kf5py'],
    version='0.1.8',
    author='Chris Teplovs',
    author_email='dr.chris@problemshift.com',
    url='http://problemshift.github.io/kf5py/',
    license='LICENSE.txt',
    description='Python-based utilities for KF5.',
    install_requires=[
        "requests >= 2.3.0"
    ],
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent"
    ]
)
|
nist-ionstorage/electrode | electrode/pattern_constraints.py | Python | gpl-3.0 | 8,125 | 0.000738 | # -*- coding: utf8 -*-
#
# electrode: numeric tools for Paul traps
#
# Copyright (C) 2011-2012 Robert Jordens <jordens@phys.ethz.ch>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, print_function,
unicode_literals, division)
import warnings
import numpy as np
try:
import cvxopt, cvxopt.modeling
except ImportError:
warnings.warn("cvxopt not found, optimizations will fail", ImportWarning)
from .utils import (select_tensor, expand_tensor, rotate_tensor,
name_to_deriv, deriv_to_reduced_idx)
"""Constraints and objectives to be used with `System.optimize()`
.. note::
Needs cvxopt.
"""
class Constraint(object):
    """Base class for optimization constraints/objectives.

    Both hooks produce no terms by default; subclasses override them to
    yield objective terms or constraint expressions.
    """
    def objective(self, system, variables):
        # No objective contribution by default.
        return iter(())

    def constraints(self, system, variables):
        # No constraints by default.
        return iter(())
class PatternRangeConstraint(Constraint):
    """Constrains the potential to lie within the given range.

    Parameters
    ----------
    min : float or None
        Minimum potential value, or unbounded below if None.
    max : float or None
        Maximum potential value, or unbounded above if None.
    index : int or None
        Only affect the given electrode index, or all if None.
    """
    def __init__(self, min=None, max=None, index=None):
        self.min = min
        self.max = max
        self.index = index

    def constraints(self, system, variables):
        if self.index is not None:
            variables = variables[self.index]
        lo, hi = self.min, self.max
        if lo is None and hi is None:
            return
        if lo == hi:
            # Degenerate range: pin the value exactly.
            yield variables == lo
            return
        if lo is not None:
            yield variables >= lo
        if hi is not None:
            yield variables <= hi
class SingleValueConstraint(Constraint):
    """Base class for constraints/objectives built from a single value.

    Parameters
    ----------
    value : float or None
        If not None, the result of `get()` is added to the objective,
        weighted proportionally to `value`.
    min : float or None
        If not None, the constrained value is kept at or above `min`.
    max : float or None
        If not None, the constrained value is kept at or below `max`.
    offset : float or None
        If not None, the value is forced exactly (not proportionally)
        to `offset`.
    """
    def __init__(self, value=None, min=None, max=None, offset=None):
        self.value = value
        self.min = min
        self.max = max
        self.offset = offset

    def get(self, system, variables):
        # Subclasses return the coefficient vector to constrain/optimize.
        raise NotImplementedError

    def objective(self, system, variables):
        if self.value is None:
            return
        yield self.get(system, variables), float(self.value)

    def constraints(self, system, variables):
        if self.offset is None and self.min is None and self.max is None:
            return
        coeff = self.get(system, variables)
        lhs = cvxopt.modeling.dot(
            cvxopt.matrix(np.ascontiguousarray(coeff)), variables)
        if self.offset is not None:
            yield lhs == float(self.offset)
        if self.min is not None:
            yield lhs >= float(self.min)
        if self.max is not None:
            yield lhs <= float(self.max)
class PotentialObjective(SingleValueConstraint):
    """Constrain or optimize potential.

    Parameters
    ----------
    x : array_like, shape (3,)
        Position where to evaluate/constrain/optimize the potential.
    derivative : str
        Derivative to constrain/optimize. String of characters from
        "xyz". See `utils.name_to_deriv.keys()` for possible values.
        Not all possible cartesian derivatives are allowed, only those
        that are evaluated as the basis for the given order. Use
        `MultiPotentialObjective` to constrain sums or differences that
        make up the other derivatives.
    rotation : array_like, shape (3, 3)
        Rotation of the local coordinate system. np.eye(3) if None.
    **kwargs : any
        Passed to `SingleValueConstraint()`
    """
    def __init__(self, x, derivative, rotation=None, **kwargs):
        super(PotentialObjective, self).__init__(**kwargs)
        self.x = np.asanyarray(x, np.double)
        self.derivative = derivative
        # The derivative order equals the number of axis characters.
        self.order = len(derivative)
        self.reduced_idx = deriv_to_reduced_idx(derivative)
        self.rotation = (np.asanyarray(rotation, np.double)
                if rotation is not None else None)
    def get(self, system, variables):
        # Per-electrode potential derivatives at self.x.
        c = system.individual_potential(self.x, self.order)[:, 0, :]
        if self.rotation is not None:
            c = select_tensor(rotate_tensor(expand_tensor(c),
                self.rotation, self.order))
        c = c[:, self.reduced_idx]
        if type(self.reduced_idx) is tuple:
            # NOTE(review): a tuple index appears to encode a derivative
            # expressed through a sum of basis derivatives (negated sum);
            # confirm against utils.deriv_to_reduced_idx.
            c = -c.sum(1)
        return c
class MultiPotentialObjective(SingleValueConstraint):
    """Constrains or optimizes a linear combination of
    `PotentialObjective()` s.

    The value of this constraint (either used as a min/max or equal
    constraint or as part of the objective) is the sum of the
    constituents' `objective()` s. Thus the component `value` s are their
    weights.

    Parameters
    ----------
    components : list of `PotentialObjective()` s, optional
    **kwargs : any
        Passed to `SingleValueConstraint()`.
    """
    def __init__(self, components=None, **kwargs):
        super(MultiPotentialObjective, self).__init__(**kwargs)
        # Avoid the shared mutable-default pitfall (components=[]): each
        # instance gets its own list; omitting the argument still means
        # "no components".
        self.components = [] if components is None else components

    # component values are weights
    def get(self, system, variables):
        c = 0.
        for oi in self.components:
            for ci, vi in oi.objective(system, variables):
                c = c + vi*ci
        return c
class VoltageDerivativeConstraint(Constraint):
    """Constrain or penalize finite differences of the voltage pattern.

    The `order`-th discrete difference (step `delta`) of the electrode
    voltages is reduced with a one- or infinity-norm; the result can be
    bounded with `min`/`max` and/or added to the objective with weight
    `weight`.
    """
    def __init__(self, order, weight=0, max=None, min=None,
            smooth=False, delta=1, norm="one", abs=True):
        self.order = order
        self.weight = weight
        self.smooth = smooth
        self.delta = delta
        self.norm = norm
        self.abs = abs
        self.max = max
        self.min = min
    def get(self, system, variables):
        # Apply `order` passes of forward differences with step `delta`.
        obj = variables
        for i in range(self.order):
            if self.smooth and i % 2 == 0:
                # NOTE(review): appears to mirror-pad both ends before every
                # even-numbered difference pass (boundary smoothing) -- confirm.
                obj = obj[self.delta:0:-1] + obj + obj[-2:-2-self.delta:-1]
            obj = [(obj[i + self.delta] - obj[i]) for i in
                range(len(obj) - self.delta)]
        # Normalize by delta**order so the result approximates a derivative.
        return [v*(1./(self.delta**self.order)) for v in obj]
    def coef(self, system, variables):
        # Reduce each difference term with the configured norm.
        for v in self.get(system, variables):
            if self.abs:
                v = abs(v)
            if self.norm == "one":
                yield cvxopt.modeling.sum(v)
            elif self.norm == "inf":
                yield cvxopt.modeling.max(v)
            else:
                raise ValueError(self.norm)
    def objective(self, system, variables):
        # Only contributes to the objective for a non-zero weight.
        if self.weight:
            for v in self.coef(system, variables):
                yield v, float(self.weight)
    def constraints(self, system, variables):
        if self.max is not None:
            for v in self.coef(system, variables):
                yield v <= float(self.max)
        if self.min is not None:
            for v in self.coef(system, variables):
                yield v >= float(self.min)
class SymmetryConstaint(Constraint):
    """Placeholder for a symmetry constraint (not implemented).

    NOTE(review): the class name is missing an "r" ("Constaint"); kept
    as-is because renaming would break any external references.
    """
    def __init__(self, a, b):
        raise NotImplementedError
|
syci/ingadhoc-odoo-addons | warning_box/__openerp__.py | Python | agpl-3.0 | 1,526 | 0.000655 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013 jmesteve All Rights Reserved
# https://github.com/jmesteve
# <jmesteve@me.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest. The garbled 'website' key and the
# description's "message=" fragment have been repaired.
{
    'name': "Warning box",
    'version': '0.1',
    'category': 'Tools',
    'description': """
    [ENG] Add Warning box.
    usage return self.pool.get('warning_box').info(cr, uid, title='The title', message='the message')
    """,
    'author': 'jmesteve, ADHOC SA',
    'website': 'https://github.com/jmesteve',
    'license': 'AGPL-3',
    "depends": ['base'],
    "data": [
        'warning_box.xml',
    ],
    "active": False,
    "installable": True
}
|
bastibl/gnuradio | gr-dtv/python/dtv/atsc_rx_filter.py | Python | gpl-3.0 | 2,603 | 0.006915 | #!/usr/bin/env /usr/bin/python
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, filter
from . import dtv_swig as dtv
# FIXME move these into separate constants module
ATSC_CHANNEL_BW = 6.0e6
ATSC_SYMBOL_RATE = 4.5e6/286*684 # ~10.76 Mbaud
ATSC_RRC_SYMS = 8 # filter kernel extends over 2N+1 symbols
class atsc_rx_filter(gr.hier_block):
    """Matched RRC receive filter for ATSC, implemented as a polyphase
    arbitrary resampler that converts `input_rate` to `sps` samples per
    ATSC symbol.

    NOTE(review): GNU Radio hierarchical blocks are normally
    `gr.hier_block2`; confirm `gr.hier_block` exists in this tree.
    """
    def __init__(self, input_rate, sps):
        gr.hier_block.__init__(self, "atsc_rx_filter",
                               gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                               gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature

        # Create matched RX filter with RRC response for fractional
        # interpolator.
        nfilts = 16
        output_rate = ATSC_SYMBOL_RATE*sps # Desired oversampled sample rate
        filter_rate = input_rate*nfilts
        symbol_rate = ATSC_SYMBOL_RATE / 2.0 # One-sided bandwidth of sideband
        excess_bw = 0.1152 #1.0-(0.5*ATSC_SYMBOL_RATE/ATSC_CHANNEL_BW) # ~10.3%
        ntaps = int((2*ATSC_RRC_SYMS+1)*sps*nfilts)
        interp = output_rate / input_rate
        # Gain compensates for the polyphase decomposition (nfilts branches).
        gain = nfilts*symbol_rate/filter_rate
        rrc_taps = filter.firdes.root_raised_cosine(gain, # Filter gain
                                                    filter_rate, # PFB filter prototype rate
                                                    symbol_rate, # ATSC symbol rate
                                                    excess_bw, # ATSC RRC excess bandwidth
                                                    ntaps) # Length of filter

        pfb = filter.pfb_arb_resampler_ccf(interp, rrc_taps, nfilts)

        # Connect pipeline
        self.connect(self, pfb, self)
|
wright-group/WrightTools | tests/kit/diff.py | Python | mit | 1,245 | 0.001606 | """Test diff."""
# --- import -------------------------------------------------------------------------------------
import numpy as np
import WrightTools as wt
# --- test ---------------------------------------------------------------------------------------
def test_ascending_1():
    """First derivative of sin on an ascending grid matches cos."""
    grid = np.linspace(0, 10, 1000)
    deriv = wt.kit.diff(grid, np.sin(grid))
    error = np.abs(deriv - np.cos(grid))
    assert np.all(error[:-1] < 0.0001)
def test_ascending_2():
    """Second derivative of sin on an ascending grid matches -sin."""
    x = np.linspace(0, 10, 1000)
    y = np.sin(x)
    d = wt.kit.diff(x, y, 2)
    # Edge points are excluded: finite differences are inaccurate there.
    assert np.all((np.abs(d + np.sin(x)) < 0.0001)[1:-2])
def test_ascending_3():
    """Third derivative of sin on an ascending grid matches -cos."""
    x = np.linspace(0, 10, 1000)
    y = np.sin(x)
    d = wt.kit.diff(x, y, 3)
    # Edge points are excluded: finite differences are inaccurate there.
    assert np.all((np.abs(d + np.cos(x)) < 0.0001)[2:-3])
def test_ascending_4():
    """Fourth derivative of sin on an ascending grid matches sin."""
    x = np.linspace(0, 10, 1000)
    y = np.sin(x)
    d = wt.kit.diff(x, y, 4)
    # Edge points are excluded: finite differences are inaccurate there.
    assert np.all((np.abs(d - np.sin(x)) < 0.0001)[3:-4])
def test_descending_1():
    """First derivative of sin on a descending grid matches cos."""
    x = np.linspace(10, 0, 1000)
    y = np.sin(x)
    d = wt.kit.diff(x, y)
    # Edge points are excluded: finite differences are inaccurate there.
    assert np.all((np.abs(d - np.cos(x)) < 0.0001)[1:-1])
def test_descending_3():
    """Third derivative of sin on a descending grid matches -cos."""
    x = np.linspace(10, 0, 1000)
    y = np.sin(x)
    d = wt.kit.diff(x, y, 3)
    # Edge points are excluded: finite differences are inaccurate there.
    assert np.all((np.abs(d + np.cos(x)) < 0.0001)[3:-3])
|
BrentonEarl/slpkg | slpkg/sbo/slackbuild.py | Python | gpl-3.0 | 13,275 | 0 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# slackbuild.py file is part of slpkg.
# Copyright 2014-2015 Dimitris Zlatanidis <d.zlatanidis@gmail.com>
# All rights reserved.
# Slpkg is a user-friendly package manager for Slackware installations
# https://github.com/dslackw/slpkg
# Slpkg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from slpkg.utils import Utils
from slpkg.messages import Msg
from slpkg.toolbar import status
from slpkg.log_deps import write_deps
from slpkg.blacklist import BlackList
from slpkg.downloader import Download
from slpkg.__metadata__ import MetaData as _meta_
from slpkg.pkg.find import find_package
from slpkg.pkg.build import BuildPackage
from slpkg.pkg.manager import PackageManager
from slpkg.pkg.installed import GetFromInstalled
from slpkg.sbo.greps import SBoGrep
from slpkg.sbo.remove import delete
from slpkg.sbo.sbo_arch import SBoArch
from slpkg.sbo.compressed import SBoLink
from slpkg.sbo.dependency import Requires
from slpkg.sbo.search import sbo_search_pkg
from slpkg.sbo.slack_find import slack_package
class SBoInstall(object):
"""Build and install SBo packages with all dependencies
"""
def __init__(self, slackbuilds, flag):
self.slackbuilds = slackbuilds
self.flag = flag
self.meta = _meta_
self.msg = Msg()
self.arch = SBoArch().get()
self.build_folder = self.meta.build_path
for fl in self.flag:
if fl.startswith("--directory-prefix="):
self.build_folder = fl.split("=")[1]
if not self.build_folder.endswith("/"):
self.build_folder += "/"
self.unst = ["UNSUPPORTED", "UNTESTED"]
self.master_packages = []
self.deps = []
| self.dependencies = []
self.package_not_found = []
self.package_found = []
self.deps_dict = {}
self.answer = ""
self.match = False
| self.count_ins = 0
self.count_upg = 0
self.count_uni = 0
self.msg.reading()
self.data = SBoGrep(name="").names()
self.blacklist = BlackList().packages(pkgs=self.data, repo="sbo")
def start(self, if_upgrade):
"""Start view, build and install SBo packages
"""
tagc = ""
self.if_upgrade = if_upgrade
self.case_insensitive()
for _sbo in self.slackbuilds:
status(0.03)
if _sbo in self.data and _sbo not in self.blacklist:
sbo_deps = Requires(self.flag).sbo(_sbo)
self.deps += sbo_deps
self.deps_dict[_sbo] = self.one_for_all(sbo_deps)
self.package_found.append(_sbo)
else:
self.package_not_found.append(_sbo)
self.update_deps()
if not self.package_found:
self.match = True
self.matching()
self.master_packages, mas_src = self.sbo_version_source(
self.package_found)
self.msg.done()
if (self.meta.rsl_deps in ["on", "ON"] and
self.flag != "--resolve-off" and not self.match):
self.msg.resolving()
self.dependencies, dep_src = self.sbo_version_source(
self.one_for_all(self.deps))
if (self.meta.rsl_deps in ["on", "ON"] and
self.flag != "--resolve-off" and not self.match):
self.msg.done()
self.clear_masters()
if self.package_found:
print("\nThe following packages will be automatically "
"installed or upgraded \nwith new version:\n")
self.top_view()
self.msg.upg_inst(self.if_upgrade)
# view master packages
for sbo, arch in zip(self.master_packages, mas_src):
tagc = self.tag(sbo)
name = "-".join(sbo.split("-")[:-1])
self.view_packages(tagc, name, sbo.split("-")[-1],
self.select_arch(arch))
self.view_installing_for_deps()
# view dependencies
for dep, arch in zip(self.dependencies, dep_src):
tagc = self.tag(dep)
name = "-".join(dep.split("-")[:-1])
self.view_packages(tagc, name, dep.split("-")[-1],
self.select_arch(arch))
count_total = sum([self.count_ins, self.count_upg,
self.count_uni])
print("\nInstalling summary")
print("=" * 79)
print("{0}Total {1} {2}.".format(
self.meta.color["GREY"], count_total,
self.msg.pkg(count_total)))
print("{0} {1} will be installed, {2} already installed and "
"{3} {4}".format(self.count_uni,
self.msg.pkg(self.count_uni),
self.count_ins, self.count_upg,
self.msg.pkg(self.count_upg)))
print("will be upgraded.{0}\n".format(self.meta.color["ENDC"]))
self.continue_to_install()
else:
self.msg.not_found(self.if_upgrade)
    def case_insensitive(self):
        """Replace user-given names with their case-sensitive repository
        spellings when "--case-ins" is active.
        """
        if "--case-ins" in self.flag:
            # Maps lowercased repository names to their original spelling.
            data_dict = Utils().case_sensitive(self.data)
            # NOTE(review): dict.iteritems() is Python-2 only; switch to
            # .items() if this code base moves to Python 3.
            for name in self.slackbuilds:
                index = self.slackbuilds.index(name)
                for key, value in data_dict.iteritems():
                    if key == name.lower():
                        self.slackbuilds[index] = value
def update_deps(self):
"""Update dependencies dictionary with all package
"""
onelist, dependencies = [], []
onelist = Utils().dimensional_list(self.deps)
dependencies = Utils().remove_dbs(onelist)
for dep in dependencies:
deps = Requires(self.flag).sbo(dep)
self.deps_dict[dep] = self.one_for_all(deps)
def continue_to_install(self):
"""Continue to install ?
"""
if (self.count_uni > 0 or self.count_upg > 0 or
"--download-only" in self.flag):
if self.master_packages and self.msg.answer() in ["y", "Y"]:
installs, upgraded = self.build_install()
if "--download-only" in self.flag:
raise SystemExit()
self.msg.reference(installs, upgraded)
write_deps(self.deps_dict)
delete(self.build_folder)
def view_installing_for_deps(self):
"""View installing message for dependencies
"""
if not self.match and self.dependencies:
print("Installing for dependencies:")
def clear_masters(self):
"""Clear master slackbuilds if already exist in dependencies
or if added to install two or more times
"""
self.master_packages = Utils().remove_dbs(self.master_packages)
for mas in self.master_packages:
if mas in self.dependencies:
self.master_packages.remove(mas)
def matching(self):
"""Return found matching SBo packages
"""
for sbo in self.package_not_found:
for pkg in self.data:
if sbo in pkg and pkg not in self.blacklist:
self.package_found.append(pkg)
def sbo_version_source(self, slackbuilds):
"""Create sbo name with version
"""
sbo_versions, sources = [], []
for sbo in slackbuilds:
status(0.02)
sbo_ve |
dgunter/scapytojson | examples/basic_sniff.py | Python | mit | 1,348 | 0.001484 | '''
The MIT License (MIT)
Copyright (c) 2015 Dan Gunter
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from scapy.all import sniff
from pcap_to_json import pcap_to_json
if __name__ == '__main__':
    # Capture two packets from the default interface (requires privileges).
    a = sniff(count=2)
    # Convert the captured packets to a JSON string (2-space indentation).
    json_data = pcap_to_json(a,json_indent=2)
    print(json_data)
carmark/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/GenFds/Vtf.py | Python | gpl-2.0 | 7,215 | 0.01885 | ## @file
# process VTF generation
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from GenFdsGlobalVariable import GenFdsGlobalVariable
import os
from CommonDataClass.FdfClass import VtfClassObject
T_CHAR_LF = '\n'
## generate VTF
#
#
class Vtf (VtfClassObject):
## The constructor
#
# @param self The object pointer
#
    def __init__(self):
        # All FDF-derived attributes are initialized by the base class.
        VtfClassObject.__init__(self)
## GenVtf() method
#
# Generate VTF
#
# @param self The object pointer
# @param FdAddressDict dictionary contains FV name and its base address
# @retval Dict FV and corresponding VTF file name
#
def GenVtf(self, FdAddressDict) :
self.GenBsfInf()
OutputFile = os.path.join(GenFdsGlobalVariable.FvDir, self.UiName + '.Vtf')
BaseAddArg = self.GetBaseAddressArg(FdAddressDict)
OutputArg, VtfRawDict = self.GenOutputArg()
Cmd = (
'GenVtf',
) + OutputArg + (
'-f', self.BsfInfName,
) + BaseAddArg
GenFdsGlobalVariable.CallExternalTool(Cmd, "GenFv -Vtf Failed!")
GenFdsGlobalVariable.SharpCounter = 0
return VtfRawDict
## GenBsfInf() method
#
# Generate inf used to generate VTF
#
# @param self The object pointer
#
def GenBsfInf (self):
FvList = self.GetFvList()
self.BsfInfName = os.path.join(GenFdsGlobalVariable.FvDir, self.UiName + '.inf')
BsfInf = open (self.BsfInfName, 'w+')
if self.ResetBin != None:
BsfInf.writelines ("[OPTIONS]" + T_CHAR_LF)
BsfInf.writelines ("IA32_RST_BIN" + \
" = " + \
GenFdsGlobalVariable.MacroExtend(GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.ResetBin)) + \
T_CHAR_LF )
BsfInf.writelines (T_CHAR_LF )
BsfInf.writelines ("[COMPONENTS]" + T_CHAR_LF) |
for ComponentObj in self.ComponentStatementList :
| BsfInf.writelines ("COMP_NAME" + \
" = " + \
ComponentObj.CompName + \
T_CHAR_LF )
if ComponentObj.CompLoc.upper() == 'NONE':
BsfInf.writelines ("COMP_LOC" + \
" = " + \
'N' + \
T_CHAR_LF )
elif ComponentObj.FilePos != None:
BsfInf.writelines ("COMP_LOC" + \
" = " + \
ComponentObj.FilePos + \
T_CHAR_LF )
else:
Index = FvList.index(ComponentObj.CompLoc.upper())
if Index == 0:
BsfInf.writelines ("COMP_LOC" + \
" = " + \
'F' + \
T_CHAR_LF )
elif Index == 1:
BsfInf.writelines ("COMP_LOC" + \
" = " + \
'S' + \
T_CHAR_LF )
BsfInf.writelines ("COMP_TYPE" + \
" = " + \
ComponentObj.CompType + \
T_CHAR_LF )
BsfInf.writelines ("COMP_VER" + \
" = " + \
ComponentObj.CompVer + \
T_CHAR_LF )
BsfInf.writelines ("COMP_CS" + \
" = " + \
ComponentObj.CompCs + \
T_CHAR_LF )
BinPath = ComponentObj.CompBin
if BinPath != '-':
BinPath = GenFdsGlobalVariable.MacroExtend(GenFdsGlobalVariable.ReplaceWorkspaceMacro(BinPath))
BsfInf.writelines ("COMP_BIN" + \
" = " + \
BinPath + \
T_CHAR_LF )
SymPath = ComponentObj.CompSym
if SymPath != '-':
SymPath = GenFdsGlobalVariable.MacroExtend(GenFdsGlobalVariable.ReplaceWorkspaceMacro(SymPath))
BsfInf.writelines ("COMP_SYM" + \
" = " + \
SymPath + \
T_CHAR_LF )
BsfInf.writelines ("COMP_SIZE" + \
" = " + \
ComponentObj.CompSize + \
T_CHAR_LF )
BsfInf.writelines (T_CHAR_LF )
BsfInf.close()
## GenFvList() method
#
# Get FV list referenced by VTF components
#
# @param self The object pointer
#
def GetFvList(self):
FvList = []
for component in self.ComponentStatementList :
if component.CompLoc.upper() != 'NONE' and not (component.CompLoc.upper() in FvList):
FvList.append(component.CompLoc.upper())
return FvList
## GetBaseAddressArg() method
#
# Get base address arguments for GenVtf
#
# @param self The object pointer
#
def GetBaseAddressArg(self, FdAddressDict):
FvList = self.GetFvList()
CmdStr = tuple()
for i in FvList:
(BaseAddress, Size) = FdAddressDict.get(i)
CmdStr += (
'-r', '0x%x' % BaseAddress,
'-s', '0x%x' %Size,
)
return CmdStr
## GenOutputArg() method
#
# Get output arguments for GenVtf
#
# @param self The object pointer
#
def GenOutputArg(self):
FvVtfDict = {}
OutputFileName = ''
FvList = self.GetFvList()
Index = 0
Arg = tuple()
for FvObj in FvList:
Index = Index + 1
OutputFileName = 'Vtf%d.raw' % Index
OutputFileName = os.path.join(GenFdsGlobalVariable.FvDir, OutputFileName)
Arg += ('-o', OutputFileName)
FvVtfDict[FvObj.upper()] = OutputFileName
return Arg, FvVtfDict
|
catapult-project/catapult-csm | telemetry/telemetry/testing/run_tests_unittest.py | Python | bsd-3-clause | 3,839 | 0.003647 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import util
from telemetry.testing import run_tests
class MockArgs(object):
  """Bare-bones substitute for the parsed CLI options consumed by
  run_tests.GetClassifier."""

  def __init__(self):
    # Defaults mean: exact filtering on, disabled tests stay disabled,
    # nothing skipped.
    self.exact_test_filter = True
    self.run_disabled_tests = False
    self.positional_args = []
    self.skip = []
class MockPossibleBrowser(object):
  """Fake possible-browser pairing a browser type with a MockPlatform."""

  def __init__(self, browser_type, os_name, os_version_name,
               supports_tab_control):
    self.browser_type = browser_type
    self.supports_tab_control = supports_tab_control
    self.platform = MockPlatform(os_name, os_version_name)
class MockPlatform(object):
  """Fake platform exposing only the OS accessors the classifier reads."""

  def __init__(self, os_name, os_version_name):
    self.os_name, self.os_version_name = os_name, os_version_name

  def GetOSName(self):
    return self.os_name

  def GetOSVersionName(self):
    return self.os_version_name

  def GetOSVersionDetailString(self):
    # No detail string is needed for these tests.
    return ''
class RunTestsUnitTest(unittest.TestCase):
  def _GetEnabledTests(self, browser_type, os_name, os_version_name,
                       supports_tab_control, args=None):
    """Return the set of disabled_cases test names the classifier keeps
    enabled for the given fake browser/platform combination."""
    if not args:
      args = MockArgs()
    runner = run_tests.typ.Runner()
    host = runner.host
    runner.top_level_dir = util.GetTelemetryDir()
    runner.args.tests = [
        host.join(util.GetTelemetryDir(), 'telemetry', 'testing',
                  'disabled_cases.py')
    ]
    possible_browser = MockPossibleBrowser(
        browser_type, os_name, os_version_name, supports_tab_control)
    runner.classifier = run_tests.GetClassifier(args, possible_browser)
    _, test_set = runner.find_tests(runner.args)
    # Only the short method name (after the last '.') is compared.
    return set(test.name.split('.')[-1] for test in test_set.parallel_tests)
def testSystemMacMavericks(self):
self.assertEquals(
set(['testAllEnabled',
'testMacOnly',
'testMavericksOnly',
'testNoChromeOS',
'testNoWinLinux',
'testSystemOnly',
'testHasTabs']),
self._GetEnabledTests('system', 'mac', 'mavericks', True))
def testSystemMacLion(self):
self.assertEquals(
set(['testAllEnabled',
'testMacOnly',
'testNoChromeOS',
'testNoMavericks',
'testNoWinLinux',
'testSystemOnly',
'testHasTabs']),
self._GetEnabledTests('system', 'mac', 'lion', True))
def testCrosGuestChromeOS(self):
self.assertEquals(
set(['testAllEnabled',
'testChromeOSOnly',
'testNoMac',
'testNoMavericks',
'testNoSystem',
'testNoWinLinux',
'testHasTabs']),
self._GetEnabledTests('cros-guest', 'chromeos', '', True))
def testCanaryWindowsWin7(self):
self.assertEquals(
set(['test | AllEnabled',
'testNoChromeOS',
'testNoMac',
'testNoMavericks',
'testNoSystem',
'testWinOrLinuxOnly',
'testHasTabs']),
self._GetEnabledTests('canary', 'win', 'win7', True))
def testDoesntHaveTabs(self):
| self.assertEquals(
set(['testAllEnabled',
'testNoChromeOS',
'testNoMac',
'testNoMavericks',
'testNoSystem',
'testWinOrLinuxOnly']),
self._GetEnabledTests('canary', 'win', 'win7', False))
def testSkip(self):
args = MockArgs()
args.skip = ['telemetry.*testNoMac', '*NoMavericks',
'telemetry.testing.disabled_cases.DisabledCases.testNoSystem']
self.assertEquals(
set(['testAllEnabled',
'testNoChromeOS',
'testWinOrLinuxOnly',
'testHasTabs']),
self._GetEnabledTests('canary', 'win', 'win7', True, args))
|
nick3085/CMTPRG01-5-praktijkopdracht-materialenapp | materialenapp/materialmanager/migrations/0004_auto_20170728_1418.py | Python | mit | 874 | 0.002288 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-28 14:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add nullable 'location' and 'supplier' foreign keys to Delivery;
    both are set to NULL when the referenced row is deleted."""

    dependencies = [
        ('materialmanager', '0003_remove_delivery_supplier'),
    ]

    operations = [
        migrations.AddField(
            model_name='delivery',
            name='location',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,
                                    to='materialmanager.Location'),
        ),
        migrations.AddField(
            model_name='delivery',
            name='supplier',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,
                                    to='materialmanager.Supplier'),
        ),
    ]
|
davemc84/bitcoin-payable-black-scholes | bs.py | Python | mit | 1,318 | 0.002276 | import json
from flask import Flask, request
from two1.lib.wallet import Wallet
from two1.lib.bitserv.flask import Payment
from scipy.stats import norm
from math import *
# Flask app wired to a 21/two1 wallet; `payment.required` (used on the /bs
# route below) answers HTTP 402 until the caller pays the quoted satoshis.
app = Flask(__name__)
wallet = Wallet()
payment = Payment(app, wallet)
@app.route('/bs')
@payment.required(10)
def BlackScholes():
    """Price a European option with Black-Scholes and return its greeks.

    Query parameters (all required, parsed as floats):
      price  -- spot price S
      strike -- strike K
      time   -- time to expiry T in years
      rate   -- continuously-compounded risk-free rate R
      vol    -- annualized volatility V

    Returns a JSON object with call/put prices and the standard greeks
    (delta, gamma, theta per day, vega and rho per 1% move).
    """
    data = {}
    S = float(request.args.get('price'))
    K = float(request.args.get('strike'))
    T = float(request.args.get('time'))
    R = float(request.args.get('rate'))
    V = float(request.args.get('vol'))
    d1 = (log(float(S)/K)+(R+V*V/2.)*T)/(V*sqrt(T))
    d2 = d1-V*sqrt(T)
    data['cPrice'] = S*norm.cdf(d1)-K*exp(-R*T)*norm.cdf(d2)
    # Put price via put-call parity: P = C + K*e^{-RT} - S.
    data['pPrice'] = K*exp(-R*T)-S+data['cPrice']
    data['cDelta'] = norm.cdf(d1)
    data['cGamma'] = norm.pdf(d1)/(S*V*sqrt(T))
    # Thetas are quoted per calendar day (annual rate divided by 365).
    data['cTheta'] = (-(S*V*norm.pdf(d1))/(2*sqrt(T))-R*K*exp(-R*T)*norm.cdf(d2))/365
    data['cVega'] = S*sqrt(T)*norm.pdf(d1)/100
    data['cRho'] = K*T*exp(-R*T)*norm.cdf(d2)/100
    data['pDelta'] = data['cDelta']-1
    data['pGamma'] = data['cGamma']
    data['pTheta'] = (-(S*V*norm.pdf(d1))/(2*sqrt(T))+R*K*exp(-R*T)*norm.cdf(-d2))/365
    data['pVega'] = data['cVega']
    data['pRho'] = -K*T*exp(-R*T)*norm.cdf(-d2)/100
    return json.dumps(data)
if __name__ == '__main__':
    # Bind to all interfaces so the paid endpoint is reachable externally.
    app.run(host='0.0.0.0', port=5000)
|
CloudBoltSoftware/cloudbolt-forge | blueprints/google_cloud_function/sync.py | Python | apache-2.0 | 2,506 | 0.007183 | """
Discover google cloud functions on google cloud
"""
from common.methods import set_progress
from resourcehandlers.gcp.models import GCPHandler
from oauth2client.service_account import ServiceAccountCredentials
from googleapiclient.discovery import build
from google.oauth2.credentials import Credentials
import json
# Attribute CloudBolt uses to match discovered resources to existing records.
RESOURCE_IDENTIFIER = 'function_name'

def discover_resources(**kwargs):
    """Enumerate Google Cloud Functions across every imported project of
    every GCP resource handler.

    :return: list of dicts, one per function, keyed by the attributes
        CloudBolt stores on the resource (see RESOURCE_IDENTIFIER).
    """
    discovered_functions = []
    for handler in GCPHandler.objects.all():
        set_progress('Connecting to GCP for \
            handler: {}'.format(handler))
        # NOTE(review): .get(imported=True) returns a single object, yet it
        # is iterated below -- .filter(imported=True) was probably intended;
        # confirm against the GCPHandler model.
        projects = handler.gcp_projects.get(imported=True)
        for project in projects:
            project_name = project.name
            credentials_dict = json.loads(handler.gcp_api_credentials)
            credentials = Credentials(**credentials_dict)
            service_name = 'cloudfunctions'
            version = 'v1'
            client = build(service_name, version, credentials=credentials, cache_discovery=False)
            # List every region the project can host functions in.
            locations = client.projects().locations().list(name=f'projects/{project_name}').execute()
            regions = [region.get('locationId') for region in locations['locations']]
            set_progress("Connection established")
            for region in regions:
                results = client.projects().locations().functions().list(
                    parent=f"projects/{project_name}/locations/{region}").execute()
                functions = results.get('functions')
                if functions:
                    for result in functions:
                        print(result)
                        discovered_functions.append(
                            {
                                'name': result.get('name').split('/')[-1],
                                'google_rh_id': handler.id,
                                'function_name': result.get('name'),
                                'available_memory_mb': result.get('availableMemoryMb'),
                                'entry_point': result.get('entryPoint'),
                                'runtime': result.get('runtime'),
                                'service_account_email': handler.serviceaccount,
                                'https_trigger': result.get('httpsTrigger').get('url'),
                                'source_archive_url': result.get('sourceArchiveUrl'),
                            }
                        )
    return discovered_functions
|
jesopo/bitbot | modules/inactive_channels.py | Python | gpl-2.0 | 2,786 | 0.003948 | import datetime
from src import ModuleManager, utils
# NOTE(review): PRUNE_TIMEDELTA is not referenced anywhere in this module --
# confirm it is still needed.
PRUNE_TIMEDELTA = datetime.timedelta(weeks=4)
SETTING_NAME = "inactive-prune"
# Days of channel inactivity tolerated before the bot parts (0+; unset/None
# disables pruning at that scope).
SETTING = utils.IntRangeSetting(0, None, SETTING_NAME,
    "Amount of days of inactivity before we leave a channel")
MODE_SETTING_NAME = "inactive-prune-modes"
# Whether channels in which the bot holds a channel mode may also be pruned.
MODE_SETTING = utils.BoolSetting(MODE_SETTING_NAME,
    "Whether or not we will leave inactive channels that we have a mode in")
@utils.export("botset", SETTING)
@utils.export("serverset", SETTING)
@utils.export("serverset", MODE_SETTING)
@utils.export("channelset", MODE_SETTING)
@utils.export("channelset", utils.BoolSetting(SETTING_NAME,
    "Whether or not to leave this channel when it is inactive"))
class Module(ModuleManager.BaseModule):
    """Part channels that have seen no messages for a configured number of days.

    A "last-message" ISO-8601 timestamp is kept per channel and refreshed on
    every message sent or received; a cron job compares it against the
    bot/server/channel `inactive-prune` settings.
    """

    def _get_timestamp(self, channel):
        # ISO-8601 string of the channel's last message, or None if never set.
        return channel.get_setting("last-message", None)
    def _set_timestamp(self, channel):
        channel.set_setting("last-message",
            utils.datetime.format.iso8601(utils.datetime.utcnow()))
    def _del_timestamp(self, channel):
        channel.del_setting("last-message")

    @utils.hook("new.channel")
    def new_channel(self, event):
        # Seed the timestamp so a freshly-seen channel is not pruned for
        # having no recorded activity at all.
        if self._get_timestamp(event["channel"]) is None:
            self._set_timestamp(event["channel"])

    @utils.hook("cron")
    @utils.kwarg("schedule", "0")
    def hourly(self, event):
        """Collect and part every channel past its inactivity threshold."""
        parts = []
        now = utils.datetime.utcnow()
        botwide_days = self.bot.get_setting(SETTING_NAME, None)
        botwide_mode_setting = self.bot.get_setting(MODE_SETTING_NAME, False)
        for server in self.bot.servers.values():
            serverwide_days = server.get_setting(SETTING_NAME, botwide_days)
            if serverwide_days is None:
                # Pruning disabled for this server (and no bot-wide default).
                continue
            mode_setting = server.get_setting(
                MODE_SETTING_NAME, botwide_mode_setting)
            our_user = server.get_user(server.nickname)
            for channel in server.channels:
                # Skip channels that opted out, and channels where we hold a
                # mode unless mode-pruning is enabled.
                if (not channel.get_setting(SETTING_NAME, True) or
                        not mode_setting and channel.get_user_modes(our_user)):
                    continue
                timestamp = self._get_timestamp(channel)
                if timestamp:
                    dt = utils.datetime.parse.iso8601(timestamp)
                    if (now-dt).days >= serverwide_days:
                        parts.append([server, channel])
        for server, channel in parts:
            self.log.warn("Leaving %s:%s due to channel inactivity",
                [str(server), str(channel)])
            channel.send_part("Channel inactive")
            self._del_timestamp(channel)

    @utils.hook("send.message.channel")
    @utils.hook("received.message.channel")
    def channel_message(self, event):
        self._set_timestamp(event["channel"])
|
grapesmoker/geogame2 | geogame_core/admin_views/__init__.py | Python | gpl-2.0 | 25 | 0.04 | fro | m admin_views | import * |
cemarchi/biosphere | Src/BioAnalyzer/Managers/GenePrioritization/GlobalDifferentialMessengerRnaSampleManager.py | Python | bsd-3-clause | 2,299 | 0.007829 | from typing import Dict
from yaak import inject
from Src.BioAnalyzer.CrossCutting.DTOs.GenePrioritization.GlobalDifferentialSampleDto import GlobalDifferentialSampleDto
from Src.BioAnalyzer.CrossCutting.Filters.GenePrioritization.FeSingleGlobalDifferentialSample import \
FeSingleGlobalDifferentialSample
from Src.Core.Manager.ManagerBase import ManagerBase
class GlobalDifferentialMessengerRnaSampleManager(ManagerBase):
    """Manager for the single global differential mRNA expression sample."""

    @inject.Param(repository='GlobalDifferentialMessengerRnaSampleRepositoryBase')
    def __init__(self, repository):
        """
        :param repository: injected repository used for persistence.
        """
        super().__init__(repository)

    def add_one(self, diff_mrna_sample: GlobalDifferentialSampleDto):
        """Insert the sample, or replace the stored one if it already exists.

        :param diff_mrna_sample: sample whose values are de-duplicated and
            filtered to positive element ids before saving.
        """
        diff_mrna_sample = GlobalDifferentialSampleDto(
            values=list(set([v for v in diff_mrna_sample.values if v.element_id > 0])))
        # Probe with an empty filter ('values' projected out) just to learn
        # whether a sample document already exists.
        fe_diff_mrna = self.get_one(FeSingleGlobalDifferentialSample(), {'values': 0})
        if fe_diff_mrna.result:
            self._repository.replace_one(diff_mrna_sample)
        else:
            self._repository.add_one(diff_mrna_sample)

    def get_one(self, fe_diff_mrna: FeSingleGlobalDifferentialSample,
                include_or_exclude_fields: Dict[str, int] = None) -> FeSingleGlobalDifferentialSample:
        """Fetch the sample and apply the filter's in-memory refinements.

        :param fe_diff_mrna: filter object; also carries the result back.
        :param include_or_exclude_fields: optional projection passed through
            to the repository.
        :return: the same filter object with ``result`` populated.
        """
        fe_diff_mrna = self._repository.get_one(fe_diff_mrna, GlobalDifferentialSampleDto, include_or_exclude_fields)
        if not fe_diff_mrna.is_highly_significant:
            return fe_diff_mrna
        # Keep only values matching significance, status and element-id
        # constraints declared on the filter.
        fe_diff_mrna.result = GlobalDifferentialSampleDto(
            values=[v for v in fe_diff_mrna.result.values
                    if (not fe_diff_mrna.is_highly_significant or
                        v.is_highly_significant == fe_diff_mrna.is_highly_significant) and
                       (not fe_diff_mrna.except_status or
                        v.status != fe_diff_mrna.except_status) and
                       (not fe_diff_mrna.element_id_list or
                        v.element_id in fe_diff_mrna.element_id_list)])
        return fe_diff_mrna
|
radheygupta/editorialsnow | editorials/admin.py | Python | apache-2.0 | 703 | 0.007112 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django import forms
from .models import Editorials
class EditorialsModelForm(forms.ModelForm):
    """Admin form for Editorials with larger text-entry widgets."""

    class Meta:
        model = Editorials
        fields = '__all__'
        widgets = {
            'content': forms.Textarea(attrs={'cols': 80, 'rows': 20}),
            'subtitle': forms.Textarea(attrs={'cols': 80, 'rows': 3}),
        }
class EditorialsAdmin(admin.ModelAdmin):
    """Admin configuration for the Editorials model."""

    form = EditorialsModelForm
    list_display = ('title', 'published_date', 'news_paper')
    list_filter = ['published_date']
    search_fields = ['title']

admin.site.register(Editorials, EditorialsAdmin)
|
titusfortner/selenium | py/selenium/webdriver/support/wait.py | Python | apache-2.0 | 4,895 | 0.001839 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from selenium.common.exceptions import NoSuchElementException, InvalidSelectorException
from selenium.common.exceptions import TimeoutException
POLL_FREQUENCY = 0.5  # How long to sleep in between calls to the method (seconds)
IGNORED_EXCEPTIONS = (NoSuchElementException,)  # exceptions ignored during calls to the method by default
class WebDriverWait(object):
    def __init__(self, driver, timeout, poll_frequency=POLL_FREQUENCY, ignored_exceptions=None):
        """Constructor, takes a WebDriver instance and timeout in seconds.

        :Args:
         - driver - Instance of WebDriver (Ie, Firefox, Chrome or Remote)
         - timeout - Number of seconds before timing out
         - poll_frequency - sleep interval between calls
           By default, it is 0.5 second.
         - ignored_exceptions - iterable structure of exception classes ignored during calls.
           By default, it contains NoSuchElementException only.

        Example::

         from selenium.webdriver.support.wait import WebDriverWait \n
         element = WebDriverWait(driver, 10).until(lambda x: x.find_element(By.ID, "someId")) \n
         is_disappeared = WebDriverWait(driver, 30, 1, (ElementNotVisibleException)).\\ \n
                    until_not(lambda x: x.find_element(By.ID, "someId").is_displayed())
        """
        self._driver = driver
        self._timeout = float(timeout)
        self._poll = poll_frequency
        # avoid the divide by zero
        if self._poll == 0:
            self._poll = POLL_FREQUENCY
        exceptions = list(IGNORED_EXCEPTIONS)
        if ignored_exceptions:
            try:
                exceptions.extend(iter(ignored_exceptions))
            except TypeError:  # ignored_exceptions is not iterable
                exceptions.append(ignored_exceptions)
        self._ignored_exceptions = tuple(exceptions)

    def __repr__(self):
        return '<{0.__module__}.{0.__name__} (session="{1}")>'.format(
            type(self), self._driver.session_id)

    def until(self, method, message=''):
        """Calls the method provided with the driver as an argument until the \
        return value does not evaluate to ``False``.

        :param method: callable(WebDriver)
        :param message: optional message for :exc:`TimeoutException`
        :returns: the result of the last call to `method`
        :raises: :exc:`selenium.common.exceptions.TimeoutException` if timeout occurs
        """
        screen = None
        stacktrace = None

        end_time = time.time() + self._timeout
        while True:
            try:
                value = method(self._driver)
                if value:
                    return value
            except InvalidSelectorException as e:
                # A bad selector is a caller bug, never a condition to wait out.
                raise e
            except self._ignored_exceptions as exc:
                # Remember diagnostics from the last ignored failure so the
                # eventual TimeoutException can carry them.
                screen = getattr(exc, 'screen', None)
                stacktrace = getattr(exc, 'stacktrace', None)
            time.sleep(self._poll)
            if time.time() > end_time:
                break
        raise TimeoutException(message, screen, stacktrace)

    def until_not(self, method, message=''):
        """Calls the method provided with the driver as an argument until the \
        return value evaluates to ``False``.

        :param method: callable(WebDriver)
        :param message: optional message for :exc:`TimeoutException`
        :returns: the result of the last call to `method`, or
                  ``True`` if `method` has raised one of the ignored exceptions
        :raises: :exc:`selenium.common.exceptions.TimeoutException` if timeout occurs
        """
        end_time = time.time() + self._timeout
        while True:
            try:
                value = method(self._driver)
                if not value:
                    return value
            except InvalidSelectorException as e:
                raise e
            except self._ignored_exceptions:
                # An ignored exception counts as "condition no longer holds".
                return True
            time.sleep(self._poll)
            if time.time() > end_time:
                break
        raise TimeoutException(message)
|
innotechsoftware/Quantum-GIS | tests/src/python/test_qgsspatialindex.py | Python | gpl-2.0 | 2,052 | 0.000487 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsSpatialIndex.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alexander Bruy'
__date__ = '20/01/2011'
__copyright__ = 'Copyright 2012, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import unittest
from qgis.core import (QgsSpatialIndex,
QgsFeature,
QgsGeometry,
QgsRectangle,
QgsPoint)
from utilities import getQgisTestApp
# Shared QGIS application/canvas fixtures used by the tests below.
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
class TestQgsSpatialIndex(unittest.TestCase):

    def testIndex(self):
        """Populate an index with a point grid and verify intersection and
        nearest-neighbour queries."""
        idx = QgsSpatialIndex()
        fid = 0
        for y in range(5, 15, 5):
            for x in range(5, 25, 5):
                ft = QgsFeature()
                ft.setFeatureId(fid)
                ft.setGeometry(QgsGeometry.fromPoint(QgsPoint(x, y)))
                idx.insertFeature(ft)
                fid += 1

        # intersection test
        rect = QgsRectangle(7.0, 3.0, 17.0, 13.0)
        fids = idx.intersects(rect)
        myExpectedValue = 4
        myValue = len(fids)
        myMessage = 'Expected: %s Got: %s' % (myExpectedValue, myValue)
        self.assertEqual(myValue, myExpectedValue, myMessage)
        fids.sort()
        myMessage = ('Expected: %s\nGot: %s\n' %
                     ([1, 2, 5, 6], fids))
        assert fids == [1, 2, 5, 6], myMessage

        # nearest neighbor test
        fids = idx.nearestNeighbor(QgsPoint(8.75, 6.25), 3)
        # The expected count was previously 0 and never asserted; a
        # 3-neighbour query over this grid must return 3 ids.
        myExpectedValue = 3
        myValue = len(fids)
        myMessage = 'Expected: %s Got: %s' % (myExpectedValue, myValue)
        self.assertEqual(myValue, myExpectedValue, myMessage)
        fids.sort()
        myMessage = ('Expected: %s\nGot: %s\n' %
                     ([0, 1, 5], fids))
        assert fids == [0, 1, 5], myMessage
|
dayatz/taiga-back | taiga/projects/epics/models.py | Python | agpl-3.0 | 5,936 | 0.003033 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.postgres.fields import ArrayField
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from taiga.base.utils.colors import generate_random_predefined_hex_color
from taiga.base.utils.time import timestamp_ms
from taiga.projects.tagging.models import TaggedMixin
from taiga.projects.occ import OCCModelMixin
from taiga.projects.notifications.mixins import WatchedModelMixin
from taiga.projects.mixins.blocked import BlockedMixin
class Epic(OCCModelMixin, WatchedModelMixin, BlockedMixin, TaggedMixin, models.Model):
    """Project epic: a large work item grouping several user stories."""
    ref = models.BigIntegerField(db_index=True, null=True, blank=True, default=None,
                                 verbose_name=_("ref"))
    project = models.ForeignKey("projects.Project", null=False, blank=False,
                                related_name="epics", verbose_name=_("project"))
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
                              related_name="owned_epics", verbose_name=_("owner"),
                              on_delete=models.SET_NULL)
    status = models.ForeignKey("projects.EpicStatus", null=True, blank=True,
                               related_name="epics", verbose_name=_("status"),
                               on_delete=models.SET_NULL)
    epics_order = models.BigIntegerField(null=False, blank=False, default=timestamp_ms,
                                         verbose_name=_("epics order"))
    created_date = models.DateTimeField(null=False, blank=False,
                                        verbose_name=_("created date"),
                                        default=timezone.now)
    modified_date = models.DateTimeField(null=False, blank=False,
                                         verbose_name=_("modified date"))
    subject = models.TextField(null=False, blank=False,
                               verbose_name=_("subject"))
    description = models.TextField(null=False, blank=True, verbose_name=_("description"))
    color = models.CharField(max_length=32, null=False, blank=True,
                             default=generate_random_predefined_hex_color,
                             verbose_name=_("color"))
    assigned_to = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True,
                                    default=None, related_name="epics_assigned_to_me",
                                    verbose_name=_("assigned to"))
    client_requirement = models.BooleanField(default=False, null=False, blank=True,
                                             verbose_name=_("is client requirement"))
    team_requirement = models.BooleanField(default=False, null=False, blank=True,
                                           verbose_name=_("is team requirement"))
    user_stories = models.ManyToManyField("userstories.UserStory", related_name="epics",
                                          through='RelatedUserStory',
                                          verbose_name=_("user stories"))
    external_reference = ArrayField(models.TextField(null=False, blank=False),
                                    null=True, blank=True, default=None, verbose_name=_("external reference"))
    attachments = GenericRelation("attachments.Attachment")
    # presumably set by project-import code to suppress timestamp updates -- confirm
    _importing = None

    class Meta:
        verbose_name = "epic"
        verbose_name_plural = "epics"
        ordering = ["project", "epics_order", "ref"]

    def __str__(self):
        return "#{0} {1}".format(self.ref, self.subject)

    def __repr__(self):
        return "<Epic %s>" % (self.id)

    def save(self, *args, **kwargs):
        # Refresh modified_date unless an import supplied its own value.
        if not self._importing or not self.modified_date:
            self.modified_date = timezone.now()
        if not self.status:
            # Fall back to the project's configured default epic status.
            self.status = self.project.default_epic_status
        super().save(*args, **kwargs)
class RelatedUserStory(WatchedModelMixin, models.Model):
    """M2M "through" model attaching a user story to an epic with an order."""
    user_story = models.ForeignKey("userstories.UserStory", on_delete=models.CASCADE)
    epic = models.ForeignKey("epics.Epic", on_delete=models.CASCADE)
    # Defaults to the creation timestamp, so newly-added links sort last.
    order = models.BigIntegerField(null=False, blank=False, default=timestamp_ms,
                                   verbose_name=_("order"))

    class Meta:
        verbose_name = "related user story"
        verbose_name_plural = "related user stories"
        ordering = ["user_story", "order", "id"]
        unique_together = (("user_story", "epic"), )

    def __str__(self):
        return "{0} - {1}".format(self.epic_id, self.user_story_id)

    # The properties below delegate to the parent epic -- presumably so
    # generic permission/notification code can read project/owner off this
    # link row directly; confirm against the callers.
    @property
    def project(self):
        return self.epic.project

    @property
    def project_id(self):
        return self.epic.project_id

    @property
    def owner(self):
        return self.epic.owner

    @property
    def owner_id(self):
        return self.epic.owner_id

    @property
    def assigned_to_id(self):
        return self.epic.assigned_to_id
|
mikaperlin/scripts-configs-etc | templates/py_figs.py | Python | mit | 1,445 | 0.018685 | # dependency
import matplotlib as mp
# set fonts and use latex packages
# NOTE(review): `font_size` is only defined further down this file, and
# `rcParams`/`plt` are never imported here -- these look like copy-paste
# snippets rather than a runnable script; confirm before executing as-is.
params = { "font.family" : "serif",
           "font.serif" : "Computer Modern",
           "text.usetex" : True,
           "text.latex.preamble" : r"\usepackage{amsmath}",
           "font.size" : font_size }
rcParams.update(params)
# default color cycle and conversion between hex and RGB color values
color_cycle = plt.rcParams["axes.prop_cycle"].by_key()["color"]
def hex_to_rgb(color):
    """Map a '#rrggbb' string to a 3-tuple of floats in [0, 1).

    NOTE(review): only the leading hex digit of each channel is read and
    divided by 16 -- confirm this coarse conversion is intentional.
    """
    leading_digits = (color[1 + 2 * chan] for chan in range(3))
    return tuple(int(digit, 16) / 16 for digit in leading_digits)
# rasterizing figure, but not the text
gca().set_rasterization_zorder(1) # rasterized anything with zorder < 1
# plot(stuff, zorder = 0)
# xlabel(words, zorder = 1)
# savefig(things, rasterized = True, dpi = fig_dpi)
# figure parameters
fig_x = 15
fig_y = 10
font_size = 12
lineWidth = 1
rcParams["font.size"] = font_size
rcParams["axes.titlesize"] = font_size
rcParams["legend.fontsize"] = font_size
rcParams["legend.numpoints"] = 1
rcParams["xtick.labelsize"] = font_size
rcParams["ytick.labelsize"] = font_size
rcParams["lines.linewidth"] = lineWidth
# NOTE(review): "axes.color_cycle" was removed in matplotlib 2.0; modern
# code should set "axes.prop_cycle" instead -- confirm target version.
rcParams["axes.color_cycle"] = ["k","b","g","r","c","m","y"]
# using scientific notation for plot axes
plt.ticklabel_format(axis = "both", style = "sci", scilimits = (-2,2))
# setting colorbar limits (with pcolormesh) and scientific notation
cb = colorbar()
clim(0,1) # range from 0 to 1
cb.formatter.set_powerlimits((-2, 3))
cb.update_ticks()
|
packagecontrol/st_package_reviewer | st_package_reviewer/check/repo/check_readme.py | Python | mit | 188 | 0 | from . import RepoChecker
class CheckReadme(RepoChecker):
    """Fail the review when the repository has no README file."""

    def check(self):
        readme = self.repo.readme()
        if not readme:
            self.fail("Missing a README file")
|
sonntagsgesicht/regtest | .aux/venv/lib/python3.9/site-packages/pygments/lexers/stata.py | Python | apache-2.0 | 6,414 | 0.002027 | """
pygments.lexers.stata
~~~~~~~~~~~~~~~~~~~~~
Lexer for Stata
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default, include, words
from pygments.token import Comment, Keyword, Name, Number, \
String, Text, Operator
from pygments.lexers._stata_builtins import builtins_base, builtins_functions
__all__ = ['StataLexer']
class StataLexer(RegexLexer):
    """
    For `Stata <http://www.stata.com/>`_ do files.

    .. versionadded:: 2.2
    """
    # Syntax based on
    # - http://fmwww.bc.edu/RePEc/bocode/s/synlightlist.ado
    # - https://github.com/isagalaev/highlight.js/blob/master/src/languages/stata.js
    # - https://github.com/jpitblado/vim-stata/blob/master/syntax/stata.vim

    name = 'Stata'
    aliases = ['stata', 'do']
    filenames = ['*.do', '*.ado']
    mimetypes = ['text/x-stata', 'text/stata', 'application/x-stata']
    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            include('comments'),
            include('strings'),
            include('macros'),
            include('numbers'),
            include('keywords'),
            include('operators'),
            include('format'),
            (r'.', Text),
        ],
        # Comments are a complicated beast in Stata because they can be
        # nested and there are a few corner cases with that. See:
        # - github.com/kylebarron/language-stata/issues/90
        # - statalist.org/forums/forum/general-stata-discussion/general/1448244
        'comments': [
            (r'(^//|(?<=\s)//)(?!/)', Comment.Single, 'comments-double-slash'),
            (r'^\s*\*', Comment.Single, 'comments-star'),
            (r'/\*', Comment.Multiline, 'comments-block'),
            (r'(^///|(?<=\s)///)', Comment.Special, 'comments-triple-slash')
        ],
        'comments-block': [
            (r'/\*', Comment.Multiline, '#push'),
            # this ends and restarts a comment block. but need to catch this so
            # that it doesn't start _another_ level of comment blocks
            (r'\*/\*', Comment.Multiline),
            (r'(\*/\s+\*(?!/)[^\n]*)|(\*/)', Comment.Multiline, '#pop'),
            # Match anything else as a character inside the comment
            (r'.', Comment.Multiline),
        ],
        'comments-star': [
            (r'///.*?\n', Comment.Single,
             ('#pop', 'comments-triple-slash')),
            (r'(^//|(?<=\s)//)(?!/)', Comment.Single,
             ('#pop', 'comments-double-slash')),
            (r'/\*', Comment.Multiline, 'comments-block'),
            (r'.(?=\n)', Comment.Single, '#pop'),
            (r'.', Comment.Single),
        ],
        'comments-triple-slash': [
            (r'\n', Comment.Special, '#pop'),
            # A // breaks out of a comment for the rest of the line
            (r'//.*?(?=\n)', Comment.Single, '#pop'),
            (r'.', Comment.Special),
        ],
        'comments-double-slash': [
            (r'\n', Text, '#pop'),
            (r'.', Comment.Single),
        ],
        # `"compound string"' and regular "string"; note the former are
        # nested.
        'strings': [
            (r'`"', String, 'string-compound'),
            (r'(?<!`)"', String, 'string-regular'),
        ],
        'string-compound': [
            (r'`"', String, '#push'),
            (r'"\'', String, '#pop'),
            (r'\\\\|\\"|\\\$|\\`|\\\n', String.Escape),
            include('macros'),
            (r'.', String)
        ],
        'string-regular': [
            (r'(")(?!\')|(?=\n)', String, '#pop'),
            (r'\\\\|\\"|\\\$|\\`|\\\n', String.Escape),
            include('macros'),
            (r'.', String)
        ],
        # A local is usually
        #     `\w{0,31}'
        #     `:extended macro'
        #     `=expression'
        #     `[rsen](results)'
        #     `(++--)scalar(++--)'
        #
        # However, there are all sorts of weird rules wrt edge
        # cases. Instead of writing 27 exceptions, anything inside
        # `' is a local.
        #
        # A global is more restricted, so we do follow rules. Note only
        # locals explicitly enclosed ${} can be nested.
        'macros': [
            (r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested'),
            (r'\$', Name.Variable.Global, 'macro-global-name'),
            (r'`', Name.Variable, 'macro-local'),
        ],
        'macro-local': [
            (r'`', Name.Variable, '#push'),
            (r"'", Name.Variable, '#pop'),
            (r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested'),
            (r'\$', Name.Variable.Global, 'macro-global-name'),
            (r'.', Name.Variable),  # fallback
        ],
        'macro-global-nested': [
            (r'\$(\{|(?=[$`]))', Name.Variable.Global, '#push'),
            (r'\}', Name.Variable.Global, '#pop'),
            (r'\$', Name.Variable.Global, 'macro-global-name'),
            (r'`', Name.Variable, 'macro-local'),
            (r'\w', Name.Variable.Global),  # fallback
            default('#pop'),
        ],
        'macro-global-name': [
            (r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested', '#pop'),
            (r'\$', Name.Variable.Global, 'macro-global-name', '#pop'),
            (r'`', Name.Variable, 'macro-local', '#pop'),
            (r'\w{1,32}', Name.Variable.Global, '#pop'),
        ],
        # Built in functions and statements
        'keywords': [
            (words(builtins_functions, prefix = r'\b', suffix = r'(?=\()'),
             Name.Function),
            (words(builtins_base, prefix = r'(^\s*|\s)', suffix = r'\b'),
             Keyword),
        ],
        # http://www.stata.com/help.cgi?operators
        'operators': [
            (r'-|==|<=|>=|<|>|&|!=', Operator),
            (r'\*|\+|\^|/|!|~|==|~=', Operator)
        ],
        # Stata numbers
        'numbers': [
            # decimal number
            (r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[i]?\b',
             Number),
        ],
        # Stata formats
        'format': [
            (r'%-?\d{1,2}(\.\d{1,2})?[gfe]c?', Name.Other),
            (r'%(21x|16H|16L|8H|8L)', Name.Other),
            (r'%-?(tc|tC|td|tw|tm|tq|th|ty|tg)\S{0,32}', Name.Other),
            (r'%[-~]?\d{1,4}s', Name.Other),
        ]
    }
|
nabsboss/CouchPotatoServer | libs/guessit/__init__.py | Python | gpl-3.0 | 5,753 | 0.00365 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__version__ = '0.5.2'
__all__ = ['Guess', 'Language',
'guess_file_info', 'guess_video_info',
'guess_movie_info', 'guess_episode_info']
# Do python3 detection before importing any other module, to be sure that
# it will then always be available
# with code from http://lucumr.pocoo.org/2011/1/22/forwards-compatible-python/
import sys
if sys.version_info[0] >= 3:
    PY3 = True
    unicode_text_type = str
    native_text_type = str
    base_text_type = str

    def u(x):
        """Coerce x to unicode text."""
        return str(x)

    def s(x):
        """Identity on py3; the py2 variant converts unicode (also inside
        lists/tuples/dicts) to utf-8 bytes."""
        return x

    class UnicodeMixin(object):
        __str__ = lambda x: x.__unicode__()

    import binascii

    def to_hex(x):
        """Hex-encode a bytes object to a text string."""
        return binascii.hexlify(x).decode('utf-8')

else:
    PY3 = False
    __all__ = [str(s) for s in __all__]  # fix imports for python2
    unicode_text_type = unicode
    native_text_type = str
    base_text_type = basestring

    def u(x):
        if isinstance(x, str):
            return x.decode('utf-8')
        return unicode(x)

    def s(x):
        if isinstance(x, unicode):
            return x.encode('utf-8')
        if isinstance(x, list):
            return [s(y) for y in x]
        if isinstance(x, tuple):
            return tuple(s(y) for y in x)
        if isinstance(x, dict):
            return dict((s(key), s(value)) for key, value in x.items())
        return x

    class UnicodeMixin(object):
        __str__ = lambda x: unicode(x).encode('utf-8')

    def to_hex(x):
        return x.encode('hex')
from guessit.guess import Guess, merge_all
from guessit.language import Language
from guessit.matcher import IterativeMatcher
import logging
log = logging.getLogger(__name__)

# Swallow records when the host application has not configured logging.
# NOTE(review): logging.NullHandler (stdlib since 2.7) could replace this
# hand-rolled class -- confirm minimum supported Python version.
class NullHandler(logging.Handler):
    def emit(self, record):
        pass

# let's be a nicely behaving library
h = NullHandler()
log.addHandler(h)
def guess_file_info(filename, filetype, info=None):
    """info can contain the names of the various plugins, such as 'filename' to
    detect filename info, or 'hash_md5' to get the md5 hash of the file.

    >>> guess_file_info('tests/dummy.srt', 'autodetect', info = ['hash_md5', 'hash_sha1'])
    {'hash_md5': 'e781de9b94ba2753a8e2945b2c0a123d', 'hash_sha1': 'bfd18e2f4e5d59775c2bc14d80f56971891ed620'}
    """
    result = []
    hashers = []
    if info is None:
        info = ['filename']
    if isinstance(info, base_text_type):
        # Allow a single plugin name to be passed as a bare string.
        info = [info]
    for infotype in info:
        if infotype == 'filename':
            m = IterativeMatcher(filename, filetype=filetype)
            result.append(m.matched())
        elif infotype == 'hash_mpc':
            from guessit.hash_mpc import hash_file
            try:
                result.append(Guess({'hash_mpc': hash_file(filename)},
                                    confidence=1.0))
            except Exception as e:
                log.warning('Could not compute MPC-style hash because: %s' % e)
        elif infotype == 'hash_ed2k':
            from guessit.hash_ed2k import hash_file
            try:
                result.append(Guess({'hash_ed2k': hash_file(filename)},
                                    confidence=1.0))
            except Exception as e:
                log.warning('Could not compute ed2k hash because: %s' % e)
        elif infotype.startswith('hash_'):
            # Any other hash_* name is looked up in hashlib; actual hashing is
            # deferred so all requested digests share a single file pass below.
            import hashlib
            hashname = infotype[5:]
            try:
                hasher = getattr(hashlib, hashname)()
                hashers.append((infotype, hasher))
            except AttributeError:
                log.warning('Could not compute %s hash because it is not available from python\'s hashlib module' % hashname)
        else:
            log.warning('Invalid infotype: %s' % infotype)
    # do all the hashes now, but on a single pass
    if hashers:
        try:
            blocksize = 8192
            hasherobjs = dict(hashers).values()
            with open(filename, 'rb') as f:
                chunk = f.read(blocksize)
                while chunk:
                    for hasher in hasherobjs:
                        hasher.update(chunk)
                    chunk = f.read(blocksize)
            for infotype, hasher in hashers:
                result.append(Guess({infotype: hasher.hexdigest()},
                                    confidence=1.0))
        except Exception as e:
            log.warning('Could not compute hash because: %s' % e)
    result = merge_all(result)
    # last minute adjustments
    # if country is in the guessed properties, make it part of the filename
    if 'country' in result:
        result['series'] += ' (%s)' % result['country'].alpha2.upper()
    return result
def guess_video_info(filename, info=None):
    """Guess video info, autodetecting whether it is a movie or an episode."""
    return guess_file_info(filename, 'autodetect', info)
def guess_movie_info(filename, info=None):
    """Guess info for a filename known to be a movie."""
    return guess_file_info(filename, 'movie', info)
def guess_episode_info(filename, info=None):
    """Guess info for a filename known to be a series episode."""
    return guess_file_info(filename, 'episode', info)
|
flomotlik/formica | tests/unit/test_config_file.py | Python | mit | 4,899 | 0.001429 | import pytest
from path import Path
import yaml
from uuid import uuid4
from formica import cli
from tests.unit.constants import (REGION, PROFILE, STACK,
CHANGE_SET_PARAMETERS, CHANGE_SET_STACK_TAGS,
FULL_CONFIG_FILE, CHANGE_SET_CAPABILITIES,
ROLE_ARN, VARS)
def test_loads_config_file(mocker, tmpdir, session):
    """Every value in a single config file ends up on the parsed args."""
    stacks_mock = mocker.patch('formica.cli.stacks')
    config_name = 'test.config.yaml'
    with Path(tmpdir):
        with open(config_name, 'w') as config_file:
            config_file.write(yaml.dump(FULL_CONFIG_FILE))
        cli.main(['stacks', '-c', config_name])
    parsed = stacks_mock.call_args[0][0]
    expected_values = [
        ('region', REGION),
        ('profile', PROFILE),
        ('stack', STACK),
        ('parameters', CHANGE_SET_PARAMETERS),
        ('tags', CHANGE_SET_STACK_TAGS),
        ('capabilities', CHANGE_SET_CAPABILITIES),
        ('role_arn', ROLE_ARN),
        ('vars', VARS),
    ]
    for attribute, value in expected_values:
        assert getattr(parsed, attribute) == value
def test_loads_multiple_config_files(mocker, tmpdir, session):
    """Values from a later config file override those from an earlier one."""
    stacks = mocker.patch('formica.cli.stacks')
    file_name = 'test.config.yaml'
    overwrite_file = 'overwrite.config.yaml'
    with Path(tmpdir):
        with open(file_name, 'w') as f:
            f.write(yaml.dump(FULL_CONFIG_FILE))
        with open(overwrite_file, 'w') as f:
            f.write(yaml.dump(dict(stack='someotherstacktestvalue', vars=dict(OtherVar=3))))
        cli.main(['stacks', '-c', file_name, overwrite_file])
    call_args = stacks.call_args[0][0]
    # Both the scalar `stack` and entries inside `vars` are overridden.
    assert call_args.stack == 'someotherstacktestvalue'
    assert call_args.stack != STACK
    assert call_args.vars['OtherVar'] == 3
    assert call_args.vars['OtherVar'] != VARS['OtherVar']
def test_prioritises_cli_args(mocker, tmpdir, session):
    """A value given on the command line wins over the config-file value."""
    # NOTE: despite the variable name, this patches the 'new' command.
    stacks = mocker.patch('formica.cli.new')
    cli_stack = str(uuid4())
    file_name = 'test.config.yaml'
    with Path(tmpdir):
        with open(file_name, 'w') as f:
            f.write(yaml.dump(FULL_CONFIG_FILE))
        cli.main(['new', '-s', cli_stack, '-c', file_name])
    call_args = stacks.call_args[0][0]
    assert call_args.stack == cli_stack
    assert call_args.stack != STACK
def test_merges_cli_args_on_load(mocker, tmpdir, session):
stacks = mocker.patch('formica.cli.new')
param1 = str(uuid4())
param2 = str(uuid4())
file_name = 'test.config.yaml'
with Path(tmpdir):
with open(file_name, 'w') as f:
f.write(yaml.dump(FULL_CONFIG_FILE))
cli.main(['new', '--parameters', "A={}".format(param1), "D={}".format(param2), '-c', file_name])
call_args = stacks.call_args[0][0]
assert call_args.parameters == {"A": para | m1, "B": 2, 'C': True, 'D': param2}
def test_merges_vars(mocker, tmpdir, session):
stacks = mocker.patch('formica.cli.template')
param1 = str(uuid4())
file_name = 'test.config.yaml'
with Path(tmpdir):
with open(file_name, 'w') as f:
f.write(yaml.dump(FULL_CONFIG_FILE))
with open('test.template.yaml', 'w') as f:
f.write('{"Description": "{{ OtherVar }}"}')
cli.main(['template', '--vars', "OtherVar={}".format(param1), ' | -c', file_name])
call_args = stacks.call_args[0][0]
assert call_args.vars['OtherVar'] == param1
def test_exception_with_wrong_config_type(mocker, tmpdir, session, logger):
    """A config value of the wrong type exits and logs a helpful error."""
    file_name = 'test.config.yaml'
    with Path(tmpdir):
        with open(file_name, 'w') as f:
            # 'stack' must be a string; a list should be rejected.
            f.write(yaml.dump({'stack': ['test', 'test2']}))
        with pytest.raises(SystemExit):
            cli.main(['stacks', '-c', file_name])
    logger.error.assert_called_with('Config file parameter stack needs to be of type str')
def test_exception_with_forbiddeng_config_argument(mocker, tmpdir, session, logger):
    """An unsupported config key exits and logs an error.

    NOTE(review): 'forbiddeng' in the function name is a typo for
    'forbidden'; left as-is to avoid breaking test selection by name.
    """
    file_name = 'test.config.yaml'
    with Path(tmpdir):
        with open(file_name, 'w') as f:
            # 'stacks' is a command name, not a valid config parameter.
            f.write(yaml.dump({'stacks': 'somestack'}))
        with pytest.raises(SystemExit):
            cli.main(['stacks', '-c', file_name])
    logger.error.assert_called_with('Config file parameter stacks is not supported')
def test_exception_with_failed_yaml_syntax(mocker, tmpdir, session, logger):
    """Invalid YAML in the config file exits and logs an error."""
    file_name = 'test.config.yaml'
    with Path(tmpdir):
        with open(file_name, 'w') as f:
            # Second line is missing the colon, so parsing must fail.
            f.write("stacks: somestack\nprofile testprofile")
        with pytest.raises(SystemExit):
            cli.main(['stacks', '-c', file_name])
    logger.error.assert_called()
def test_loads_empty_config_file(mocker, tmpdir, session):
    """An empty config file is accepted; the command must not raise."""
    # Patching 'stacks' keeps the command inert; the test passes if
    # cli.main completes without an exception.
    stacks = mocker.patch('formica.cli.stacks')
    file_name = 'test.config.yaml'
    with Path(tmpdir):
        with open(file_name, 'w') as f:
            f.write('')
        cli.main(['stacks', '-c', file_name])
|
LLNL/spack | var/spack/repos/builtin/packages/py-azure-mgmt-iotcentral/package.py | Python | lgpl-2.1 | 955 | 0.002094 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyAzureMgmtIotcentral(PythonPackage):
"""Microsoft Azure IoTCentral Management Client Library for Python."""
homepage = "https://github.com/Azure/azure-sdk-for-python"
pypi = "azure-mgmt-iotcentral/azure-mgmt-iotcentral-3.1.0.zip"
version('3.1.0', sha | 256='c175f6642be514ad0efd3dc03d09e50d923596fd9e634381793dcc46bb8a57c7')
| version('3.0.0', sha256='f6dacf442ccae2f18f1082e80bcbdcaa8c0efa2ba92b48c5db6ee01d37240047')
depends_on('py-setuptools', type='build')
depends_on('py-msrest@0.5.0:', type=('build', 'run'))
depends_on('py-msrestazure@0.4.32:1', type=('build', 'run'))
depends_on('py-azure-common@1.1:1', type=('build', 'run'))
depends_on('py-azure-mgmt-nspkg', when='^python@:2', type=('build', 'run'))
|
openstack/oslo.serialization | oslo_serialization/serializer/json_serializer.py | Python | apache-2.0 | 1,376 | 0 | # Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_serialization.serializer.base_serializer import BaseSerializer
class JSONSerializer(BaseSerializer):
"""JSON serializer based on the jsonutils module."""
def __init__(self, default=jsonutils.to_primitive, encoding='utf-8'):
self._default = default
self._encoding = encod | ing
def dump(self, obj, fp):
return jsonutils.dump(obj, fp)
def dump_as_bytes(self, ob | j):
return jsonutils.dump_as_bytes(obj, default=self._default,
encoding=self._encoding)
def load(self, fp):
return jsonutils.load(fp, encoding=self._encoding)
def load_from_bytes(self, s):
return jsonutils.loads(s, encoding=self._encoding)
|
andrei-karalionak/ggrc-core | src/service_specs/environment.py | Python | apache-2.0 | 1,401 | 0.015703 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import threading
from ggrc import db
from ggrc.app import app
from ggrc.models import create_db, drop_db
from wsgiref.simple_server import make_server
from ggrc import settings
use_migrations = True
def before_all(context):
context.base_url = 'http://localhost:9000'
create_db(use_migrations)
app.debug = False
app.testing = True
if getattr(settings, 'MEMCACHE_MECHANISM', False) is True:
from google.appengine.api import memcache
from google.appengine.ext import testbed
context.testbed = testbed.Testbed()
context.testbed.activate()
context.testbed.init_memcache_stub()
context.query_count = 0
def increment_query_count(conn, clauseelement, multiparams, params):
context.query_count += 1
from sqlalchemy import event
event.listen(db.engine, "before_execute", increment_query_count)
context.server = make_server('', 9000, app)
context.thread = threading.Thread(target=context.server.serve_forever)
| context.thread.start()
def after_all(context):
context.server.shutdown()
context.thread.join()
db.session.remove()
drop_db(use_migrations)
i | f getattr(settings, 'MEMCACHE_MECHANISM', False) is True:
from google.appengine.api import memcache
from google.appengine.ext import testbed
context.testbed.deactivate()
|
harpesichord/Door-Access | door/exception.py | Python | mit | 679 | 0.005891 | class ShirtsioError(Exception):
def __init__(self, message=None, http_body=None, http_status=None, json_body=None):
super(ShirtsioError, self).__init__(message)
self.http_body = http_body and http_body.decode('utf-8')
self.http | _status = http_status
self.json_body = json_body
class APIError(ShirtsioError):
    """Generic error reported by the remote API."""
    pass
class APIConnectionError(ShirtsioError):
    """Raised when a connection to the API cannot be established."""
    pass
class InvalidRequestError(ShirtsioError):
def __init__(self, message, http_body=None, http_s | tatus=None, json_body=None):
super(InvalidRequestError, self).__init__(message, http_body, http_status, json_body)
class AuthenticationError(ShirtsioError):
    """Raised when authentication with the API fails."""
    pass
arky/pootle-dev | pootle/runner.py | Python | gpl-2.0 | 6,998 | 0.000286 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Zuza Software Foundation
#
# This file is part of Pootle.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os
import sys
from optparse import OptionParser
from django.core import management
import syspath_override
#: Length for the generated :setting:`SECRET_KEY`
KEY_LENGTH = 50 |
#: Default path for the settings file
DEFAULT_SETTINGS_PATH = '~/.pootle/pootle.conf'
#: Template that will be used to initialize settings from
SETTINGS_TEMPLATE_FILENAME = 'settings/90-local.conf.sample'
|
def init_settings(settings_filepath, template_filename):
    """Initializes a sample settings file for new installations.

    :param settings_filepath: The target file path where the initial settings
        will be written to.
    :param template_filename: Template file used to initialize settings from.
    """
    import base64

    # Make sure the target directory exists before trying to write into it.
    dirname = os.path.dirname(settings_filepath)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)

    # Render the template with a freshly generated random secret key.
    # ``with`` blocks close both files even on error; the previous code
    # leaked the template handle and left the target file open on failure.
    with open(template_filename) as template_file:
        output = template_file.read() % {
            'default_key': base64.b64encode(os.urandom(KEY_LENGTH)),
        }
    with open(settings_filepath, 'w') as settings_file:
        settings_file.write(output)
def parse_args(args):
    """Split command-line arguments into options, command and command args.

    The command is the first argument that does not start with a dash;
    everything before it is kept as global options and everything after it
    belongs to the command.

    :param args: List of command-line arguments as got from sys.argv.
    :return: 3-element tuple: (args, command, command_args)
    """
    command_pos = next(
        (i for i, arg in enumerate(args) if not arg.startswith('-')),
        None,
    )
    if command_pos is None:
        return (args, None, [])
    return (args[:command_pos], args[command_pos], args[command_pos + 1:])
def configure_app(project, config_path, django_settings_module, runner_name):
    """Determines which settings file to use and sets environment variables
    accordingly.
    :param project: Project's name. Will be used to generate the settings
        environment variable.
    :param config_path: The path to the user's configuration file.
    :param django_settings_module: The module that ``DJANGO_SETTINGS_MODULE``
        will be set to.
    :param runner_name: The name of the running script.
    """
    # e.g. project 'pootle' -> env var 'POOTLE_SETTINGS'.
    settings_envvar = project.upper() + '_SETTINGS'
    # Normalize path and expand ~ constructions
    config_path = os.path.normpath(os.path.abspath(
        os.path.expanduser(config_path),
    )
    )
    # Abort early when neither the config file nor the override env var
    # exists; exit code 2 signals a usage/configuration problem.
    if not (os.path.exists(config_path) or
            os.environ.get(settings_envvar, None)):
        print u"Configuration file does not exist at %r or " \
              u"%r environment variable has not been set.\n" \
              u"Use '%s init' to initialize the configuration file." % \
              (config_path, settings_envvar, runner_name)
        sys.exit(2)
    # setdefault(): an env var already exported by the user wins over the
    # computed config path.
    os.environ.setdefault(settings_envvar, config_path)
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', django_settings_module)
def run_app(project, default_settings_path, settings_template,
            django_settings_module):
    """Wrapper around django-admin.py.
    :param project: Project's name.
    :param default_settings_path: Default filepath to search for custom
        settings. This will also be used as a default location for writing
        initial settings.
    :param settings_template: Template file for initializing settings from.
    :param django_settings_module: The module that ``DJANGO_SETTINGS_MODULE``
        will be set to.
    """
    sys_args = sys.argv
    runner_name = os.path.basename(sys_args[0])
    (args, command, command_args) = parse_args(sys_args[1:])
    # No command and no options at all: show usage and bail out.
    if not (command or args):
        # XXX: Should we display a more verbose help/usage message?
        print "Usage: %s [--config=/path/to/settings.conf] [command] " \
              "[options]" % runner_name
        sys.exit(2)
    # 'init' is handled here, without Django: write the initial settings file.
    if command == 'init':
        noinput = '--noinput' in command_args
        if noinput:
            command_args.remove('--noinput')
        # Determine which config file to write
        try:
            import re
            config_path = command_args[0]
            # Remove possible initial dashes
            config_path = re.sub('^-+', '', config_path)
        except IndexError:
            config_path = default_settings_path
        config_path = os.path.expanduser(config_path)
        if os.path.exists(config_path):
            resp = None
            # --noinput answers 'n' automatically instead of prompting.
            if noinput:
                resp = 'n'
            while resp not in ('Y', 'n'):
                resp = raw_input('File already exists at %r, overwrite? [nY] ' \
                                 % config_path)
            if resp == 'n':
                print "File already exists, not overwriting."
                return
        try:
            init_settings(config_path, settings_template)
        except (IOError, OSError) as e:
            # Re-raise the same exception type with a clearer message
            # (Python 2 raise syntax).
            raise e.__class__, 'Unable to write default settings file to %r' \
                % config_path
        print "Configuration file created at %r" % config_path
        return
    # All other commands are forwarded to Django's command-line machinery.
    parser = OptionParser()
    parser.add_option('--config', metavar='CONFIG',
                      default=default_settings_path,
                      help=u'Use the specified configuration file.')
    parser.add_option('-v', '--version', action='store_true',
                      default=False,
                      help=u'Display version information and exit.')
    (opts, opt_args) = parser.parse_args(args)
    if opts.version:
        from pootle import __version__
        from translate import __version__ as tt_version
        from django import get_version
        print "Pootle %s" % __version__.sver
        print "Translate Toolkit %s" % tt_version.sver
        print "Django %s" % get_version()
        return
    # Export <PROJECT>_SETTINGS / DJANGO_SETTINGS_MODULE before any Django
    # code is imported by the management command.
    configure_app(project=project, config_path=opts.config,
                  django_settings_module=django_settings_module,
                  runner_name=runner_name)
    management.execute_from_command_line([runner_name, command] + command_args)
    sys.exit(0)
def main():
    """Entry point: run Pootle with the bundled settings template."""
    src_dir = os.path.abspath(os.path.dirname(__file__))
    settings_template = os.path.join(src_dir, SETTINGS_TEMPLATE_FILENAME)
    run_app(project='pootle',
            default_settings_path=DEFAULT_SETTINGS_PATH,
            settings_template=settings_template,
            django_settings_module='pootle.settings')
if __name__ == '__main__':
main()
|
wxmgcs/devops | alarm/models.py | Python | mit | 987 | 0.008273 | #coding: utf-8
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Log(models.Model):
    """A stored alarm notification: title and content delivered to a peer.

    NOTE(review): ``peer`` appears to hold an e-mail address (its Chinese
    verbose_name means 'email') -- confirm against callers.
    """
    title = models.CharField(max_length=255, default='', blank=True, null=True, verbose_name=u'标题')
    content = models.CharField(max_length=255, default='', blank=True, null=True, verbose_name=u'内容')
    peer = models.CharField(max_length=255, default='', blank=True, null=True, verbose_name=u'邮件')
    # auto_now=True: the timestamp refreshes on every save, not only creation.
    create_time = models.DateTimeField(auto_now=True)
    def __unicode__(self):
        return self.title
    class Meta:
        default_permissions = ()
class Peer(models.Model):
peer = models.CharField(max_length=255, default='', blank=True, null=True, verbose_name=u'邮件')
status = models.IntegerField(default=0, blank=True, null=True, verbose_name=u'状态')
create_time = models.DateTimeField(aut | o_now=True)
def __unicode__(self):
ret | urn self.peer
class Meta:
default_permissions = () |
erget/tnsmaster | tnsnames/format.py | Python | mit | 101 | 0 | from enum import En | um
__autho | r__ = 'dirkfuchs'
class Format(Enum):
    """Output format selector.

    NOTE(review): member names suggest rendering either on a single line
    or in Oracle's multi-line style -- confirm against callers.
    """
    oneLine = 0
    oracle = 1
|
EdLogan18/logan-repository | plugin.program.super.favourites/default.py | Python | gpl-2.0 | 93,932 | 0.010263 | #
# | Copyright (C) 2014-
# Sean Poyser (seanpoyser@gmail.com)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHA | NTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import xbmc
import xbmcaddon
import xbmcplugin
import xbmcgui
import os
import urllib
import re
import quicknet
import player
import favourite
import history
import utils
import cache
import sfile
ADDONID = utils.ADDONID
ADDON = utils.ADDON
HOME = utils.HOME
ROOT = utils.ROOT
PROFILE = utils.PROFILE
VERSION = utils.VERSION
ICON = utils.ICON
FANART = utils.FANART
SEARCH = utils.SEARCH
BLANK = 'NULL'
GETTEXT = utils.GETTEXT
TITLE = utils.TITLE
FRODO = utils.FRODO
GOTHAM = utils.GOTHAM
HELIX = utils.HELIX
FILENAME = utils.FILENAME
FOLDERCFG = utils.FOLDERCFG
PLAYMEDIA_MODE = utils.PLAYMEDIA_MODE
ACTIVATEWINDOW_MODE = utils.ACTIVATEWINDOW_MODE
RUNPLUGIN_MODE = utils.RUNPLUGIN_MODE
ACTION_MODE = utils.ACTION_MODE
MANUAL_CMD = 'SF_MANUAL_CMD_'
#Display name no longer configurable
#DISPLAYNAME = ADDON.getSetting('DISPLAYNAME')
#if not DISPLAYNAME:
# DISPLAYNAME = 'Kodi'
DISPLAYNAME = 'Kodi'
# -----Addon Modes ----- #
_IGNORE = -10
_MAIN = -2
_SUPERSEARCH = 0 #also in capture.py
_SUPERSEARCHDEF = 10 #also in capture.py
_EXTSEARCH = 25 #used to trigger new Super Search from outside of addon
_SEPARATOR = 50
_SETTINGS = 100
_VIEWTYPE = 150
_ADDTOXBMC = 200
_XBMC = 300
_FOLDER = 400
_NEWFOLDER = 500
_PLAYMEDIA = 600
_ACTIVATEWINDOW = 650
_ACTIVATEWINDOW_XBMC = 660
_ACTIVATESEARCH = 675
_REMOVEFOLDER = 700
_REMOVEFAVE = 800
_RENAMEFOLDER = 900
_RENAMEFAVE = 1000
_THUMBFAVE = 1500
_THUMBFOLDER = 1600
_PLAYBACKMODE = 1700
_EDITTERM = 1900
_EDITFOLDER = 2000
_EDITFAVE = 2100
_SECURE = 2200
_UNSECURE = 2300
_PLAYLIST = 2400
_COLOURFOLDER = 2500
_COLOURFAVE = 2600
_RECOMMEND_KEY = 2700 #also in capture.py
_RECOMMEND_KEY_A = 2710
_RECOMMEND_IMDB = 2800
_PLAYTRAILER = 2900
_EDITSEARCH = 3000
_IMPORT = 3100
_IPLAY = 3200
_PLAYLISTFILE = 3300
_PLAYLISTITEM = 3400
_PLAYLISTBROWSE = 3500
_DELETEPLAYLIST = 3600
_COPY_TO_SF = 3700
_COPY_TO_SF_ITEM = 3800
_PLAYPLAYLIST = 3900
_URLPLAYLIST = 4000
_HISTORYSHOW = 4100
_HISTORYADD = 4200
_HISTORYREMOVE = 4300
_MANUAL = 4400
_CUT = 4500
_COPY = 4600
_PASTE = 4700
_CUTFOLDER = 4800
_COPYFOLDER = 4900
_PASTEFOLDER = 5000
_IEXPLORE = 5100
_PLAY_FILE = 5200
_PLAY_FOLDER = 5300
_PLAY_SUPER_FOLDER = 5400
_PLAY_SUPER_FOLDER_EXT = 5450
# --------------------- Addon Settings --------------------- #
SHOWNEW = ADDON.getSetting('SHOWNEW') == 'true'
SHOWXBMC = ADDON.getSetting('SHOWXBMC') == 'true'
SHOWIMPORT = ADDON.getSetting('SHOWIMPORT') == 'true'
SHOWSEP = ADDON.getSetting('SHOWSEP') == 'true'
SHOWSS = ADDON.getSetting('SHOWSS') == 'true'
SHOW_FANART = ADDON.getSetting('SHOW_FANART') == 'true'
SHOWRECOMMEND = ADDON.getSetting('SHOWRECOMMEND') == 'true'
PLAY_PLAYLISTS = ADDON.getSetting('PLAY_PLAYLISTS') == 'true'
METARECOMMEND = ADDON.getSetting('METARECOMMEND') == 'true'
SYNOPSRECOMMEND = ADDON.getSetting('SYNOPSRECOMMEND') == 'true'
RECOMMENDAUTO = ADDON.getSetting('RECOMMENDFIRST') == 'true'
INHERIT = ADDON.getSetting('INHERIT') == 'true'
REMOTE = ADDON.getSetting('REMOTE') == 'true'
SHOWIPLAY = ADDON.getSetting('SHOWIPLAY') == 'true'
SHOWIHISTORY = ADDON.getSetting('SHOWREMEMBER') == 'true'
SHOWIEXPLORE = ADDON.getSetting('SHOWIEXPLORE') == 'true'
COPY_PLAYLISTS = ADDON.getSetting('COPY_PLAYLISTS') == 'true'
ALLOW_PLAYLIST_DELETE = ADDON.getSetting('ALLOW_PLAYLIST_DELETE') == 'true'
DISABLEMOVIEVIEW = ADDON.getSetting('DISABLEMOVIEVIEW') == 'true'
ALPHA_SORT = ADDON.getSetting('ALPHA_SORT') == 'true'
LABEL_NUMERIC = ADDON.getSetting('LABEL_NUMERIC') == 'true'
DEBUG = ADDON.getSetting('DEBUG') == 'true'
DLG_MENU = ADDON.getSetting('CONTEXT_STYLE') == '1'
DEFAULT_FANART = ADDON.getSetting('DEFAULT_FANART')
VIEWTYPE = int(ADDON.getSetting('VIEWTYPE'))
CONTENTTYPE = ADDON.getSetting('CONTENTTYPE')
ART_LANDSCAPE = int(ADDON.getSetting('ART_LANDSCAPE'))
ART_BANNER = int(ADDON.getSetting('ART_BANNER'))
ART_POSTER = int(ADDON.getSetting('ART_POSTER'))
CONTENTTYPES = {}
CONTENTTYPES[GETTEXT(35029)] = 'files'
CONTENTTYPES[GETTEXT(35030)] = 'songs'
CONTENTTYPES[GETTEXT(35031)] = 'artists'
CONTENTTYPES[GETTEXT(35032)] = 'albums'
CONTENTTYPES[GETTEXT(35033)] = 'movies'
CONTENTTYPES[GETTEXT(35034)] = 'tvshows'
CONTENTTYPES[GETTEXT(35035)] = 'episodes'
CONTENTTYPES[GETTEXT(35036)] = 'musicvideos'
CONTENTTYPES[GETTEXT(35037)] = ''
if ADDON.getSetting('SHOW_STARTUP_TXT') == 'true':
utils.DialogOK(ADDON.getSetting('STARTUP_TXT'))
ADDON.setSetting('SHOW_STARTUP_TXT', 'false')
if REMOTE:
LOCATION = len(ADDON.getSetting('LOCATION')) > 0
else:
LOCATION = False
if DEFAULT_FANART == '1':
FANART = ADDON.getSetting('DEFAULT_IMAGE')
if DEFAULT_FANART == '2':
FANART = BLANK
CONTENTMODE = False
ISEARCH_EMPTY = '__iSearch__'
NUMBER_SEP = ' | '
# ---------------------------------------------------------- #
utils.CheckVersion()
global nItem
nItem = 0
global separator
separator = False
global currentFolder
currentFolder = PROFILE
def main():
    """Build the add-on's root listing: static entries, then the contents
    of the root favourites folder (PROFILE)."""
    addMainItems()
    parseFolder(PROFILE)
def setViewType():
    """Detect the currently focused view mode of the active skin and store
    its control id in the VIEWTYPE setting.

    Returns True when a view with a visible label was found and saved,
    False otherwise. The bare excepts are deliberate best-effort parsing
    of skin XML files that vary between skins.
    """
    #logic to obtain viewtype inspired by lambda
    path = 'special://skin/'
    addon = os.path.join(path, 'addon.xml')
    # Flatten the XML so the regexes can match across original line breaks.
    xml = sfile.read(addon).replace('\n','').replace('\t','')
    # Resolution folder: newer skins declare defaultresolution, older ones
    # only a <res folder="..."> attribute.
    try: src = re.compile('defaultresolution="(.+?)"').findall(xml)[0]
    except: src = re.compile('<res.+?folder="(.+?)"').findall(xml)[0]
    types = ['MyVideoNav.xml', 'MyMusicNav.xml', 'MyPrograms.xml', 'Includes_View_Modes.xml', 'IncludesViews.xml']
    views = []
    for type in types:
        view = os.path.join(path, src, type)
        view = sfile.read(view).replace('\n','').replace('\t','')
        try:
            # Collect every numeric view id declared in <views>...</views>.
            view = re.compile('<views>(.+?)</views>').findall(view)[0].split(',')
            for v in view:
                v = int(v)
                if v not in views:
                    views.append(v)
        except:
            pass
    for view in views:
        # Only the currently active view control resolves to a label.
        label = xbmc.getInfoLabel('Control.GetLabel(%d)' % view)
        if label:
            ADDON.setSetting('VIEWTYPE', str(view))
            return True
    return False
def addSuperSearch():
global separator
if not SHOWSS:
return
separator = False
addDir(GETTEXT(30054), _SUPERSEARCH, thu |
Steven-Eardley/pibot_motor | Moves.py | Python | mit | 2,279 | 0.002633 | """Some primitive movement definitions"""
import Robot
def go_straight(speed=50, distance=0):
"""
Travel forwards or backwards in a straight line.
:param speed: Speed to travel, as % of maximum
:param distance: Distance to travel, in millimetres. +ve for forwards, -ve for backwards
:return: 0 for success; 1 for failure
"""
power = speed_to_power(speed)
r = Robot.get_robot()
r.ctl.SetMotor1(power + Robot.R_OFFSET)
r.ctl.SetMotor2(power)
print "straight:\t{0} | mm at {1}% speed".format(distance, speed)
def rotate(speed=50, degrees=0):
"""
Rotate the bot while stationary.
:param degrees: Number of degrees to rotate, +ve for clockwise, -ve for anticlockwise
:return: 0 for success; 1 for failure
"""
r = Robot.get_robot()
print "rotate:\t{0} deg at {1}% speed".format(degree | s, speed)
def rotate_min(speed=50, heading=0):
    """
    Slightly smarter stationary rotate method which chooses its own direction of rotation
    :param heading: New heading to point towards (0-360)
    :return: 0 for success; 1 for failure
    """
    # NOTE(review): r is unused; presumably get_robot() is kept for its
    # singleton-initialisation side effect -- confirm.
    r = Robot.get_robot()
    heading = heading % 360
    # Clockwise for headings up to 180 degrees; otherwise take the shorter
    # anticlockwise path expressed as a negative angle.
    if 180 - heading >= 0:
        rotate(speed, heading)
    else:
        rotate(speed, -(360 - heading))
def stop():
    """
    Stop the robot
    :return: 0 for success; 1 for failure
    """
    # Delegate to the Robot singleton's own stop routine.
    r = Robot.get_robot()
    r.stop()
##################################
# Helper functions for movement #
##################################
def speed_to_power(speed_percentage):
    """Translate a speed percentage (0-100) into a raw motor power value."""
    fraction = speed_percentage / 100.0
    return Robot.MAX_POWER * fraction
if __name__ == "__main__":
""" Do a quick test if this file is executed """
control_sequence = [
lambda: go_straight(distance=10),
lambda: go_straight(distance=-10),
lambda: rotate(degrees=90),
lambda: rotate(degrees=-90),
lambda: rotate_min(heading=180),
lambda: rotate_min(heading=180),
lambda: rotate_min(heading=315),
lambda: rotate_min(heading=45),
lambda: rotate_min(heading=60),
lambda: rotate_min(heading=300)
]
test_robot = Robot.get_robot(init_commands=control_sequence)
test_robot.cs.exec_stack(wait_time=1)
|
bigswitch/horizon | openstack_dashboard/dashboards/project/stacks/forms.py | Python | apache-2.0 | 18,998 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import django
from django.conf import settings
from django.utils import html
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from oslo_utils import strutils
import six
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images \
import utils as image_utils
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
def create_upload_form_attributes(prefix, input_type, name):
    """Build the widget attribute dict for one switchable upload field.

    :type prefix: str
    :param prefix: prefix (environment, template) of field
    :type input_type: str
    :param input_type: field type (file, raw, url)
    :type name: str
    :param name: translated text label to display to user
    :rtype: dict
    :return: an attribute set to pass to form build
    """
    switch_slug = prefix + 'source'
    return {
        'class': 'switched',
        'data-switch-on': switch_slug,
        'data-%s-%s' % (switch_slug, input_type): name,
    }
class TemplateForm(forms.SelfHandlingForm):
class Meta(object):
name = _('Select Template')
help_text = _('Select a template to launch a stack.')
# TODO(jomara) - update URL choice for template & environment files
# w/ client side download when applicable
base_choices = [('file', _('File')),
('raw', _('Direct Input'))]
url_choice = [('url', _('URL'))]
attributes = {'class': 'switchable', 'data-slug': 'templatesource'}
template_source = forms.ChoiceField(label=_('Template Source'),
choices=base_choices + url_choice,
widget=forms.Select(attrs=attributes))
attributes = create_upload_form_attributes(
'template',
'file',
_('Template File'))
template_upload = forms.FileField(
label=_('Template File'),
help_text=_('A local template to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'url',
_('Template URL'))
template_url = forms.URLField(
label=_('Template URL'),
help_text=_('An external (HTTP) URL to load the template from.'),
widget=forms.TextInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'raw',
_('Template Data'))
template_data = forms.CharField(
label=_('Template Data'),
help_text=_('The raw contents of the template.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
attributes = {'data-slug': 'envsource', 'class': 'switchable'}
environment_source = forms.ChoiceField(
label=_('Environment Source'),
choices=base_choices,
widget=forms.Select(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'file',
_('Environment File'))
environment_upload = forms.FileField(
label=_('Environment File'),
help_text=_('A local environment to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'raw',
_('Environment Data'))
environment_data = forms.CharField(
label=_('Environment Data'),
help_text=_('The raw contents of the environment file.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
if django.VERSION >= (1, 9):
# Note(Itxaka): On django>=1.9 Charfield has an strip option that
# we need to set to False as to not hit
# https://bugs.launchpad.net/python-heatclient/+bug/1546166
environment_data.strip = False
template_data.strip = False
def __init__(self, *args, **kwargs):
self.next_view = kwargs.pop('next_view')
super(TemplateForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned = super(TemplateForm, self).clean()
files = self.request.FILES
self.clean_uploaded_files('template', _('template'), cleaned, files)
self.clean_uploaded_files('environment', _('environment'), cleaned,
files)
# Validate the template and get back the params.
kwargs = {}
if cleaned['environment_data']:
kwargs['environment'] = cleaned['environment_data']
try:
files, tpl =\
api.heat.get_template_files(cleaned.get('template_data'),
cleaned.get('template_url'))
kwargs['files'] = files
kwargs['template'] = tpl
validated = api.heat.template_validate(self.request, **kwargs)
cleaned['template_validate'] = validated
cleaned['template_validate']['files'] = files
cleaned['template_validate']['template'] = tpl
except Exception as e:
| raise forms.ValidationError(six.text_type(e))
return cleaned
def clean_uploaded_files(self, prefix, field_label, cleaned, files):
"""Cleans Template & Environment data from form upload.
Does some of the crunchy bits for processing uploads vs raw
data depending on what the user specified. Identical process
| for environment data & template data.
:type prefix: str
:param prefix: prefix (environment, template) of field
:type field_label: str
:param field_label: translated prefix str for messages
:type input_type: dict
:param prefix: existing cleaned fields from form
:rtype: dict
:return: cleaned dict including environment & template data
"""
upload_str = prefix + "_upload"
data_str = prefix + "_data"
url = cleaned.get(prefix + '_url')
data = cleaned.get(prefix + '_data')
has_upload = upload_str in files
# Uploaded file handler
if has_upload and not url:
log_template_name = files[upload_str].name
LOG.info('got upload %s' % log_template_name)
tpl = files[upload_str].read()
if tpl.startswith('{'):
try:
json.loads(tpl)
except Exception as e:
msg = _('There was a problem parsing the'
' %(prefix)s: %(error)s')
msg = msg % {'prefix': prefix, 'error': six.text_type(e)}
raise forms.ValidationError(msg)
cleaned[data_str] = tpl
# URL handler
elif url and (has_upload or data):
msg = _('Please specify a %s using only one source method.')
msg = msg % field_label
raise forms.ValidationError(msg)
elif prefix == 'template':
# Check for raw template input - blank environment allowed
if not url and not data:
msg = _('You must specify a template via one of the '
'available sources.')
raise forms.ValidationError(msg)
def create_kwargs(self, data):
kwargs = {'parameters': data['template_validate'],
'environment_data': data['environment_data']}
if data.get('stack_id'):
kwargs['stack_id'] = |
pagea/unstyle | unstyle/features/characterfeatures.py | Python | mit | 526 | 0 | """Module containing character feature extractors."""
import string
from unstyle.features.featregister import register_feat
@register_feat
def characterSpace(text):
    """Return the total number of characters."""
    # Counts everything: letters, digits, whitespace and punctuation.
    return len(text)
@register_feat
def letterSpace(text):
    """Return the total number of letters (excludes spaces and punctuation)"""
    # Only ASCII letters count; digits, whitespace and punctuation do not.
    letters = string.ascii_lowercase + string.ascii_uppercase
    return sum(1 for char in text if char in letters)
|
wiliamsouza/cars | cars/__init__.py | Python | apache-2.0 | 715 | 0.002797 | import os
| from flask import Flask
from flask.ext.mongoengine import MongoEngine
from flask.ext.login import LoginManager
UPLOAD_FOLDER = '/srv/cars/cars/data/images'
if 'TRAVIS' in os.environ:
UPLOAD_FOLDER = '{0}/{1}'.format(os.environ['TRAVIS_BUILD_DIR'],
'cars/data/images')
app = Flask(__name__)
#app.config['DEBUG'] = True
app.config['MONGODB_SETTINGS'] = {'DB': 'cars'}
app.config['SECRET_KEY'] = 'super_secre | t_key'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
db = MongoEngine(app)
db.connection.admin.command('setParameter', textSearchEnabled=True)
lm = LoginManager(app)
lm.init_app(app)
lm.login_view = 'login'
from . import controllers
__version__ = '0.1'
|
maxwward/SCOPEBak | askbot/migrations/0076_transplant_followed_by_2.py | Python | gpl-3.0 | 27,054 | 0.008354 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Drop the ``exercise_followed_by`` many-to-many join table."""
        # Removing M2M table for field followed_by on 'Exercise'
        db.delete_table('exercise_followed_by')
    def backwards(self, orm):
        """Revert: recreate the ``exercise_followed_by`` M2M join table."""
        # Adding M2M table for field followed_by on 'Exercise'
        db.create_table(u'exercise_followed_by', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('exercise', models.ForeignKey(orm['askbot.exercise'], null=False)),
            ('user', models.ForeignKey(orm['auth.user'], null=False))
        ))
        # Restore the uniqueness constraint on the (exercise, user) pair.
        db.create_unique(u'exercise_followed_by', ['exercise_id', 'user_id'])
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousproblem': {
'Meta': {'object_name': 'AnonymousProblem'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_problems'", 'to': "orm['askbot.Exercise']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousexercise': {
'Meta': {'object_name': 'AnonymousExercise'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.problem': {
'Meta': {'object_name': 'Problem', 'db_table': "u'problem'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': | 'True', 'related_name': "'locked_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'exercise': ('dj | ango.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['askbot.Exercise']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user' |
fitzgen/zoolander | zoolander/colors.py | Python | mit | 1,572 | 0.002545 | """
Zoolander's color handling module.
"""
import operator
class HexMetaClass(type):
    """Metaclass that installs arithmetic operators on hex-number classes.

    For every name in ``OPERATORS`` it attaches both ``__<op>__`` and the
    reflected ``__r<op>__`` method to the class it creates.  The operand may
    be another instance of the class, an int, a float, or a hex string.
    """

    # NOTE: "div" only resolves on Python 2 (operator.div); the getattr is
    # deliberately lazy so merely creating classes still works on Python 3.
    OPERATORS = ("add", "sub", "mul", "div")

    def __new__(mcs, name, bases, attrs):
        cls = type.__new__(mcs, name, bases, attrs)

        def overload(op, reflected=False):
            """
            Build an operator method that accepts other instances of the
            class, ints, floats, or hexadecimal strings.
            """
            def overloaded_fn(self, other):
                my_class = type(self)
                op_fn = getattr(operator, op)
                # Normalize the other operand to a plain number.
                if isinstance(other, my_class):
                    other_num = other.num
                elif isinstance(other, int) or isinstance(other, float):
                    other_num = other
                elif isinstance(other, str):
                    other_num = int(other, 16)
                else:
                    raise TypeError("Unsupported types for %s, '%s' and '%s'" % (
                        str(op_fn), type(self), type(other)
                    ))
                # Reflected operators see ``self`` as the *right* operand,
                # which matters for non-commutative ops (sub, div).
                if reflected:
                    return my_class(hex(op_fn(other_num, self.num)))
                return my_class(hex(op_fn(self.num, other_num)))
            return overloaded_fn

        for op in mcs.OPERATORS:
            setattr(cls, "__%s__" % op, overload(op))
            # Bug fix: previously the *name string* of the forward method was
            # assigned here, leaving __radd__ & co. uncallable.
            setattr(cls, "__r%s__" % op, overload(op, reflected=True))
        return cls
class Hex(object):
    """A hexadecimal number parsed from a string such as "#1f" or "1f"."""

    __metaclass__ = HexMetaClass  # Python 2 metaclass binding

    def __init__(self, hex_string):
        # Accept an optional leading "#" (CSS-style notation).
        digits = hex_string[1:] if hex_string[0] == "#" else hex_string
        self.num = int(digits, 16)

    def __str__(self):
        # Render back as lowercase hex with the leading "#".
        return "#%x" % self.num
|
alirizakeles/tendenci | tendenci/apps/navs/utils.py | Python | gpl-3.0 | 866 | 0.008083 | from django.core.cache import cache
from django.conf import settings
from django.template.loader import render_to_string
from tendenci.apps.navs.cache import NAV_PRE_KEY
def cache_nav(nav, show_title=False):
    """
    Render a nav's HTML, store it in the cache for 5 days, and return it.
    """
    keys = [settings.CACHE_PRE_KEY, NAV_PRE_KEY, str(nav.id)]
    key = '.'.join(keys)
    value = render_to_string("navs/render_nav.html",
                             {'nav': nav, "show_title": show_title})
    cache.set(key, value, 432000)  # 5 days, in seconds
    return value
def get_nav(id):
    """
    Get the nav from the cache.

    Returns None on a cache miss.  (``id`` shadows the builtin, but the
    parameter name is kept for backward compatibility with keyword callers.)
    """
    keys = [settings.CACHE_PRE_KEY, NAV_PRE_KEY, str(id)]
    key = '.'.join(keys)
    nav = cache.get(key)
    return nav
def clear_nav_cache(nav):
    """
    Remove the cached rendering of the given nav.
    """
    key = '.'.join([settings.CACHE_PRE_KEY, NAV_PRE_KEY, str(nav.id)])
    cache.delete(key)
|
Microsoft/PTVS | Python/Product/Pyvot/Pyvot/xl/version.py | Python | apache-2.0 | 704 | 0.005682 | # PyVot
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
__version__ = "0.1.2" |
goulu/pdfminer | tools/pdfdiff.py | Python | mit | 5,794 | 0 | #!/usr/bin/env python3
"""
compares two pdf files.
"""
import io
import logging
import sys
import pdfminer.settings
from pdfminer import high_level, layout
pdfminer.settings.STRICT = False
logging.basicConfig()
def compare(file1, file2, **kwargs):
    """Extract the text of two PDF files and return their differences.

    Returns an HTML side-by-side diff table when ``outfile`` ends in
    .htm/.html, otherwise a unified diff (an iterator of lines).
    """
    # If any LAParams group arguments were passed, create an LAParams object
    # and populate it with the given args.  Otherwise leave it as None.
    if kwargs.get('laparams', None) is None:
        laparams = layout.LAParams()
        for param in ("all_texts", "detect_vertical", "word_margin",
                      "char_margin", "line_margin", "boxes_flow"):
            paramv = kwargs.get(param, None)
            if paramv is not None:
                # Bug fix: LAParams does not support item assignment; its
                # settings are plain attributes.
                setattr(laparams, param, paramv)
        kwargs['laparams'] = laparams

    s1 = io.StringIO()
    with open(file1, "rb") as fp:
        high_level.extract_text_to_fp(fp, s1, **kwargs)

    s2 = io.StringIO()
    with open(file2, "rb") as fp:
        high_level.extract_text_to_fp(fp, s2, **kwargs)

    import difflib
    s1.seek(0)
    s2.seek(0)
    s1, s2 = s1.readlines(), s2.readlines()

    import os.path
    try:
        extension = os.path.splitext(kwargs['outfile'])[1][1:4]
        if extension.lower() == 'htm':
            return difflib.HtmlDiff().make_file(s1, s2)
    except KeyError:
        # No outfile given: fall through to a unified diff.
        pass
    return difflib.unified_diff(s1, s2, n=kwargs['context_lines'])
# main
def main(args=None):
    """Command-line entry point: parse arguments and diff two PDF files."""
    import argparse
    P = argparse.ArgumentParser(description=__doc__)
    P.add_argument("file1", type=str, default=None, help="File 1 to compare.")
    P.add_argument("file2", type=str, default=None, help="File 2 to compare.")
    P.add_argument("-o", "--outfile", type=str, default="-",
                   help="Output file(default/'-' is stdout) if .htm or .html,"
                        " create an HTML table (or a complete HTML file "
                        "containing the table) showing a side by side, "
                        "line by line comparison of text with inter-line and "
                        "intra-line change highlights. The table can be "
                        "generated in either full or "
                        "contextual difference mode.")
    P.add_argument("-N", "--context-lines", default=3, type=int,
                   help="context lines shown")
    P.add_argument("-d", "--debug", default=False, action="store_true",
                   help="Debug output.")

    # params for pdf2txt
    P.add_argument("-p", "--pagenos", type=str,
                   help="Comma-separated list of page numbers to parse. "
                        "Included for legacy applications, "
                        "use --page-numbers for more "
                        "idiomatic argument entry.")
    P.add_argument("--page-numbers", type=int, default=None, nargs="+",
                   help="Alternative to --pagenos with space-separated "
                        "numbers; supercedes --pagenos where it is used.")
    P.add_argument("-m", "--maxpages", type=int, default=0,
                   help="Maximum pages to parse")
    P.add_argument("-P", "--password", type=str, default="",
                   help="Decryption password for both PDFs")
    P.add_argument("-t", "--output_type", type=str, default="text",
                   help="pdf2txt type: text|html|xml|tag (default is text)")
    P.add_argument("-c", "--codec", type=str, default="utf-8",
                   help="Text encoding")
    P.add_argument("-s", "--scale", type=float, default=1.0, help="Scale")
    P.add_argument("-A", "--all-texts", default=None, action="store_true",
                   help="LAParams all texts")
    P.add_argument("-V", "--detect-vertical", default=None,
                   action="store_true", help="LAParams detect vertical")
    P.add_argument("-W", "--word-margin", type=float, default=None,
                   help="LAParams word margin")
    P.add_argument("-M", "--char-margin", type=float, default=None,
                   help="LAParams char margin")
    P.add_argument("-L", "--line-margin", type=float, default=None,
                   help="LAParams line margin")
    P.add_argument("-F", "--boxes-flow", type=float, default=None,
                   help="LAParams boxes flow")
    P.add_argument("-Y", "--layoutmode", default="normal", type=str,
                   help="HTML Layout Mode")
    P.add_argument("-n", "--no-laparams", default=False,
                   action="store_true", help="Pass None as LAParams")
    P.add_argument("-R", "--rotation", default=0, type=int,
                   help="Rotation")
    P.add_argument("-O", "--output-dir", default=None,
                   help="Output directory for images")
    P.add_argument("-C", "--disable-caching", default=False,
                   action="store_true", help="Disable caching")
    P.add_argument("-S", "--strip-control", default=False,
                   action="store_true", help="Strip control in XML mode")
    A = P.parse_args(args=args)

    if A.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    # Bug fix: --page-numbers now supersedes --pagenos, as its help text
    # documents (previously --pagenos silently overwrote it).
    if A.page_numbers:
        A.page_numbers = {x-1 for x in A.page_numbers}
    elif A.pagenos:
        A.page_numbers = {int(x)-1 for x in A.pagenos.split(",")}

    # Infer the output type from the outfile extension when possible.
    if A.output_type == "text" and A.outfile != "-":
        for override, alttype in ((".htm", "html"),
                                  (".html", "html"),
                                  (".xml", "xml"),
                                  (".tag", "tag")):
            if A.outfile.endswith(override):
                A.output_type = alttype

    if A.outfile == "-":
        outfp = sys.stdout
    else:
        outfp = open(A.outfile, "w", encoding='utf-8')
    outfp.writelines(compare(**vars(A)))
    # Bug fix: only close files this function opened; closing sys.stdout
    # would break any later output from the interpreter.
    if outfp is not sys.stdout:
        outfp.close()
    return 0
# Run as a script: propagate main()'s exit status to the shell.
if __name__ == '__main__':
    sys.exit(main())
|
def radixsort(mylist):
    """Sort a list of non-negative integers with LSD radix sort.

    Returns a new sorted list; ``mylist`` is left untouched.
    """
    if not mylist:
        return []
    # Initial pass: bucket by the least significant digit.
    buckets = {}
    for item in mylist:
        buckets.setdefault(item % 10, []).append(item)
    # Bug fix: run one pass per remaining digit of the largest value.
    # Looping "until a single bucket remains" returned unsorted results for
    # inputs like [20, 10], which start (and stay) in one bucket.
    digit = 1
    max_value = max(mylist)
    while 10 ** digit <= max_value:
        buckets = radixhelper(buckets, digit)
        digit += 1
    # Concatenate buckets in ascending digit order for the final result
    # (also fixes the Python-2-only ``dict.values()[0]`` access).
    result = []
    for key in sorted(buckets):
        result.extend(buckets[key])
    return result
def radixhelper(indict, digit):
    """Redistribute bucketed numbers by decimal digit number ``digit``
    (0 = ones, 1 = tens, ...).

    Buckets are consumed in ascending key order so each pass is stable,
    which LSD radix sort requires for correctness (iterating dict values
    in arbitrary order scrambled the result).
    """
    outdict = {}
    for key in sorted(indict):
        for number in indict[key]:
            exactdigit = (number // 10 ** digit) % 10
            outdict.setdefault(exactdigit, []).append(number)
    return outdict
def radixsortstring(mylist):
    """Sort a list of strings with LSD radix sort, one character column per
    pass from the rightmost column to the first.

    Shorter strings sort before longer strings sharing the same prefix.
    Returns a new sorted list.
    """
    if not mylist:
        return []
    answerdict = {'input': mylist}
    maxlength = max(len(singlestring) for singlestring in mylist)
    for i in range(maxlength):
        answerdict = radixstringhelper(answerdict, maxlength - (i + 1))
    # Bug fix: collect deterministically — strings too short for column 0
    # first, then buckets in character order.  Iterating .values() in
    # arbitrary dict order scrambled the final result.
    answerlist = list(answerdict.pop('shorties', []))
    for key in sorted(answerdict):
        answerlist.extend(answerdict[key])
    return answerlist
def radixstringhelper(indict, index):
    """Rebucket strings by the character at position ``index``.

    Strings shorter than ``index + 1`` go to the special 'shorties' bucket.
    Input buckets are consumed 'shorties' first and then in sorted key
    order, keeping each radix pass stable.
    """
    shorties = indict.pop('shorties', [])
    outdict = {'shorties': []}
    buckets = [shorties, ]
    for key in sorted(indict.keys()):
        buckets.append(indict[key])
    for bucketlist in buckets:
        for onestring in bucketlist:
            try:
                outdict[onestring[index]].append(onestring)
            except KeyError:
                outdict[onestring[index]] = [onestring, ]
            except IndexError:
                # String too short to have this column.
                outdict['shorties'].append(onestring)
    return outdict
|
duncanmmacleod/gwsumm | gwsumm/tabs/misc.py | Python | gpl-3.0 | 3,545 | 0 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013-2016)
#
# This file is part of GWSumm
#
# GWSumm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWSumm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWSumm. If not, see <http://www.gnu.org/licenses/>
"""This module defines some utility `Tab` subclasses, including HTTP
error handlers.
"""
from MarkupPy import markup
from .registry import (get_tab, register_tab)
from gwdetchar.io import html
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
__all__ = ['AboutTab', 'Error404Tab']
# Base Tab implementation, resolved through the gwsumm tab registry.
Tab = get_tab('basic')
# -- About --------------------------------------------------------------------
class AboutTab(Tab):
    """Page describing how the containing HTML pages were generated
    """
    type = 'about'

    def __init__(self, name='About', **kwargs):
        super(AboutTab, self).__init__(name, **kwargs)

    def write_html(self, config=list(), **kwargs):
        """Render the 'about this page' content via the base writer."""
        content = html.about_this_page(config=config)
        return super(AboutTab, self).write_html(content, **kwargs)

register_tab(AboutTab)
# -- HTTP errors --------------------------------------------------------------
class Error404Tab(Tab):
    """Custom HTTP 404 error page
    """
    type = '404'

    def __init__(self, name='404', **kwargs):
        super(Error404Tab, self).__init__(name, **kwargs)

    def write_html(self, config=list(), top=None, **kwargs):
        """Build the 404 page body and delegate to the base writer.

        ``top`` is the URL the "top level" button points at; it defaults to
        the page's base path.
        """
        if top is None:
            top = kwargs.get('base', self.path)
        kwargs.setdefault('title', '404: Page not found')
        page = markup.page()
        page.div(class_='alert alert-danger')
        page.p()
        page.strong("The page you are looking for doesn't exist")
        page.p.close()
        page.p("This could be because the times for which you are looking "
               "were never processed (or haven't even happened yet), or "
               "because no page exists for the specific data products you "
               "want. Either way, if you think this is in error, please "
               "contact <a class=\"alert-link\" "
               "href=\"mailto:detchar+code@ligo.org\">the DetChar group</a>.")
        page.p("Otherwise, you might be interested in one of the following:")
        # Navigation buttons: back, one level up, top level.
        page.div(style="padding-top: 10px;")
        page.a("Take me back", role="button", class_="btn btn-lg btn-info",
               title="Back", href="javascript:history.back()")
        page.a("Take me up one level", role="button",
               class_="btn btn-lg btn-warning", title="Up",
               href="javascript:linkUp()")
        page.a("Take me to the top level", role="button",
               class_="btn btn-lg btn-success", title="Top", href=top)
        page.div.close()
        page.div.close()
        page.script("""
        function linkUp() {
            var url = window.location.href;
            if (url.substr(-1) == '/') url = url.substr(0, url.length - 2);
            url = url.split('/');
            url.pop();
            window.location = url.join('/');
        }""", type="text/javascript")
        return super(Error404Tab, self).write_html(page, **kwargs)

register_tab(Error404Tab)
|
thombashi/subprocrunner | setup.py | Python | mit | 2,928 | 0.000683 | """
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import os.path
from typing import Dict
import setuptools
MODULE_NAME = "subprocrunner"
# GitHub repository root; {:s} is filled with the module name.
REPOSITORY_URL = "https://github.com/thombashi/{:s}".format(MODULE_NAME)
REQUIREMENT_DIR = "requirements"
ENCODING = "utf8"
# Populated below by exec()ing subprocrunner/__version__.py.
pkg_info = {} # type: Dict[str, str]
def get_release_command_class() -> Dict[str, setuptools.Command]:
    """Return the optional ``release`` command mapping.

    ``releasecmd`` is an optional dependency; when it is not installed the
    mapping is simply empty and setup() gains no extra command.
    """
    try:
        from releasecmd import ReleaseCommand
    except ImportError:
        return {}
    else:
        return {"release": ReleaseCommand}
# Collect __version__/__author__/etc. without importing the package
# (importing would require its runtime dependencies to be installed).
with open(os.path.join(MODULE_NAME, "__version__.py")) as f:
    exec(f.read(), pkg_info)

with open("README.rst", encoding=ENCODING) as fp:
    long_description = fp.read()

with open(os.path.join(REQUIREMENT_DIR, "requirements.txt")) as f:
    install_requires = [line.strip() for line in f if line.strip()]

with open(os.path.join(REQUIREMENT_DIR, "test_requirements.txt")) as f:
    tests_requires = [line.strip() for line in f if line.strip()]

LOGGING_REQUIRES = ["loguru>=0.4.1,<1"]

setuptools.setup(
    name=MODULE_NAME,
    version=pkg_info["__version__"],
    url=REPOSITORY_URL,
    author=pkg_info["__author__"],
    author_email=pkg_info["__email__"],
    description="A Python wrapper library for subprocess module.",
    include_package_data=True,
    keywords=["library", "subprocess"],
    license=pkg_info["__license__"],
    long_description=long_description,
    long_description_content_type="text/x-rst",
    packages=setuptools.find_packages(exclude=["test*"]),
    package_data={MODULE_NAME: ["py.typed"]},
    project_urls={"Source": REPOSITORY_URL, "Tracker": "{:s}/issues".format(REPOSITORY_URL)},
    python_requires=">=3.5",
    install_requires=install_requires,
    extras_require={"logging": LOGGING_REQUIRES, "test": tests_requires + LOGGING_REQUIRES},
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "License :: OSI Approved :: MIT License",
        "Operating System :: MacOS",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    cmdclass=get_release_command_class(),
)
|
eduNEXT/edunext-platform | import_shims/lms/grades/rest_api/v1/__init__.py | Python | agpl-3.0 | 377 | 0.01061 | """Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('grades.rest_api.v1', 'lms.djangoapps.grades.rest_api.v1')
from lms.djangoapps.grades.rest_api.v1 import *
|
kennydude/django-pgviews | django_pgviews/management/commands/clear_pgviews.py | Python | unlicense | 1,195 | 0.000837 | import logging
from django.core.management.base import BaseCommand
from django.apps import apps
from django.db import connection
from django_pgviews.view import clear_view, View, MaterializedView
log = logging.getLogger('django_pgviews.sync_pgviews')
class Command(BaseCommand):
    help = """Clear Postgres views. Use this before running a migration"""

    def handle(self, **options):
        """Drop every django_pgviews-managed (materialized) view and log
        the outcome for each one."""
        for view_cls in apps.get_models():
            # Only consider View subclasses that actually define SQL.
            if not (isinstance(view_cls, type) and
                    issubclass(view_cls, View) and
                    hasattr(view_cls, 'sql')):
                continue
            python_name = '{}.{}'.format(view_cls._meta.app_label,
                                         view_cls.__name__)
            status = clear_view(
                connection, view_cls._meta.db_table,
                materialized=isinstance(view_cls(), MaterializedView))
            if status == 'DROPPED':
                msg = 'dropped'
            else:
                msg = 'not dropped'
            log.info("%(python_name)s (%(view_name)s): %(msg)s" % {
                'python_name': python_name,
                'view_name': view_cls._meta.db_table,
                'msg': msg})
|
import tests.model_control.test_ozone_custom_models_enabled as testmod

# Build one model over the ozone dataset with the given transformation,
# trend, periodic component and autoregression method.
testmod.build_model(['RelativeDifference'], ['MovingAverage'], ['Seasonal_DayOfWeek'], ['MLP'])
mfnch/pyrtist | pyrtist/lib2deep/put.py | Python | lgpl-2.1 | 2,455 | 0.000815 | # Copyright (C) 2017 Matteo Franchin
#
# This file is part of Pyrtist.
# Pyrtist is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# Pyrtist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrtist. If not, see <http://www.gnu.org/licenses/>.
'''Implementation of Put functionality for DeepWindows.
This module allows passing Put objects to DeepWindow in order to place a
DeepWindow inside another DeepWindow, similarly to what is done for Windows
in the pyrtist.lib2d module.
'''
__all__ = ('SimplePut', 'Put', 'Near')
from ..lib2d import combination, SimplePut, Put, Near
from .core_types import Point, Point3, DeepMatrix
from .deep_window import DeepWindow
from .cmd_stream import DeepCmdArgFilter
@combination(SimplePut, DeepWindow, 'SimplePut')
def simple_put_at_deep_window(simple_put, deep_window):
    """Replay the SimplePut's window into ``deep_window``, transforming the
    commands with the SimplePut's matrix (identity when unset)."""
    matrix = simple_put.matrix or DeepMatrix()
    arg_filter = DeepCmdArgFilter.from_matrix(matrix)
    source_stream = simple_put.get_window().cmd_stream
    deep_window.cmd_stream.add(source_stream, arg_filter)
@combination(DeepWindow, Put)
def deep_window_at_put(deep_window, put):
    # Just record the window on the Put; the actual placement happens later
    # when the Put is combined with a destination DeepWindow.
    put.window = deep_window
@combination(Put, DeepWindow, 'Put')
def put_at_deep_window(put, deep_window):
    """Place ``put.window`` inside ``deep_window``, solving the Put's
    constraints to obtain the 2D transform applied to the command stream."""
    xy_constraints = []
    z_constraints = []
    for c in put.constraints:
        src, dst, weight = (c.src, Point3(c.dst), float(c.weight))
        # Non-point sources are names of reference points in the window.
        if not isinstance(src, (Point, Point3)):
            reference_point = put.window.get(src)
            if reference_point is None:
                raise ValueError('Cannot find reference point {}'
                                 .format(repr(src)))
            src = reference_point
        src = Point3(src)
        xy_constraints.append(Near(src.get_xy(), dst.get_xy(), weight))
        # NOTE(review): z_constraints are collected but not used below —
        # the z component of the transform is apparently not solved here.
        z_constraints.append((src.z, dst.z, weight))
    # Calculate the xy part of the matrix.
    t = put.auto_transform.calculate(put.transform, xy_constraints)
    mx = t.get_matrix()
    flt = DeepCmdArgFilter.from_matrix(mx)
    deep_window.cmd_stream.add(put.window.cmd_stream, flt)
|
akhof/PWA | PWA/src/PWA/Response.py | Python | mit | 288 | 0.010417 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
class Response():
    """Mutable HTTP response being assembled by the server."""

    def __init__(self, server):
        self._server = server
        self.response_code = 200  # HTTP status code
        self.content_type = None  # e.g. "text/html"; None = server default
        self.content = ""         # response body
        self.headers = {}         # extra header name -> value
        self.cookies = {}         # cookie name -> value
|
portify/io_scene_dts | import_dts.py | Python | mit | 18,549 | 0.002696 | import bpy
import os
from bpy_extras.io_utils import unpack_list
from .DtsShape import DtsShape
from .DtsTypes import *
from .write_report import write_debug_report
from .util import default_materials, resolve_texture, get_rgb_colors, fail, \
ob_location_curves, ob_scale_curves, ob_rotation_curves, ob_rotation_data, evaluate_all
import operator
from itertools import zip_longest, count
from functools import reduce
from random import random
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks, padding the last one.

    >>> list(grouper('ABCDEFG', 3, 'x'))  # -> ABC DEF Gxx
    """
    # One shared iterator repeated n times: zip_longest pulls n items per
    # output tuple, padding the final partial chunk with fillvalue.
    iterators = [iter(iterable)] * n
    return zip_longest(*iterators, fillvalue=fillvalue)
def dedup_name(group, name):
    """Return *name*, or the first of name#2, name#3, ... not in *group*."""
    if name not in group:
        return name
    suffix = 2
    while True:
        candidate = name + "#" + str(suffix)
        if candidate not in group:
            return candidate
        suffix += 1
def import_material(color_source, dmat, filepath):
    """Create a Blender material mirroring a DTS material definition.

    ``color_source`` is an iterator of fallback RGB colors, ``dmat`` the
    DTS material, ``filepath`` the .dts path used to locate texture files.
    """
    bmat = bpy.data.materials.new(dedup_name(bpy.data.materials, dmat.name))
    bmat.diffuse_intensity = 1

    texname = resolve_texture(filepath, dmat.name)

    if texname is not None:
        try:
            teximg = bpy.data.images.load(texname)
        except Exception:
            # Bug fix: previously a failed load left ``teximg`` unbound and
            # the code below crashed with NameError.  Keep going untextured.
            print("Cannot load image", texname)
            teximg = None

        if teximg is not None:
            texslot = bmat.texture_slots.add()
            texslot.use_map_alpha = True
            tex = texslot.texture = bpy.data.textures.new(dmat.name, "IMAGE")
            tex.image = teximg

            # Try to figure out a diffuse color for solid shading: if a tiny
            # texture is a single flat color, mirror it on the material.
            if teximg.size[0] <= 16 and teximg.size[1] <= 16:
                if teximg.use_alpha:
                    pixels = grouper(teximg.pixels, 4)
                else:
                    pixels = grouper(teximg.pixels, 3)

                color = next(pixels)

                for other in pixels:
                    if other != color:
                        break
                else:
                    bmat.diffuse_color = color[:3]
    elif dmat.name.lower() in default_materials:
        bmat.diffuse_color = default_materials[dmat.name.lower()]
    else:  # give it a random color
        bmat.diffuse_color = next(color_source)

    # Translate DTS material flags onto the Blender material.
    if dmat.flags & Material.SelfIlluminating:
        bmat.use_shadeless = True
    if dmat.flags & Material.Translucent:
        bmat.use_transparency = True

    if dmat.flags & Material.Additive:
        bmat.torque_props.blend_mode = "ADDITIVE"
    elif dmat.flags & Material.Subtractive:
        bmat.torque_props.blend_mode = "SUBTRACTIVE"
    else:
        bmat.torque_props.blend_mode = "NONE"

    if dmat.flags & Material.SWrap:
        bmat.torque_props.s_wrap = True
    if dmat.flags & Material.TWrap:
        bmat.torque_props.t_wraps = True
    if dmat.flags & Material.IFLMaterial:
        bmat.torque_props.use_ifl = True

    # TODO: MipMapZeroBorder, IFLFrame, DetailMap, BumpMap, ReflectanceMap
    # AuxilaryMask?

    return bmat
class index_pass:
    """Stand-in for an index array: subscripting returns the key itself.

    Used for non-indexed primitives, where element i simply *is* vertex i.
    """

    def __getitem__(self, item):
        return item
def create_bmesh(dmesh, materials, shape):
    """Build a Blender mesh from a DTS mesh.

    ``materials`` maps each DTS material to its Blender material; ``shape``
    supplies the shape-level material table indexed by primitive type bits.
    Triangle strips/fans are unrolled into individual triangles.
    """
    me = bpy.data.meshes.new("Mesh")
    faces = []
    material_indices = {}
    # For non-indexed primitives, "indices[i]" is just i.
    indices_pass = index_pass()
    for prim in dmesh.primitives:
        if prim.type & Primitive.Indexed:
            indices = dmesh.indices
        else:
            indices = indices_pass
        dmat = None
        if not (prim.type & Primitive.NoMaterial):
            dmat = shape.materials[prim.type & Primitive.MaterialMask]
            # Lazily append each material the first time a primitive uses it.
            if dmat not in material_indices:
                material_indices[dmat] = len(me.materials)
                me.materials.append(materials[dmat])
        if prim.type & Primitive.Strip:
            # Strips alternate winding every triangle to keep normals consistent.
            even = True
            for i in range(prim.firstElement + 2, prim.firstElement + prim.numElements):
                if even:
                    faces.append(((indices[i], indices[i - 1], indices[i - 2]), dmat))
                else:
                    faces.append(((indices[i - 2], indices[i - 1], indices[i]), dmat))
                even = not even
        elif prim.type & Primitive.Fan:
            # NOTE(review): the fan hub is indices[0], not
            # indices[prim.firstElement], and the winding alternates like a
            # strip — confirm against the DTS format spec.
            even = True
            for i in range(prim.firstElement + 2, prim.firstElement + prim.numElements):
                if even:
                    faces.append(((indices[i], indices[i - 1], indices[0]), dmat))
                else:
                    faces.append(((indices[0], indices[i - 1], indices[i]), dmat))
                even = not even
        else: # Default to Triangle Lists (prim.type & Primitive.Triangles)
            for i in range(prim.firstElement + 2, prim.firstElement + prim.numElements, 3):
                faces.append(((indices[i], indices[i - 1], indices[i - 2]), dmat))

    me.vertices.add(len(dmesh.verts))
    me.vertices.foreach_set("co", unpack_list(dmesh.verts))
    me.vertices.foreach_set("normal", unpack_list(dmesh.normals))

    me.polygons.add(len(faces))
    me.loops.add(len(faces) * 3)

    me.uv_textures.new()
    uvs = me.uv_layers[0]

    # Wire up each triangle: 3 consecutive loops per polygon, plus UVs
    # (the DTS V axis is flipped relative to Blender's).
    for i, ((verts, dmat), poly) in enumerate(zip(faces, me.polygons)):
        poly.use_smooth = True # DTS geometry is always smooth shaded
        poly.loop_total = 3
        poly.loop_start = i * 3
        if dmat:
            poly.material_index = material_indices[dmat]
        for j, index in zip(poly.loop_indices, verts):
            me.loops[j].vertex_index = index
            uv = dmesh.tverts[index]
            uvs.data[j].uv = (uv.x, 1 - uv.y)

    me.validate()
    me.update()

    return me
def file_base_name(filepath):
    """Return the file name without its directory or final extension."""
    base = os.path.basename(filepath)
    return base.rsplit(".", 1)[0]
def _add_linear_key(curve, frame, value):
    """Append one LINEAR-interpolated keyframe (frame, value) to an F-curve."""
    curve.keyframe_points.add(1)
    key = curve.keyframe_points[-1]
    key.interpolation = "LINEAR"
    key.co = (frame, value)


def insert_reference(frame, shape_nodes):
    """Key every node's current location, scale and rotation at ``frame``.

    Used to pin the reference pose before sequence keyframes are inserted.
    """
    for node in shape_nodes:
        ob = node.bl_ob

        for curve in ob_location_curves(ob):
            _add_linear_key(curve, frame, ob.location[curve.array_index])

        for curve in ob_scale_curves(ob):
            _add_linear_key(curve, frame, ob.scale[curve.array_index])

        _, curves = ob_rotation_curves(ob)
        rot = ob_rotation_data(ob)
        for curve in curves:
            _add_linear_key(curve, frame, rot[curve.array_index])
def load(operator, context, filepath,
reference_keyframe=True,
import_sequences=True,
use_armature=False,
debug_report=False):
shape = DtsShape()
with open(filepath, "rb") as fd:
shape.load(fd)
if debug_report:
write_debug_report(filepath + ".txt", shape)
with open(filepath + ".pass.dts", "wb") as fd:
shape.save(fd)
# Create a Blender material for each DTS material
materials = {}
color_source = get_rgb_colors()
for dmat in shape.materials:
materials[dmat] = import_material(color_source, dmat, filepath)
# Now assign IFL material properties where needed
for ifl in shape.iflmaterials:
mat = materials[shape.materials[ifl.slot]]
assert mat.torque_props.use_ifl == True
mat.torque_props.ifl_name = shape.names[ifl.name]
# First load all the nodes into armatures
lod_by_mesh = {}
for lod in shape.detail_levels:
lod_by_mesh[lod.objectDetail] = lod
node_obs = []
node_obs_val = {}
if use_armature:
root_arm = bpy.data.armatures.new(file_base_name(filepath))
root_ob = bpy.data.objects.new(root_arm.name, root_arm)
root_ob.show_x_ray = True
context.scene.objects.link(root_ob)
context.scene.objects.active = root_ob
# Calculate armature-space matrix, head and tail for each node
for i, node in enumerate(shape.nodes):
node.mat = shape.default_rotations[i].to_matrix()
node.mat = Matrix.Translation(shape.default_translations[i]) * node.mat.to_4x4()
if node.parent != -1:
node.mat = shape.nodes[node.parent].mat * node.mat
# node.head = node.mat.to_translation()
# node.tai |
makyo/honeycomb | administration/migrations/0009_auto_20161109_0428.py | Python | mit | 454 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-09 04:28
from __future__ import unicode_literals
fr | om djang | o.db import migrations, models
class Migration(migrations.Migration):
    """Allow ``Ban.end_date`` to be left blank in forms.

    Auto-generated; alters the field to ``DateField(blank=True)``.
    NOTE(review): ``blank=True`` without ``null=True`` only relaxes form
    validation, not the database column -- confirm that is the intent.
    """

    dependencies = [
        ('administration', '0008_auto_20161107_1918'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ban',
            name='end_date',
            field=models.DateField(blank=True),
        ),
    ]
|
jjagielka/reporter | src/reports/models.py | Python | apache-2.0 | 2,148 | 0.01676 | from django.db import models
from django.db import models
from django.utils.translation import ugettext as _
# from django.core.urlresolvers import reverse_lazy, reverse
from django.conf import settings
from projects.models import Project, Customer
class Report (models.Model):
    """Abstract periodic status report classified as a highlight, lowlight
    or escalation.

    Concrete subclasses (ProjectReport, CustomerReport) supply the ``target``
    foreign key this report is attached to.
    """
    # Two-letter classification codes stored in the ``light`` field.
    HIGHLIGHT = 'HL'
    LOWLIGHT = 'LL'
    ESCALATION = 'XS'
    LIGHTS = (
        (HIGHLIGHT, _('Highlight')),
        (LOWLIGHT, _('Lowlight')),
        (ESCALATION, _('Escalation')),
    )

    year = models.PositiveIntegerField(null=False, blank=False,
                                       verbose_name=_("year"))
    period = models.PositiveIntegerField(null=False, blank=False,
                                         verbose_name=_("period"))
    light = models.CharField(max_length=2, choices=LIGHTS, default=HIGHLIGHT)
    description = models.TextField(null=False, blank=True,
                                   verbose_name=_("description"))
    # Audit trail: timestamps and users, maintained automatically / by views.
    created = models.DateTimeField(auto_now_add=True)
    created_by = models.ForeignKey(settings.AUTH_USER_MODEL,
                                   related_name='+', editable=False)
    modified = models.DateTimeField(auto_now=True)
    modified_by = models.ForeignKey(settings.AUTH_USER_MODEL,
                                    related_name='+', editable=False)

    class Meta:
        verbose_name = _('Report')
        verbose_name_plural = _('Reports')
        # NOTE(review): 'project' and 'category' are not fields of this model,
        # and 'target' only exists on subclasses -- confirm these Meta options
        # actually work on the concrete models.
        ordering = ['light', 'project', 'category']
        unique_together = ("target", "year", "period")
        abstract = True

    def __str__(self):
        # TODO(review): no ``title`` attribute is defined on this model, so
        # this likely raises AttributeError at runtime.  The two statements
        # that previously followed this return were unreachable dead code
        # (one also referenced the un-imported ``mark_safe``) and have been
        # removed.
        return self.title
class ProjectReport (Report):
    """Concrete Report attached to a Project."""
    target = models.ForeignKey (Project, related_name='reports')
    class Meta:
        verbose_name = _('Project report')
        verbose_name_plural = _('Project reports')
class CustomerReport (Report):
    """Concrete Report attached to a Customer."""
    target = models.ForeignKey (Customer, related_name='reports')
    class Meta:
        verbose_name = _('Customer report')
        verbose_name_plural = _('Customer reports')
|
mcecchi/SuperOcto | RoboLCD/RoboLCD/lcd/z_offset_wizard.py | Python | agpl-3.0 | 12,560 | 0.008758 | from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.logger import Logger
from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty, ListProperty
from .. import roboprinter
from printer_jog import printer_jog
from kivy.clock import Clock
from pconsole import pconsole
from common_screens import Picture_Button_Screen, Wait_Screen, Override_Layout,Picture_Button_Screen_Body, Button_Screen
from Language import lang
class ZoffsetWizard(object):
    def __init__(self, robosm, back_destination, **kwargs):
        """Start the Z-offset wizard on the given screen manager.

        :param robosm: Robo screen manager used to create/switch screens
        :param back_destination: screen name the back button returns to
        """
        super(ZoffsetWizard, self).__init__()
        self.sm = robosm
        self.name = 'zoffset' #name of initial screen
        # Initial/final Z positions used to compute the offset.
        self.z_pos_init = 20.00
        self.z_pos_end = 0.0
        # Immediately renders the welcome screen.
        self.first_screen(back_destination=back_destination)
        #position callback variables
        # Last seen coordinates; used to detect when motion has stopped.
        self.old_xpos = 0
        self.old_ypos = 0
        self.old_zpos = 0
def first_screen(self, **kwargs):
model = roboprinter.printer_instance._settings.get(['Model'])
#body_text,image_source, button_function, button_text = roboprinter.lang.pack['Button_Screen']['Default_Button']
c = Button_Screen(lang.pack['ZOffset_Wizard']['Wizard_Description'],
self.second_screen,
button_text=lang.pack['ZOffset_Wizard']['Start'])
self.sm._generate_backbutton_screen(name=self.name, title=roboprinter.lang.pack['ZOffset_Wizard']['Welcome'] , back_destination=kwargs['back_destination'], content=c)
def second_screen(self, *args):
"""Loading Screen
Displays to user that Z Axis is moving """
self._prepare_printer()
title = lang.pack['ZOffset_Wizard']['Z_14']
back_destination = roboprinter.robo_screen()
name = self.name + "[1]"
layout = Wait_Screen(self.check_temp_and_change_screen, '',lang.pack['ZOffset_Wizard']['Auto_Next'])
self.sm._generate_backbutton_screen(name=name | , title=title, back_destin | ation=back_destination, content=layout)
Logger.info("2nd Screen: RENDERED")
def check_temp_and_change_screen(self):
temps = roboprinter.printer_instance._printer.get_current_temperatures()
#find the temperature
if 'tool0' in temps:
extruder_one_temp = temps['tool0']['actual']
if extruder_one_temp < 100:
self.third_screen()
else:
self.temperature_wait_screen()
def temperature_wait_screen(self, *args):
title = roboprinter.lang.pack['ZOffset_Wizard']['Wait']
name = self.name + "temperature"
back_destination = roboprinter.robo_screen()
layout = Z_Offset_Temperature_Wait_Screen(self.third_screen)
self.sm._generate_backbutton_screen(name = name,
title = title,
back_destination = back_destination,
content = layout)
Logger.info("Temperature Wait Screen Activated")
    def third_screen(self, *args):
        """
        Instructions screen: show the manual Z-offset adjuster.
        """
        #turn off fan
        roboprinter.printer_instance._printer.commands('M106 S0')
        title = roboprinter.lang.pack['ZOffset_Wizard']['Z_24']
        name = self.name + "[2]"
        back_destination = roboprinter.robo_screen()
        Logger.info("Updated Zoffset is: " + str(self.z_pos_init))
        # The adjuster's Done button advances to the capture/finish step.
        layout = Z_Offset_Adjuster()
        layout.ids.done.fbind('on_press', self.fifth_screen)
        self.sm._generate_backbutton_screen(title=title, name=name, back_destination=back_destination, content=layout)
#This is where the ZOffset Wizard finishes
#this is also where we should make the mod for testing the new Zoffset
    def fifth_screen(self, *args):
        """Capture the adjusted Z position, derive the offset, and show the
        confirmation screen with a Save button."""
        # NOTE(review): _capture_zpos is defined elsewhere in this class; the
        # original comment suggests it reports (x, y, z) yet the result is
        # cast straight to float -- confirm it actually returns just Z.
        self.z_pos_end = float(self._capture_zpos()) #schema: (x_pos, y_pos, z_pos)
        self.z_pos_end = float(self._capture_zpos()) #runs twice because the first call returns the old position
        Logger.info("ZCapture: z_pos_end {}".format(self.z_pos_end))
        # Offset is the negated captured Z position.
        self.zoffset = (self.z_pos_end) * -1.00
        title = roboprinter.lang.pack['ZOffset_Wizard']['Z_44']
        name = self.name + "[3]"
        back_destination = roboprinter.robo_screen()
        #title_text, body_text,image_source, button_function, button_text = roboprinter.lang.pack['Button_Screen']['Default_Button']
        layout = Picture_Button_Screen('[size=40][color=#69B3E7]' + lang.pack['ZOffset_Wizard']['Finish_Title'] + '[/color][/size]',
                                       '[size=30]' + lang.pack['ZOffset_Wizard']['Finish_Body1'] + ' {} '.format(self.zoffset) + lang.pack['ZOffset_Wizard']['Finish_Body2'],
                                       'Icons/Manual_Control/check_icon.png',
                                       self._end_wizard,
                                       button_text="[size=30]" + lang.pack['ZOffset_Wizard']['Save']
                                       )
        self.sm._generate_backbutton_screen(
            name=name,
            title=title,
            back_destination=back_destination,
            content=layout
        )
    def wait_for_update(self, dt):
        """Clock callback: poll the EEPROM until the Z home offset is set,
        then re-home and leave the wizard.

        Returning False unschedules the callback (Kivy Clock convention).
        """
        pconsole.query_eeprom()
        if pconsole.home_offset['Z'] != 0:
            roboprinter.printer_instance._printer.commands('G28')
            self.sm.go_back_to_main()
            return False
#####Helper Functions#######
    def _prepare_printer(self):
        """Put the printer in a safe, known state for offset measurement:
        lift Z, turn heaters off / fan on, reset EEPROM, home, and park."""
        # Prepare printer for zoffset configuration
        #jog the Z Axis Down to prevent any bed interference
        jogger = {'z': 160}
        printer_jog.jog(desired=jogger, speed=1500, relative=True)
        #kill the extruder
        roboprinter.printer_instance._printer.commands('M104 S0')  # hotend heater off
        roboprinter.printer_instance._printer.commands('M140 S0')  # bed heater off
        roboprinter.printer_instance._printer.commands('M106 S255')  # part fan full to cool the hotend
        roboprinter.printer_instance._printer.commands('M502')  # restore firmware defaults
        roboprinter.printer_instance._printer.commands('M500')  # persist settings to EEPROM
        roboprinter.printer_instance._printer.commands('G28')  # home all axes
        # Park the head at the bed origin, then drop to Z=20.
        bed_x = 0
        bed_y = 0
        roboprinter.printer_instance._printer.commands('G1 X' + str(bed_x) + ' Y' + str(bed_y) +' F10000')
        roboprinter.printer_instance._printer.commands('G1 Z20 F1500')
def position_callback(self, dt):
temps = roboprinter.printer_instance._printer.get_current_temperatures()
pos = pconsole.get_position()
if pos != False:
xpos = int(float(pos[0]))
ypos = int(float(pos[1]))
zpos = int(float(pos[2]))
extruder_one_temp = 105
#find the temperature
if 'tool0' in temps.keys():
extruder_one_temp = temps['tool0']['actual']
Logger.info("Counter is at: " + str(self.counter))
#check the extruder physical position
if self.counter > 25 and xpos == self.old_xpos and ypos == self.old_ypos and zpos == self.old_zpos:
if self.sm.current == 'zoffset[1]':
if extruder_one_temp < 100:
Logger.info('Succesfully found position')
self.third_screen()
return False
else:
self.temperature_wait_screen()
return False
else:
Logger.info('User went to a different screen Unscheduling self.')
#turn off fan
roboprinter.printer_instance._printer.commands('M106 S0')
return False
#if finding the position fails it will wait 30 seconds and continue
self.counter += 1
if self.counter > 60:
if self.sm.current == 'zoffset[1]':
Logger.info('could not find position, but continuing anyway')
|
LordSputnik/python-mbio | mbio/entities/entity.py | Python | mit | 235 | 0.008511 |
class Entity(object):
    """Abstract base for entities; subclasses implement (de)serialisation."""

    # Entity-kind discriminators.
    RELEASE, RECORDING = 0, 1

    def __init__(self, *args, **kwargs):
        pass

    def json(self):
        """Serialise this entity to JSON; subclasses must override."""
        raise NotImplementedError

    def parse_json(self, json):
        """Populate this entity from *json*; subclasses must override."""
        raise NotImplementedError
|
Azure/azure-sdk-for-python | sdk/core/azure-core/tests/async_tests/test_base_polling_async.py | Python | mit | 30,931 | 0.004106 | #--------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#--------------------------------------------------------------------------
import base64
import json
import pickle
import re
from utils import HTTP_REQUESTS
from azure.core.pipeline._tools import is_rest
import types
import unittest
try:
from unittest import mock
except ImportError:
import mock
import pytest
from requests import Request, Response
from msrest import Deserializer
from azure.core.polling import async_poller, AsyncLROPoller
from azure.core.exceptions import DecodeError, HttpResponseError
from azure.core import AsyncPipelineClient
from azure.core.pipeline import PipelineResponse, AsyncPipeline, PipelineContext
from azure.core.pipeline.transport import AsyncioRequestsTransportResponse, AsyncHttpTransport
from azure.core.polling.async_base_polling import (
AsyncLROBasePolling,
)
from utils import ASYNCIO_REQUESTS_TRANSPORT_RESPONSES, request_and_responses_product, create_transport_response
from rest_client_async import AsyncTestRestClient
class SimpleResource:
    """Ad-hoc attribute bag (a Python 3 SimpleNamespace clone).

    Used to deserialize resource objects from response bodies where
    no particular object type has been specified.
    """

    def __init__(self, **kwargs):
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)

    def __repr__(self):
        pairs = ", ".join(
            "{}={!r}".format(name, self.__dict__[name])
            for name in sorted(self.__dict__)
        )
        return "{}({})".format(type(self).__name__, pairs)

    def __eq__(self, other):
        return self.__dict__ == other.__dict__
class BadEndpointError(Exception):
    """Raised by test transports when a request hits an unexpected URL."""
    pass
# Shared fixtures for the LRO polling tests: canned bodies, sentinel URLs,
# and a module-level client whose pipeline is stubbed out.
TEST_NAME = 'foo'
RESPONSE_BODY = {'properties':{'provisioningState': 'InProgress'}}
ASYNC_BODY = json.dumps({ 'status': 'Succeeded' })
ASYNC_URL = 'http://dummyurlFromAzureAsyncOPHeader_Return200'
LOCATION_BODY = json.dumps({ 'name': TEST_NAME })
LOCATION_URL = 'http://dummyurlurlFromLocationHeader_Return200'
RESOURCE_BODY = json.dumps({ 'name': TEST_NAME })
RESOURCE_URL = 'http://subscriptions/sub1/resourcegroups/g1/resourcetype1/resource1'
ERROR = 'http://dummyurl_ReturnError'
POLLING_STATUS = 200
CLIENT = AsyncPipelineClient("http://example.org")
# Request/response types are injected per-test before polling starts.
CLIENT.http_request_type = None
CLIENT.http_response_type = None
async def mock_run(client_self, request, **kwargs):
    # Fake pipeline run: dispatch on URL via TestBasePolling.mock_update
    # (defined later in this module).
    return TestBasePolling.mock_update(client_self.http_request_type, client_self.http_response_type, request.url)
# Replace the real pipeline run with the URL-dispatching mock above.
CLIENT._pipeline.run = types.MethodType(mock_run, CLIENT)
@pytest.fixture
def client():
    """A throwaway client; the poller under test never sends through it."""
    # The poller itself doesn't use it, so it doesn't need to be functional.
    return AsyncPipelineClient("https://baseurl")
@pytest.fixture
def async_pipeline_client_builder():
    """Build a client that uses the "send" callback as its final transport
    layer; "send" receives "request" and kwargs like any transport layer.
    """
    def create_client(send_cb):
        # Minimal async transport whose lifecycle hooks are no-ops and whose
        # send() delegates entirely to the supplied callback.
        class TestHttpTransport(AsyncHttpTransport):
            async def open(self): pass
            async def close(self): pass
            async def __aexit__(self, *args, **kwargs): pass
            async def send(self, request, **kwargs):
                return await send_cb(request, **kwargs)
        return AsyncPipelineClient(
            'http://example.org/',
            pipeline=AsyncPipeline(
                transport=TestHttpTransport()
            )
        )
    return create_client
@pytest.fixture
def deserialization_cb():
    """Deserialization callback that parses the response body as JSON."""
    def cb(pipeline_response):
        return json.loads(pipeline_response.http_response.text())
    return cb
@pytest.fixture
def polling_response():
    """An AsyncLROBasePolling pre-seeded with a 200 response whose (mutable)
    headers dict is returned alongside it so tests can tweak them."""
    polling = AsyncLROBasePolling()
    headers = {}
    response = Response()
    response.headers = headers
    response.status_code = 200
    # Wire the fake requests response into the azure-core pipeline wrapper.
    polling._pipeline_response = PipelineResponse(
        None,
        AsyncioRequestsTransportResponse(
            None,
            response,
        ),
        PipelineContext(None)
    )
    polling._initial_response = polling._pipeline_response
    return polling, headers
def test_base_polling_continuation_token(client, polling_response):
    """A continuation token can be produced and used to rebuild polling args
    that successfully initialize a fresh poller."""
    polling, _ = polling_response
    continuation_token = polling.get_continuation_token()
    assert isinstance(continuation_token, str)
    # Round-trip: token -> (client, initial_response, deserialization_callback)
    polling_args = AsyncLROBasePolling.from_continuation_token(
        continuation_token,
        deserialization_callback="deserialization_callback",
        client=client,
    )
    new_polling = AsyncLROBasePolling()
    new_polling.initialize(*polling_args)
@pytest.mark.asyncio
@pytest.mark.parametrize("http_request,http_response", request_and_responses_product(ASYNCIO_REQUESTS_TRANSPORT_RESPONSES))
async def test_post(async_pipeline_client_builder, deserialization_cb, http_request, http_response):
# Test POST LRO with both Location and Operation-Location
# The initial response contains both Location and Operation-Location, a 202 and no Body
initial_response = TestBasePolling.mock_send(
http_request,
http_response,
'POST',
202,
{
'location': 'http://example.org/location',
'operation-location': 'http://example.org/async_monitor',
},
''
)
async def send(request, **kwargs):
assert request.method == 'GET'
if request.url == 'http://example.org/location':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
body={'location_result': True}
).http_response
elif request.url == 'http://example.org/async_monitor':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
body={'status': 'Succeeded'}
).http_response
else:
pytest.fail("No other query allowed")
client = async_pipeline_client_builder(send)
# LRO options with Location final state
poll = async_poller(
client,
initial_response,
deserialization_cb,
AsyncLROBasePolling(0))
result = await poll
assert result['location_result'] == True
# Location has no body
async def send(request, **kwargs):
assert request.method == 'GET'
if request.url == 'http://example.org/location':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
body=None
).http_response
elif request.url == 'http://example.org/async_monitor':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
|
mysql/mysql-utilities | mysql-test/suite/replication/t/export_rpl_errors.py | Python | gpl-2.0 | 7,565 | 0 | #
# Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
export_rpl_errors test.
"""
import replicate
import mutlib
from mysql.utilities.exception import MUTLibError
# Options that are only meaningful together with --rpl; used to drive the
# "option without --rpl" error cases below.
_RPL_OPTIONS = ["--rpl-file=test.txt", "--comment-rpl", "--rpl-user=root"]
class test(replicate.test):
    """check replication errors for export utility
    This test executes a series of export database operations on a single
    server using a variety of replication options exercising the errors
    associated with the --rpl commands and processing.
    """
    # Extra spawned server (no binlog) and its id; created in setup().
    server3 = None
    s3_serverid = None
    def check_prerequisites(self):
        # Check MySQL server version - Must be 5.1.0 or higher
        if not self.servers.get_server(0).check_version_compat(5, 1, 0):
            raise MUTLibError("Test requires server version 5.1.0 or higher")
        return replicate.test.check_prerequisites(self)
    def setup(self):
        """Spawn a fresh 'new_server1' and create the util_test database."""
        result = replicate.test.setup(self)
        index = self.servers.find_server_by_name("new_server1")
        # If server exists, kill it
        if index >= 0:
            server = self.servers.get_server(index)
            self.servers.stop_server(server)
            self.servers.remove_server(server)
        self.s3_serverid = self.servers.get_next_id()
        # NOTE(review): s3_serverid is fetched above but s1_serverid is passed
        # here -- confirm whether that is intentional.
        res = self.servers.spawn_new_server(self.server0, self.s1_serverid,
                                            "new_server1")
        if not res:
            raise MUTLibError("Cannot spawn replication new server.")
        self.server3 = res[0]
        self.servers.add_new_server(self.server3, True)
        # Create util_test database to avoid not exist error.
        self.server1.exec_query("DROP DATABASE IF EXISTS util_test")
        self.server1.exec_query("CREATE DATABASE util_test")
        return result
    def run(self):
        """Exercise each --rpl misuse and verify the expected exit code."""
        self.res_fname = "result.txt"
        from_conn = "--server=" + self.build_connection_string(self.server1)
        test_num = 1
        # Check --rpl option errors
        # Each rpl-dependent option alone must fail with exit code 2.
        for option in _RPL_OPTIONS:
            cmd_str = "mysqldbexport.py {0} util_test ".format(from_conn)
            comment = ("Test case {0} - error: {1} but no "
                       "--rpl".format(test_num, option))
            res = mutlib.System_test.run_test_case(self, 2, cmd_str + option,
                                                   comment)
            if not res:
                raise MUTLibError("{0}: failed".format(comment))
            test_num += 1
        # All rpl-dependent options combined must fail the same way.
        all_options = " ".join(_RPL_OPTIONS)
        comment = ("Test case {0} - error: {1} but no "
                   "--rpl".format(test_num, all_options))
        res = mutlib.System_test.run_test_case(self, 2, cmd_str + all_options,
                                               comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))
        test_num += 1
        # Bad --rpl-file path must fail with exit code 1.
        cmd_str = ("mysqldbexport.py util_test --export=both "
                   "--rpl-user=rpl:rpl {0} ".format(from_conn))
        comment = "Test case {0} - error: --rpl-file bad path".format(test_num)
        option = " --rpl=master --rpl-file=/bad/path/not/there.atall "
        res = mutlib.System_test.run_test_case(self, 1, cmd_str + option,
                                               comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))
        test_num += 1
        # Temporary unprivileged user for the --rpl-user error cases.
        self.server1.exec_query("CREATE USER imnotamouse@localhost")
        cmd_str = "mysqldbexport.py util_test --export=data {0} ".format(
            from_conn)
        comment = "Test case {0} - warning: --rpl-user missing".format(
            test_num)
        option = " --rpl=master "
        # Missing --rpl-user is only a warning (exit code 0).
        res = mutlib.System_test.run_test_case(self, 0, cmd_str + option,
                                               comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))
        test_num += 1
        comment = "Test case {0} - error: --rpl-user missing user".format(
            test_num)
        option = " --rpl=master --rpl-user=missing "
        res = mutlib.System_test.run_test_case(self, 1, cmd_str + option,
                                               comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))
        test_num += 1
        comment = ("Test case {0} - error: --rpl-user missing "
                   "privileges".format(test_num))
        option = " --rpl=master --rpl-user=imnotamouse "
        res = mutlib.System_test.run_test_case(self, 1, cmd_str + option,
                                               comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))
        test_num += 1
        self.server1.exec_query("DROP USER imnotamouse@localhost")
        # Detach both servers from replication so --rpl=slave must fail.
        self.server1.exec_query("STOP SLAVE")
        self.server1.exec_query("RESET SLAVE")
        self.server2.exec_query("STOP SLAVE")
        self.server2.exec_query("RESET SLAVE")
        comment = "Test case {0} - error: slave not connected".format(test_num)
        option = " --rpl=slave "
        res = mutlib.System_test.run_test_case(self, 1, cmd_str + option,
                                               comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))
        test_num += 1
        # server3 was spawned without binary logging: --rpl=master must fail.
        from_conn = "--server=" + self.build_connection_string(self.server3)
        self.server3.exec_query("CREATE DATABASE util_test")
        cmd_str = ("mysqldbexport.py util_test --export=both "
                   "--rpl-user=rpl:rpl {0} ".format(from_conn))
        comment = "Test case {0} - error: no binlog".format(test_num)
        option = " --rpl=master "
        res = mutlib.System_test.run_test_case(self, 1, cmd_str + option,
                                               comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))
        # Normalize volatile output so results compare across runs.
        self.replace_result("CHANGE MASTER", "CHANGE MASTER <goes here>\n")
        self.replace_result("# CHANGE MASTER", "# CHANGE MASTER <goes here>\n")
        self.replace_substring(str(self.server1.port), "PORT1")
        self.replace_substring(str(self.server2.port), "PORT2")
        # Mask warning message if --rpl-user is not specified. For 5.7 servers
        # the message is different because only one root account is available.
        self.replace_result("# WARNING: No --rpl-user specified and ",
                            "# WARNING: No --rpl-user specified and ...\n")
        return True
    def get_result(self):
        return self.compare(__name__, self.results)
    def record(self):
        return self.save_result_file(__name__, self.results)
    def cleanup(self):
        # Kill the servers that are only used in this test.
        kill_list = ['new_server1']
        return (replicate.test.cleanup(self) and
                self.kill_server_list(kill_list))
|
pankshok/xoinvader | xoinvader/animation.py | Python | mit | 8,490 | 0.000236 | """Animation.
Animation is set of keyframes.
Value of selected attribute changes in time.
Keyframe:
(time, value)
Objects have animation manager which manages animation graph and switching."""
from operator import itemgetter
from eaf import Timer
from xoinvader.utils import Point
class AnimationBoundariesExceeded(Exception):
    """Raised when the queried time lies outside the two bounding keyframes,
    so an interpolated value would be incorrect."""

    def __init__(self, first, current_time, second):
        # BUGFIX: the original also passed ``self`` to Exception.__init__,
        # polluting ``args`` (and str()) with the exception instance itself.
        super(AnimationBoundariesExceeded, self).__init__(
            f"Animation frame boundaries exceeded: {first} <= {current_time} <= {second}",
        )
class InterpolationUnknownTypes(Exception):
    """Raised when the two keyframe values form an unsupported type
    combination for interpolation."""

    def __init__(self, first, second):
        # BUGFIX: the original also passed ``self`` to Exception.__init__,
        # polluting ``args`` (and str()) with the exception instance itself.
        super(InterpolationUnknownTypes, self).__init__(
            f"Unknown types of interpolating values: {first} and {second}"
        )
# TODO: Implement animation graph and etc
class AnimationManager(object):
    """Holds an object's named animations and forwards updates to the
    currently active one."""

    def __init__(self):
        self._animations = {}
        self._animation = None

    @property
    def animation(self):
        """Name of the currently active animation.

        To select an animation, assign its name.

        :getter: yes
        :setter: yes
        :type: str
        """
        if not self._animation:
            raise AttributeError("There is no available animation.")
        return self._animation.name

    @animation.setter
    def animation(self, name):
        try:
            self._animation = self._animations[name]
        except KeyError:
            raise ValueError(f"No such animation: '{name}'.")

    def add(self, name, *args, **kwargs):
        """Create an Animation and register it under *name*.

        Extra arguments are forwarded to the Animation constructor; the
        first animation added becomes the active one.

        :param str name: animation name
        """
        new_animation = Animation(name, *args, **kwargs)
        self._animations[name] = new_animation
        if not self._animation:
            self._animation = new_animation

    def update(self, dt):
        """Advance the active animation; no-op when none is active or the
        active one has finished."""
        if not self._animation:
            return
        try:
            self._animation.update(dt)
        except StopIteration:
            return  # TODO: think about method to change animation
# pylint: disable=too-many-instance-attributes,too-many-arguments
# pylint: disable=too-few-public-methods
class Animation(object):
    """Animation unit.
    Animation object holds sorted list of (time, value) items and changes
    selected attribute of bound object according to local animation time.
    Time measured by timer. When current time is greater or equal then time
    of next keyframe - animation object changes it to appropriate value.
    When animation is done and if not looped - raise StopIteration.
    In case of interpolated animation value calculation occurs within two
    bounding frames and on frame switch.
    :param str name: animation name
    :param object bind: object to bind animation
    :param str attr: attribute to change in frames
    :param list keyframes: (float, object) tuples
    :param bool interp: interpolate values between frames or not
    :param bool loop: loop animation or not
    """
    def __init__(self, name, bind, attr, keyframes, interp=False, loop=False):
        self._name = name
        self._obj = bind
        self._attr = attr
        if not keyframes:
            raise ValueError("Animation keyframes must not be empty.")
        # Keyframes are kept sorted by time so index order == playback order.
        self._keyframes = sorted(keyframes, key=itemgetter(0))
        self._interp = interp
        self._loop = loop
        # Timer for tracking local time; its duration is the last frame's time.
        self._timer = Timer(self._keyframes[-1][0], lambda: True)
        self._timer.start()
        # Current keyframe index
        self._current = 0
        # Bind the concrete update strategy once, instead of branching per tick.
        if self._interp:
            self.update = self._update_interpolated
        else:
            self.update = self._update_discrete
    @property
    def name(self):
        """Animation's name.
        :getter: yes
        :setter: no
        :type: str
        """
        return self._name
    def _apply_value(self, value):
        """Apply new value to linked object.
        :param obj value: value to apply
        """
        setattr(self._obj, self._attr, value)
    def _update_interpolated(self, dt):
        """Advance animation and interpolate value.
        NOTE: animation frame switching depends on interp mode
        animation with interpolation switches frame only when current local
        time exceeds NEXT frames' time border.
        """
        self._check_animation_state()
        self._timer.update(dt)
        current_time = self._timer.elapsed
        keyframe = self._keyframes[self._current]
        next_keyframe = self._keyframes[self._current + 1]
        # it's time to switch keyframe
        if current_time >= next_keyframe[0]:
            self._current += 1
            keyframe = self._keyframes[self._current]
            # Reached the last keyframe: apply its value verbatim and mark the
            # animation finished (loop handling happens in _check_animation_state).
            if self._current == len(self._keyframes) - 1:
                self._apply_value(keyframe[1])
                self._current += 1
                self._check_animation_state()
                return
            next_keyframe = self._keyframes[self._current + 1]
        # Interpolate between the two frames bounding current_time.
        value = interpolate(keyframe, next_keyframe, current_time)
        self._apply_value(value)
    def _update_discrete(self, dt):
        """Advance animation without interpolating value.
        NOTE: animation frame switching depends on interp mode
        discrete animation swiches frame and updates value only if
        current local time is >= time of current keyframe.
        No need to worry about calculating value between frames - thus
        no need to complicate behaviour.
        """
        self._check_animation_state()
        self._timer.update(dt)
        keyframe = self._keyframes[self._current]
        # Check if animation need to switch keyframe
        if self._timer.elapsed >= keyframe[0]:
            self._apply_value(keyframe[1])
            self._current += 1
    def _check_animation_state(self):
        """Check animation state and restart if needed.
        :raise StopIteration: when animation exceeded frames.
        """
        # _current is advanced one past the last index when playback finishes.
        if len(self._keyframes) == self._current:
            if self._loop:
                self._current = 0
                self._timer.restart()
            else:
                self._timer.stop()
                raise StopIteration
def linear_equation(val1, val2, time1, time2, current_time):
    """Linearly interpolate a value between two keyframes.

    :param float val1: first keyframe value
    :param float val2: second keyframe value
    :param float time1: first keyframe local time
    :param float time2: second keyframe local time
    :param float current_time: current animation local time
    """
    slope = (val2 - val1) / (time2 - time1)
    return val1 + slope * (current_time - time1)
def same_type(values, types):
    """Tell whether every value is an instance of *types*.

    :param collections.Iterable values: values to check type similarity
    :param tuple|type types: type or tuple of types
    """
    return all(isinstance(value, types) for value in values)
def interpolate(first, second, current_time):
"""Interpolate value by two bounding keyframes.
:param collections.Iterable first: first bounding keyframe
:param collections.Iterable second: second bounding keyframe
:param float current_time: current animation local time
:raises AnimationBoundariesExceeded: when time interval is invalid
:raises InterpolationUnknownTypes: when interpolating invalid types
"""
if not first[0] <= current_time <= second[0]:
raise AnimationBoundariesExceeded(first[0], current_time, second[0])
def frames_of(*args):
"""If frames both of specified type."""
return same_type((first[1], second[1]), args)
if frames_of(int, float):
value = linear_equation(
float(first[1]),
float(second[1]),
float(first[0]),
float(second[0]),
float(current_time),
) |
stasm/app-validator | tests/js/test_features.py | Python | bsd-3-clause | 4,061 | 0 | from functools import wraps
from nose.tools import eq_
from js_helper import TestCase
def uses_feature(name):
    """Decorator factory: run the wrapped test method, then assert that the
    feature *name* was detected on the test instance."""
    def decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kw):
            func(self, *args, **kw)
            self.assert_has_feature(name)
        return wrapper
    return decorator
class FeatureTester(TestCase):
    """Base class: yields one nose generator test per (feature, script) pair
    declared in a subclass's TESTS list."""
    def test_all(self):
        def _test(feature, script):
            # Reset fixture state before every generated case.
            self.setUp()
            self.setup_err()
            self.run_script(script)
            self.assert_has_feature(feature)
        for feature, script in self.TESTS:
            yield _test, feature, script
class TestWindowFeatures(FeatureTester):
    """Tests for feature APIs in the global context."""
    # (expected feature flag, JS snippet that should trigger it)
    TESTS = [
        ("ACTIVITY", "var x = new MozActivity();"),
        ("LIGHT_EVENTS", "window.ondevicelight = function() {};"),
        ("ARCHIVE", "var x = new ArchiveReader();"),
        ("INDEXEDDB", "var x = new mozIndexedDB();"),
        ("PROXIMITY", "window.ondeviceproximity = function() {};"),
        ("ORIENTATION", "window.ondeviceorientation = function() {};"),
        ("TOUCH", "window.ontouchstart = function() {};"),
        ("AUDIO", "var audio = new Audio(); audio.src = 'asdf';"),
        ("WEBAUDIO", "var x = new mozAudioContext();"),
        ("QUOTA", "var x = new mozPersistentStorage();"),
        ("QUOTA", "var x = new StorageInfo();"),
        ("WEBRTC_MEDIA", "var x = new MediaStream();"),
        ("WEBRTC_DATA", "var x = new DataChannel();"),
        ("WEBRTC_PEER", "var x = new RTCPeerConnection();"),
        ("SPEECH_SYN", "var x = speechSynthesis.foo();"),
        ("SPEECH_REC", "var x = new SpeechRecognition();"),
        ("POINTER_LOCK", "document.documentElement.requestPointerLock()"),
    ]
class TestNavigatorFeatures(FeatureTester):
    """Tests for feature APIs in the navigator.* object."""
    # Repaired: stray " | " separators had corrupted the class name and
    # the mozContacts snippet below.
    TESTS = [
        ("APPS", "navigator.mozApps.install('foo/bar.webapp');"),
        ("APPS", "navigator.apps.install('foo/bar.webapp');"),
        ("PACKAGED_APPS", "navigator.apps.installPackage('foo/bar.webapp');"),
        ("PAY", "navigator.mozPay.foo();"),
        ("BATTERY", "navigator.battery.foo();"),
        ("BLUETOOTH", "navigator.bluetooth.foo();"),
        ("CONTACTS", "navigator.mozContacts.foo();"),
        ("DEVICE_STORAGE", "navigator.getDeviceStorage();"),
        ("GEOLOCATION", "navigator.getCurrentPosition();"),
        ("IDLE", "navigator.addIdleObserver();"),
        ("NETWORK_INFO", "navigator.connection.foo();"),
        ("NETWORK_STATS", "navigator.networkStats.foo();"),
        ("PUSH", "navigator.mozPush.foo();"),
        ("TIME_CLOCK", "navigator.mozTime.foo();"),
        ("VIBRATE", "navigator.vibrate.foo();"),
        ("FM", "navigator.mozFM();"),
        ("FM", "navigator.mozFMRadio();"),
        ("SMS", "navigator.mozSms.foo();"),
        ("GAMEPAD", "navigator.getGamepad();"),
        ("MOBILEID", "navigator.getMobileIdAssertion();"),
        ("NOTIFICATION", "navigator.mozNotification.foo();"),
        ("ALARM", "navigator.mozAlarms.foo();"),
        ("TCPSOCKET", "var x = new navigator.mozTCPSocket();"),
        ("THIRDPARTY_KEYBOARD_SUPPORT",
         "var x = navigator.mozInputMethod.foo()"),
        ("NETWORK_INFO_MULTIPLE",
         "var x = navigator.mozMobileConnections.foo();"),
    ]
class TestInstMembersFeatures(FeatureTester):
    """Tests for feature APIs in instance properties."""
    # (feature flag, JS snippet) pairs exercising per-element properties.
    TESTS = [
        ("TOUCH",
         "document.getElementById('foo').ontouchstart = function() {};"),
        ("FULLSCREEN",
         "document.getElementById('foo').requestFullScreen();"),
    ]
class TestGUMFeatures(FeatureTester):
    """Tests for getUserMedia-related feature APIs."""
    # (feature flag, JS snippet) pairs; the constraints object passed to
    # getUserMedia decides which feature flag should be detected.
    TESTS = [
        ("CAMERA", "navigator.getUserMedia({video:true})"),
        ("CAMERA", "navigator.getUserMedia({picture:true})"),
        ("MIC", "navigator.getUserMedia({audio:true})"),
        ("SCREEN_CAPTURE",
         "navigator.getUserMedia({video:{mandatory:"
         "{chromeMediaSource:'screen'}}})"),
    ]
|
Teekuningas/mne-python | examples/visualization/plot_montage.py | Python | bsd-3-clause | 2,108 | 0 | # -*- coding: utf-8 -*-
"""
.. _plot_montage:
Plotting sensor layouts of EEG systems
======================================
This example illustrates how to l | oad all the EEG system montages
shipped in MNE-python, and display it on fsaverage template.
""" # noqa: D205, D400
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Joan Massich <mailsik@gmail.com>
#
# License: BSD Style.
import os.path as op
im | port mne
from mne.channels.montage import get_builtin_montages
from mne.datasets import fetch_fsaverage
from mne.viz import set_3d_title, set_3d_view
###############################################################################
# Check all montages against a sphere
for current_montage in get_builtin_montages():
montage = mne.channels.make_standard_montage(current_montage)
info = mne.create_info(
ch_names=montage.ch_names, sfreq=100., ch_types='eeg')
info.set_montage(montage)
sphere = mne.make_sphere_model(r0='auto', head_radius='auto', info=info)
fig = mne.viz.plot_alignment(
# Plot options
show_axes=True, dig='fiducials', surfaces='head',
bem=sphere, info=info)
set_3d_view(figure=fig, azimuth=135, elevation=80)
set_3d_title(figure=fig, title=current_montage)
###############################################################################
# Check all montages against fsaverage
subjects_dir = op.dirname(fetch_fsaverage())
for current_montage in get_builtin_montages():
montage = mne.channels.make_standard_montage(current_montage)
# Create dummy info
info = mne.create_info(
ch_names=montage.ch_names, sfreq=100., ch_types='eeg')
info.set_montage(montage)
fig = mne.viz.plot_alignment(
# Plot options
show_axes=True, dig='fiducials', surfaces='head', mri_fiducials=True,
subject='fsaverage', subjects_dir=subjects_dir, info=info,
coord_frame='mri',
trans='fsaverage', # transform from head coords to fsaverage's MRI
)
set_3d_view(figure=fig, azimuth=135, elevation=80)
set_3d_title(figure=fig, title=current_montage)
|
eestay/edx-platform | lms/djangoapps/verify_student/models.py | Python | agpl-3.0 | 36,885 | 0.002305 | # -*- coding: utf-8 -*-
"""
Models for Student Identity Verification
This is where we put any models relating to establishing the real-life identity
of a student over a period of time. Right now, the only models are the abstract
`PhotoVerification`, and its one concrete implementation
`SoftwareSecurePhotoVerification`. The hope is to keep as much of the
photo verification process as generic as possible.
"""
from datetime import datetime, timedelta
from email.utils import formatdate
import functools
import json
import logging
import uuid
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import pytz
import requests
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from model_utils.models import StatusModel
from model_utils import Choices
from verify_student.ssencrypt import (
random_aes_key, encrypt_and_encode,
generate_signed_message, rsa_encrypt
)
from reverification.models import MidcourseReverificationWindow
log = logging.getLogger(__name__)
def generateUUID():  # pylint: disable=invalid-name
    """Return a fresh random UUID (version 4) rendered as a string."""
    new_id = uuid.uuid4()
    return str(new_id)
class VerificationException(Exception):
    """Raised when a verification attempt is in the wrong status for an operation."""
    pass
def status_before_must_be(*valid_start_statuses):
    """
    Helper decorator with arguments to make sure that an object with a `status`
    attribute is in one of a list of acceptable status states before a method
    is called. You could use it in a class definition like:

        @status_before_must_be("submitted", "approved", "denied")
        def refund_user(self, user_id):
            # Do logic here...

    If the object has a status that is not listed when the `refund_user` method
    is invoked, it will throw a `VerificationException`. This is just to avoid
    distracting boilerplate when looking at a Model that needs to go through a
    workflow process.
    """
    def _decorate(func):
        """Wrap `func` with the status precondition check."""
        @functools.wraps(func)
        def _checked(obj, *args, **kwargs):
            # Success path first: status is acceptable, just delegate.
            if obj.status in valid_start_statuses:
                return func(obj, *args, **kwargs)
            exception_msg = (
                u"Error calling {} {}: status is '{}', must be one of: {}"
            ).format(func, obj, obj.status, valid_start_statuses)
            raise VerificationException(exception_msg)
        return _checked
    return _decorate
class PhotoVerification(StatusModel):
"""
Each PhotoVerification represents a Student's attempt to establish
their identity by uploading a photo of themselves and a picture ID. An
attempt actually has a number of fields that need to be filled out at
different steps of the approval process. While it's useful as a Django Model
for the querying facilities, **you should only edit a `PhotoVerification`
object through the methods provided**. Initialize them with a user:
attempt = PhotoVerification(user=user)
We track this attempt through various states:
`created`
Initial creation and state we're in after uploading the images.
`ready`
The user has uploaded their images and checked that they can read the
images. There's a separate state here because it may be the case that we
don't actually submit this attempt for review until payment is made.
`submitted`
Submitted for review. The review may be done by a staff member or an
external service. The user cannot make changes once in this state.
`must_retry`
We submitted this, but there was an error on submission (i.e. we did not
get a 200 when we POSTed to Software Secure)
`approved`
An admin or an external service has confirmed that the user's photo and
photo ID match up, and that the photo ID's name matches the user's.
`denied`
The request has been denied. See `error_msg` for details on why. An
admin might later override this and change to `approved`, but the
student cannot re-open this attempt -- they have to create another
attempt and submit it instead.
Because this Model inherits from StatusModel, we can also do things like::
attempt.status == PhotoVerification.STATUS.created
attempt.status == "created"
pending_requests = PhotoVerification.submitted.all()
"""
######################## Fields Set During Creation ########################
# See class docstring for description of status states
STATUS = Choices('created', 'ready', 'submitted', 'must_retry', 'approved', 'denied')
user = models.ForeignKey(User, db_index=True)
# They can change their name later on, so we want to copy the value here so
# we always preserve what it was at the time they requested. We only copy
# this value during the mark_ready() step. Prior to that, you should be
# displaying the user's name from their user.profile.name.
name = models.CharField(blank=True, max_length=255)
# Where we place the uploaded image files (e.g. S3 URLs)
face_image_url = models.URLField(blank=True, max_length=255)
photo_id_image_url = models.URLField(blank=True, max_length=255)
# Randomly generated UUID so that external services can post back the
# results of checking a user's photo submission without use exposing actual
# user IDs or something too easily guessable.
receipt_id = models.CharField(
db_index=True,
default=lambda: generateUUID(),
max_length=255,
)
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
updated_at = models.DateTimeField(auto_now=True, db_index=True)
# Indicates whether or not a user wants to see the verification status
# displayed on their dash. Right now, only relevant for allowing students
# to "dismiss" a failed midcourse reverification message
display = models.BooleanField(db_index=True, default=True)
######################## Fields Set When Submitting ########################
submitted_at = models.DateTimeField(null=True, db_index=True)
    #################### Fields Set During Approval/Denial #####################
    # If the review was done by an internal staff member, mark who it was.
reviewing_user = models.ForeignKey(
User,
db_index=True,
default=None,
null=True,
related_name="photo_verifications_reviewed"
)
# Mark the name of the service used to evaluate this attempt (e.g
# Software Secure).
reviewing_service = models.CharField(blank=True, max_length=255)
# If status is "denied", this should contain text explaining why.
error_msg = models.TextField(blank=True)
# Non-required field. External services can add any arbitrary codes as time
# goes on. We don't try to define an exhuastive list -- this is just
# capturing it so that we can later query for the common problems.
error_code = models.CharField(blank=True, max_length=50)
class Meta(object): # pylint: disable=missing-docstring
abstract = True
ordering = ['-created_at']
##### Methods listed in the order you'd typically call them
@classmethod
def _earliest_allowed_date(cls):
"""
Returns the earliest allowed date given the settings
"""
days_good_for = settings.VERIFY_STUDENT["DAYS_GOOD_FOR"]
return datetime.now(pytz.UTC) - timedelta(days=days_good_for)
@classmethod
def user_is_verified(cls, user, earliest_allowed_date=None, window=None):
"""
Return whether or not a user has satisfactorily proved their identity.
Depending on the policy, this can expire after some period of time, so
a user might have to renew periodically.
If window=None, then this will check for the user's *initial* verification.
If window is set to anything else, it will check for the reverification
associated with that window.
"""
return cls.o |
catapult-project/catapult | common/py_utils/py_utils/camel_case_unittest.py | Python | bsd-3-clause | 1,740 | 0.002299 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import ab | solute_import
import unittest
from py_utils import camel_case
class CamelCase | Test(unittest.TestCase):
def testString(self):
self.assertEqual(camel_case.ToUnderscore('camelCase'), 'camel_case')
self.assertEqual(camel_case.ToUnderscore('CamelCase'), 'camel_case')
self.assertEqual(camel_case.ToUnderscore('Camel2Case'), 'camel2_case')
self.assertEqual(camel_case.ToUnderscore('Camel2Case2'), 'camel2_case2')
self.assertEqual(camel_case.ToUnderscore('2012Q3'), '2012_q3')
def testList(self):
camel_case_list = ['CamelCase', ['NestedList']]
underscore_list = ['camel_case', ['nested_list']]
self.assertEqual(camel_case.ToUnderscore(camel_case_list), underscore_list)
def testDict(self):
camel_case_dict = {
'gpu': {
'vendorId': 1000,
'deviceId': 2000,
'vendorString': 'aString',
'deviceString': 'bString'},
'secondaryGpus': [
{'vendorId': 3000, 'deviceId': 4000,
'vendorString': 'k', 'deviceString': 'l'}
]
}
underscore_dict = {
'gpu': {
'vendor_id': 1000,
'device_id': 2000,
'vendor_string': 'aString',
'device_string': 'bString'},
'secondary_gpus': [
{'vendor_id': 3000, 'device_id': 4000,
'vendor_string': 'k', 'device_string': 'l'}
]
}
self.assertEqual(camel_case.ToUnderscore(camel_case_dict), underscore_dict)
def testOther(self):
self.assertEqual(camel_case.ToUnderscore(self), self)
|
ampyche/ampyche | amp-cgi-bin/getters/getallplaylists.py | Python | mit | 1,063 | 0.031985 | #!/usr/bin/python3
import cgi
import json
import sqlite3 as lite
class GetAllPlaylist():
    """Read distinct playlist names from the ampyche sqlite database.

    Repaired: stray " | " separators had corrupted the SQL table name
    ("playlis | ts") and a dead commented-out line (now removed).
    """
    def __init__(self):
        # Fixed location of the playlist database on disk.
        db_loc = "/usr/share/ampyche/db/ampychePlaylist.db"
        self.db_loc = db_loc
        # Accumulator strings kept for interface compatibility with the
        # old HTML-building code path (no longer used here).
        ppll = ''
        self.ppll = ppll
        ppllpp = ''
        self.ppllpp = ppllpp

    def _make_a_list(self, genlist):
        """Flatten a list of row tuples into a sorted list of strings,
        joining each tuple's fields with a single space."""
        agen = []
        for gen in genlist:
            gen = ' '.join(gen)
            agen.append(gen)
        agen.sort()
        return agen

    def _get_playlists(self):
        """Return all distinct playlist names as a list of 1-tuples."""
        con = lite.connect(self.db_loc)
        try:
            cur = con.cursor()
            cur.execute("SELECT DISTINCT playlistname FROM playlists")
            plists = cur.fetchall()
        finally:
            # Close even on query errors (original leaked the connection).
            con.close()
        return plists
print("Content-Type: application/json\n\n")
GAP = GetAllPlaylist()
result = GAP._get_playlists()
print(json.dumps(result, sort_keys=True, indent=4)) |
sniemi/SamPy | sandbox/src1/examples/ftface_props.py | Python | bsd-2-clause | 3,413 | 0.008204 | #!/usr/bin/env python
"""
This is a demo script to show you how to use all the properties of an
FT2Font object. These describe global font properties. For
individual character metrices, use the Glyp object, as returned by
load_char
"""
import matplotlib
from matplotlib.ft2font import FT2Font
#fname = '/usr/local/share/matplotlib/VeraIt.ttf'
fname = matplotlib.get_data_path() + '/fonts/ttf/VeraIt.ttf'
#fname = '/usr/local/share/matplotlib/cmr10.ttf'
font = FT2Font(fname)
# these constants are used to access the style_flags and face_flags
FT_FACE_FLAG_SCALABLE = 1 << 0
FT_FACE_FLAG_FIXED_SIZES = 1 << 1
FT_FACE_FLAG_FIXED_WIDTH = 1 << 2
FT_FACE_FLAG_SFNT = 1 << 3
FT_FACE_FLAG_HORIZONTAL = 1 << 4
FT_FACE_FLAG_VERTICAL = 1 << 5
FT_FACE_FLAG_KERNING = 1 << 6
FT_FACE_FLAG_FAST_GLYPHS = 1 << 7
FT_FACE_FLAG_MULTIPLE_MASTERS = 1 << 8
FT_FACE_FLAG_GLYPH_NAMES = 1 << 9
FT_FACE_FLAG_EXTERNAL_STREAM = 1 << 10
FT_STYLE_FLAG_ITALIC = 1 << 0
FT_STYLE_FLAG_BOLD = 1 << 1
print 'Num faces :', font.num_faces # number of faces in file
print 'Num glyphs :', font.num_glyphs # number of glyphs in the face
print 'Family name :', font.family_name # face family name
print 'Syle name :', font.style_name # face syle name
print 'PS name :', font.postscript_name # the postscript name
print 'Num fixed :', font.num_fixed_sizes # number of embedded bitmap in face
# the following are only available if face.scalable
if font.scalable:
# the face global bounding box (xmin, ymin, xmax, ymax)
print 'Bbox :', font.bbox
# number of font units covered by the EM
print 'EM :', font.units_per_EM
# the ascender in 26.6 units
print 'Ascender | :', font.ascender
# the descender in 26.6 units
print 'Descender :', font.descender
# the height in 26.6 units
print 'Height :', font.height
# maximum horizontal cursor advance
print 'Max adv width :', font.max_advance_width
# same for vertical layout
print 'Max adv height :', font.m | ax_advance_height
# vertical position of the underline bar
print 'Underline pos :', font.underline_position
# vertical thickness of the underline
print 'Underline thickness :', font.underline_thickness
print 'Italics :', font.style_flags & FT_STYLE_FLAG_ITALIC != 0
print 'Bold :', font.style_flags & FT_STYLE_FLAG_BOLD != 0
print 'Scalable :', font.style_flags & FT_FACE_FLAG_SCALABLE != 0
print 'Fixed sizes :', font.style_flags & FT_FACE_FLAG_FIXED_SIZES != 0
print 'Fixed width :', font.style_flags & FT_FACE_FLAG_FIXED_WIDTH != 0
print 'SFNT :', font.style_flags & FT_FACE_FLAG_SFNT != 0
print 'Horizontal :', font.style_flags & FT_FACE_FLAG_HORIZONTAL != 0
print 'Vertical :', font.style_flags & FT_FACE_FLAG_VERTICAL != 0
print 'Kerning :', font.style_flags & FT_FACE_FLAG_KERNING != 0
print 'Fast glyphs :', font.style_flags & FT_FACE_FLAG_FAST_GLYPHS != 0
print 'Mult. masters :', font.style_flags & FT_FACE_FLAG_MULTIPLE_MASTERS != 0
print 'Glyph names :', font.style_flags & FT_FACE_FLAG_GLYPH_NAMES != 0
print dir(font)
cmap = font.get_charmap()
print font.get_kerning
|
fanout/django-eventstream | django_eventstream/storage.py | Python | mit | 2,978 | 0.031229 | import sys
import json
import datetime
from django.utils import timezone
from django.core.serializers.json import DjangoJSONEncoder
from .event import Event
is_python3 = sys.version_info >= (3,)
# minutes before purging an event from the database
EVENT_TIMEOUT = 60 * 24
# attempt to trim this many events per pass
EVENT_TRIM_BATCH = 50
class EventDoesNotExist(Exception):
    """Raised when the referenced last-seen event is no longer in the log.

    `current_id` carries the channel's current position so callers can
    resynchronize from there.
    """
    def __init__(self, message, current_id):
        # Fixed: was super(Exception, self), which starts the MRO search
        # *after* Exception; name the defining class instead.
        super(EventDoesNotExist, self).__init__(message)
        self.current_id = current_id
class StorageBase(object):
    """Interface for event-storage backends; subclasses override all methods."""
    def append_event(self, channel, event_type, data):
        """Persist (channel, event_type, data) and return an Event."""
        raise NotImplementedError()
    def get_events(self, channel, last_id, limit=100):
        """Return up to `limit` events on `channel` after `last_id`."""
        raise NotImplementedError()
    def get_current_id(self, channel):
        """Return the latest event id for `channel`."""
        raise NotImplementedError()
class DjangoModelStorage(StorageBase):
    """StorageBase implementation backed by the Django ORM models.

    Repaired: stray " | " separators had corrupted ``last_id`` in
    get_events and ``EventCounter`` in get_current_id.
    """
    def append_event(self, channel, event_type, data):
        """Persist an event, trim the log, and return it as an Event."""
        from . import models
        db_event = models.Event(
            channel=channel,
            type=event_type,
            data=json.dumps(data, cls=DjangoJSONEncoder))
        db_event.save()
        self.trim_event_log()
        e = Event(
            db_event.channel,
            db_event.type,
            data,
            id=db_event.eid)
        return e
    def get_events(self, channel, last_id, limit=100):
        """Return up to `limit` events after `last_id` on `channel`.

        Raises EventDoesNotExist (carrying the channel's current id) if
        the referenced event has been purged.
        """
        from . import models
        if is_python3:
            assert(isinstance(last_id, int))
        else:
            assert(isinstance(last_id, (int, long)))
        try:
            ec = models.EventCounter.objects.get(name=channel)
            cur_id = ec.value
        except models.EventCounter.DoesNotExist:
            cur_id = 0
        if last_id == cur_id:
            return []
        # look up the referenced event first, to avoid a range query when
        # the referenced event doesn't exist
        try:
            models.Event.objects.get(
                channel=channel,
                eid=last_id)
        except models.Event.DoesNotExist:
            raise EventDoesNotExist(
                'No such event %d' % last_id,
                cur_id)
        # increase limit by 1 since we'll exclude the first result
        db_events = models.Event.objects.filter(
            channel=channel,
            eid__gte=last_id
        ).order_by('eid')[:limit + 1]
        # ensure the first result matches the referenced event
        if len(db_events) == 0 or db_events[0].eid != last_id:
            raise EventDoesNotExist(
                'No such event %d' % last_id,
                cur_id)
        # exclude the first result
        db_events = db_events[1:]
        out = []
        for db_event in db_events:
            e = Event(
                db_event.channel,
                db_event.type,
                json.loads(db_event.data),
                id=db_event.eid)
            out.append(e)
        return out
    def get_current_id(self, channel):
        """Return the current (latest) event id for `channel`, 0 if none."""
        from . import models
        try:
            ec = models.EventCounter.objects.get(name=channel)
            return ec.value
        except models.EventCounter.DoesNotExist:
            return 0
    def trim_event_log(self):
        """Delete events older than EVENT_TIMEOUT, up to EVENT_TRIM_BATCH
        rows per pass."""
        from . import models
        now = timezone.now()
        cutoff = now - datetime.timedelta(minutes=EVENT_TIMEOUT)
        while True:
            events = models.Event.objects.filter(
                created__lt=cutoff
            )[:EVENT_TRIM_BATCH]
            if len(events) < 1:
                break
            for e in events:
                try:
                    e.delete()
                except models.Event.DoesNotExist:
                    # someone else deleted. that's fine
                    pass
|
googleapis/python-aiplatform | samples/model-builder/create_training_pipeline_custom_job_sample.py | Python | apache-2.0 | 2,513 | 0.000398 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Union
from google.cloud import aiplatform
# [START aiplatform_sdk_create_training_pipeline_custom_job_sample]
def create_training_pipeline_custom_job_sample(
    project: str,
    location: str,
    staging_bucket: str,
    display_name: str,
    script_path: str,
    container_uri: str,
    model_serving_container_image_uri: str,
    dataset_id: Optional[str] = None,
    model_display_name: Optional[str] = None,
    args: Optional[List[Union[str, float, int]]] = None,
    replica_count: int = 0,
    machine_type: str = "n1-standard-4",
    accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
    accelerator_count: int = 0,
    training_fraction_split: float = 0.8,
    validation_fraction_split: float = 0.1,
    test_fraction_split: float = 0.1,
    sync: bool = True,
):
    """Run a Vertex AI custom training job and return the trained Model.

    Repaired: a stray " | " separator had corrupted the
    ``model_display_name`` parameter name (the body already used the
    intact name, so this restores the intended signature).
    """
    aiplatform.init(project=project, location=location, staging_bucket=staging_bucket)
    job = aiplatform.CustomTrainingJob(
        display_name=display_name,
        script_path=script_path,
        container_uri=container_uri,
        model_serving_container_image_uri=model_serving_container_image_uri,
    )
    # This example uses an ImageDataset, but you can use another type
    dataset = aiplatform.ImageDataset(dataset_id) if dataset_id else None
    model = job.run(
        dataset=dataset,
        model_display_name=model_display_name,
        args=args,
        replica_count=replica_count,
        machine_type=machine_type,
        accelerator_type=accelerator_type,
        accelerator_count=accelerator_count,
        training_fraction_split=training_fraction_split,
        validation_fraction_split=validation_fraction_split,
        test_fraction_split=test_fraction_split,
        sync=sync,
    )
    # Block until the (possibly async) training run has finished.
    model.wait()
    print(model.display_name)
    print(model.resource_name)
    print(model.uri)
    return model
# [END aiplatform_sdk_create_training_pipeline_custom_job_sample]
|
jhl667/galaxy_tools | tools/bwa/bwa_mem.py | Python | apache-2.0 | 12,929 | 0.022355 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #rlk moved this line to fall after the #! line
## yufei.luo@gustave.roussy 22/07/2013
## Copyright © 2014 CRS4 Srl. http://www.crs4.it/
## Modified by:
## Nicola Soranzo <nicola.soranzo@crs4.it>
"""
Runs BWA on single-end or paired-end data.
Produces a SAM file containing the mappings.
Works with BWA version >= 0.7.5.
NOTICE: In this wrapper, we only use 'mem' for mapping step.
usage: bwa_mem.py [options]
See below for options
"""
import optparse, os, shutil, subprocess, sys, tempfile
def __main__():
descr = "bwa_mem.py: Map (long length) reads against a reference genome with BWA-MEM."
parser = optparse.OptionParser(description=descr)
parser.add_option( '-t', '--threads', default=1, help='The number of threads to use [1]' )
parser.add_option( '--ref', help='The reference genome to use or index' )
parser.add_option( '-f', '--fastq', help='The (forward) fastq file to use for the mapping' )
parser.add_option( '-F', '--rfastq', help='The reverse fastq file to use for mapping if paired-end data' )
parser.add_option( '-u', '--output', help='The file to save the output (SAM format)' )
parser.add_option( '-g', '--genAlignType', help='The type of pairing (single or paired)' )
parser.add_option( '--params', help='Parameter setting to use (pre_set or full)' )
parser.add_option( '-s', '--fileSource', help='Whether to use a previously indexed reference sequence or one form history (indexed or history)' )
parser.add_option( '-D', '--dbkey', help='Dbkey for reference genome' )
parser.add_option( '-k', '--minSeedLength', type=int, help='Minimum seed length [19]' )
parser.add_option( '-w', '--bandWidth', type=int, help='Band width for banded alignment [100]' )
parser.add_option( '-d', '--offDiagonal', type=int, help='Off-diagonal X-dropoff [100]' )
parser.add_option( '-r', '--internalSeeds', type=float, help='Look for internal seeds inside a seed longer than {-k} * FLOAT [1.5]' )
parser.add_option( '-c', '--seedsOccurrence', type=int, help='Skip seeds with more than INT occurrences [10000]' )
parser.add_option( '-S', '--mateRescue', action='store_true', help='Skip mate rescue' )
parser.add_option( '-P', '--skipPairing', action='store_true', help='Skip pairing' )
parser.add_option( '-A', '--seqMatch', type=int, help='Score for a sequence match [1]' )
parser.add_option( '-B', '--mismatch', type=int, help='Penalty for a mismatch [4]' )
parser.add_option( '-O', '--gapOpen', type=int, help='Gap open penalty [6]' )
parser.add_option( '-E', '--gapExtension', type=int, help='Gap extension penalty; a gap of length k costs {-O} + {-E}*k [1]' )
parser.add_option( '-L', '--clipping', help='Penalty for clipping [5]' )
parser.add_option( '-U', '--unpairedReadpair', type=int, help='Penalty for an unpaired read pair [17]' )
parser.add_option( '-p', '--interPairEnd', action='store_true', help='FASTQ file consists of interleaved paired-end sequences' )
parser.add_option( '--rgid', help='Read group identifier' )
parser.add_option( '--rgsm', help='Sample' )
parser.add_option( '--rgpl', choices=[ 'CAPILLARY', 'LS454', 'ILLUMINA', 'SOLID', 'HELICOS', 'IONTORRENT', 'PACBIO' ], help='Platform/technology used to produce the reads' )
parser.add_option( '--rglb', help='Library name' )
parser.add_option( '--rgpu', help='Platform unit (e.g. flowcell-barcode.lane for Illumina or slide for SOLiD)' )
parser.add_option( '--rgcn', help='Sequencing center that produced the read' )
parser.add_option( '--rgds', help='Description' )
parser.add_option( '--rgdt', help='Date that run was produced (ISO8601 format date or date/time, like YYYY-MM-DD)' )
parser.add_option( '--rgfo', help='Flow order' )
parser.add_option( '--rgks', help='The array of nucleotide bases that correspond to the key sequence of each read' )
parser.add_option( '--rgpg', help='Programs used for processing the read group' )
parser.add_option( '--rgpi', help='Predicted median insert size' )
parser.add_option( '-T', '--minScore', type=int, help='Minimum score to output [30]' )
parser.add_option( '-a', '--outputAll', action='store_true', help='Output all found alignments for single-end or unpaired paired-end reads' )
parser.add_option( '-M', '--mark', action='store_true', help='Mark shorter split hits as secondary (for Picard/GATK compatibility)' )
parser.add_option( '-H', '--suppressHeader', dest='suppressHeader', action='store_true', help='Suppress header' )
(options, args) = parser.parse_args()
if len(args) > 0:
parser.error('Wrong number of arguments')
try:
tmp = tempfile.NamedTemporaryFile().name
tmp_stdout = open( tmp, 'wb' )
proc = subprocess.Popen( args='bwa 2>&1', shell=True, stdout=tmp_stdout )
tmp_stdout.close()
returncode = proc.wait()
stdout = None
for line in open( tmp_stdout.name, 'rb' ):
if line.lower().find( 'version' ) >= 0:
stdout = line.strip()
break
if stdout:
sys.stdout.write( 'BWA %s\n' % stdout )
else:
raise Exception
except:
sys.stdout.write( 'Could not determine BWA version\n' )
fastq = options.fastq
if options.rfastq:
rfastq = options.rfastq
# make temp directory for placement of indices
tmp_index_dir = tempfile.mkdtemp()
tmp_dir = tempfile.mkdtemp()
# index if necessary
#if options.fileSource == 'history':
if False:
ref_file = tempfile.NamedTemporaryFile( dir=tmp_index_dir )
ref_file_name = ref_file.name
ref_file.close()
os.symlink( options.ref, ref_file_name )
# determine which indexing algorithm to use, based on size
try:
size = os.stat( options.ref ).st_size
if size <= 2**30:
indexingAlg = 'is'
else:
indexingAlg = 'bwtsw'
except:
indexingAlg = 'is'
indexing_cmds = '-a %s' % indexingAlg
cmd1 = 'bwa index %s %s' % ( indexing_cmds, ref_file_name )
try:
tmp = tempfile.NamedTemporaryFile( dir=tmp_index_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=cmd1, shell=True, cwd=tmp_index_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp, 'rb' )
stderr = ''
buffsize = 1048576
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
| tmp_stderr.close()
if returncode != 0:
raise Exception, stderr
except Exception, e:
# clean up temp dirs
if os.path.exists( tmp_index_dir ):
s | hutil.rmtree( tmp_index_dir )
if os.path.exists( tmp_dir ):
shutil.rmtree( tmp_dir )
raise Exception, 'Error indexing reference sequence. ' + str( e )
else:
ref_file_name = options.ref
# if options.illumina13qual:
# illumina_quals = "-I"
# else:
# illumina_quals = ""
# set up aligning and generate aligning command args
start_cmds = "bwa mem -t %s" % options.threads
#if False:
if options.interPairEnd:
start_cmds += ' -p'
if options.params != 'pre_set':
if options.minSeedLength is not None:
start_cmds += " -k %d" % options.minSeedLength
if options.bandWidth is not None:
start_cmds += " -w %d" % options.bandWidth
if options.offDiagonal is not None:
start_cmds += " -d %d" % options.offDiagonal
if options.internalSeeds is not None:
start_cmds += " -r %s" % options.internalSeeds
if options.seedsOccurrence is not None:
start_cmds += " -c %d" % options.seedsOccurrence
if options.mateRescue:
start_cmds += ' |
qsnake/h5py | h5py/tests/test_dataset.py | Python | bsd-3-clause | 1,798 | 0.013904 |
"""
Test basic behavior of h5py.highlevel.Dataset, not including slicing
or keyword arguments
"""
import numpy as np
import h5py
import unittest
from common import TestCasePlus, INTS, FLOATS, COMPLEX, STRINGS, res
class TestDataset(TestCasePlus):
    """Basic creation/assignment behaviour of h5py datasets.

    Repaired: stray " | " separators had corrupted the test_create
    signature and its message format string.
    """
    def setUp(self):
        self.f = h5py.File(res.get_name(), 'w')
    def tearDown(self):
        res.clear()
    def make_dset(self, *args, **kwds):
        """Create (replacing any existing) the 'dset' dataset."""
        if 'dset' in self.f:
            del self.f['dset']
        return self.f.create_dataset('dset', *args, **kwds)
    def test_create(self):
        # Test dataset creation from shape and type, or raw data
        types = INTS + FLOATS + COMPLEX + STRINGS + ('b',)
        shapes = ( (), (1,), (10,), (20,1,15), (7,200,1) )
        for s in shapes:
            # NOTE(review): np.product and np.asscalar (below) are
            # deprecated in modern NumPy -- confirm the pinned version.
            srcdata = np.arange(np.product(s)).reshape(s)
            for t in types:
                msg = "test %s %s" % (s, t)
                data = srcdata.astype(t)
                dset = self.make_dset(s, t)
                dset[...] = data
                assert np.all(dset[...] == data), "%r %r" % (dset[...], data)
                dset = self.make_dset(data=data)
                assert np.all(dset[...] == data), msg
    def test_literal(self):
        # Literal assignment for compound types
        dtypes = [ [('a','i'), ('b','f')],
                   [('a','i'), ('b', [('c','i2'),('d','f')] ) ] ]
        values = [ (42, 39.5),
                   (42, (356, 34.0)) ]
        for val, dt in zip(values, dtypes):
            ds = self.f.create_dataset('ds', (1,), dt)
            arr = np.ndarray((1,), dtype=dt)
            ds[0] = val
            arr[0] = val
            assert ds[0] == np.asscalar(arr[0]), "%r: %r" % (ds[0], arr)
            del self.f['ds']
|
latticelabs/Mitty | mitty/tests/benchmarking/creed_test.py | Python | gpl-2.0 | 5,564 | 0.015636 | from nose.tools import assert_raises
import mitty.benchmarking.creed as creed
class MyRead:
    """Minimal stand-in for a pysam aligned read, exposing only the
    attributes that creed.analyze_read inspects."""
    def __init__(self, qname, secondary, paired, read1, unmapped, reference_id, pos, cigarstring):
        self.qname = qname
        self.is_secondary = secondary
        self.is_paired = paired
        self.is_read1 = read1
        self.is_unmapped = unmapped
        self.reference_id = reference_id
        self.pos = pos
        self.cigarstring = cigarstring
def analyze_read_test():
  """Read analysis: analyze_read() must recover the truth fields packed in the
  qname and score chrom/pos/cigar correctness of the actual alignment.

  qname layout (from the asserts below):
  serial|chrom|cpy|ro1|pos1|rl1|cigar1|ro2|pos2|rl2|cigar2
  """
  qname = '3|15|0|1|898|100|100=|0|744|100|24=2I74='

  # Read 1, aligned exactly where the qname says -> chrom/pos/cigar all correct.
  read = MyRead(qname=qname, secondary=False, paired=True, read1=True, unmapped=False, reference_id=14, pos=898, cigarstring='100M')
  read_serial, chrom, cpy, ro, pos, rl, cigar, ro_m, pos_m, rl_m, cigar_m, chrom_c, pos_c, cigar_c, unmapped = creed.analyze_read(read, window=0, extended=False)
  # Serial 3 in the qname yields 30/31 for read 1/read 2 respectively.
  assert read_serial == 30, read_serial
  assert chrom == 15, chrom
  assert cpy == 0, cpy
  assert ro == 1, ro
  assert pos == 898, pos
  assert cigar == '100M', cigar
  assert chrom_c and pos_c and cigar_c == 1

  # Read 2 (the mate), also aligned correctly.
  # NOTE(review): the original ran this identical check twice verbatim
  # (copy-paste duplication); the redundant repetition has been removed.
  read = MyRead(qname=qname, secondary=False, paired=True, read1=False, unmapped=False, reference_id=14, pos=744, cigarstring='24M2I74M')
  read_serial, chrom, cpy, ro, pos, rl, cigar, ro_m, pos_m, rl_m, cigar_m, chrom_c, pos_c, cigar_c, unmapped = creed.analyze_read(read, window=0, extended=False)
  assert read_serial == 31, read_serial
  assert chrom == 15, chrom
  assert cpy == 0, cpy
  assert ro == 0, ro
  assert pos == 744, pos
  assert cigar == '24M2I74M', cigar
  assert chrom_c and pos_c and cigar_c == 1

  # Read 1 at the right position but with the wrong CIGAR:
  # chrom and pos are judged correct, the cigar is flagged wrong.
  read = MyRead(qname=qname, secondary=False, paired=True, read1=True, unmapped=False, reference_id=14, pos=898, cigarstring='24M2I74M')
  read_serial, chrom, cpy, ro, pos, rl, cigar, ro_m, pos_m, rl_m, cigar_m, chrom_c, pos_c, cigar_c, unmapped = creed.analyze_read(read, window=0, extended=False)
  assert read_serial == 30, read_serial
  assert chrom == 15, chrom
  assert cpy == 0, cpy
  assert ro == 1, ro
  assert pos == 898, pos
  assert cigar == '100M', cigar
  assert chrom_c == 1
  assert pos_c == 1
  assert cigar_c == 0
def analyze_read_test2():
  """Read analysis for reads inside long deletions."""
  # Truth for this read is pos=700 with a 200S cigar (the read lies wholly
  # inside a long deletion); the aligner reports a 100I at varying positions.
  qn = '3|15|1|1|700|100|200S|0|900|100|100M'

  rd = MyRead(qname=qn, secondary=False, paired=True, read1=True,
              unmapped=False, reference_id=14, pos=700, cigarstring='100I')
  result = creed.analyze_read(rd, window=0, extended=False)
  (serial, chrom, cpy, ro, pos, rl, cigar,
   ro_m, pos_m, rl_m, cigar_m,
   chrom_c, pos_c, cigar_c, unmapped) = result

  assert serial == 30, serial
  assert chrom == 15, chrom
  assert cpy == 1, cpy
  assert ro == 1, ro
  assert pos == 700, pos
  assert cigar == '200S', cigar
  # Aligned exactly at the deletion start: chrom and pos correct, cigar not.
  assert chrom_c == 1
  assert pos_c == 1
  assert cigar_c == 0

  rd = MyRead(qname=qn, secondary=False, paired=True, read1=True,
              unmapped=False, reference_id=14, pos=705, cigarstring='100I')
  result = creed.analyze_read(rd, window=0, extended=False)
  # Only the correctness flags matter here (tuple slots 11-13).
  chrom_c, pos_c, cigar_c = result[11], result[12], result[13]
  # Shifted by 5 bases with window=0: chrom still right, pos now wrong.
  assert chrom_c == 1
  assert pos_c == 0
  assert cigar_c == 0
# # Test if we can do fuzzy matching if we soft clips and so on
# qname = '3|15|0|0|100|1M1000D99M|1|200|100='
# read = MyRead(qname=qname, secondary=False, paired=True, read1=True, unmapped=False, reference_id=14, pos=180, cigarstring='1S99M')
# read_serial, chrom, cpy, ro, pos, cigar, read_category = creed.analyze_read(read, window=0, extended=False)
# assert read_category == 0b100100, bin(read_category)
# def create_sample_misaligned_bam(per_bam_fname):
# """Utility function to create a sample BAM with specific read errors. Return us the feature positions and the
# correct answers for the three types of read categories as creed.count_reads_under_features should return from
# this file and the feature positions. See sketch in test folder"""
#
# feature_positions = [ # 0,1 = het 2 = hom
# (10, 20, 0), (70, 80, 2), (90, 91, 2), (110, 120, 1), (140, 141, 2), (150, 160, 1), (170, 180, 2), (210, 240, 0)]
#
# # ((correct start, correct stop, aligned start, aligned stop),
# # ( ... mate ...)), ...
#
# read_positions = [
# [ # For copy 0
# ((15, 30, 15, 30),
# (40, 50, 45, 55)),
# ((55, 65, 55, 65),
# (75, 85, 75, 85)),
# ((75, 95, 75, 95),
# (100, 110, 200, 220)),
# ((100, 110, 150, 160),
# (120, 130, 120, 130)),
# ((135, 150, 135, 150)
# (160, 175, 160, 175)),
# ((145, 155, 145, 155),
# (165, 175, 165, 175)),
# | ((195, 200, 195, 200),
# (215, 225, 230, 240),
# (230, 235, 230, 235))], # Copy 0 ...
# [ # Copy 1
# ((15, 30, 15, 30),
# (40, 50, 40, 50)),
# ((55, 65, 55, 65),
# (75, 80, 75, 80)),
# ((85, 95, 85, 95),
# (105, 115, 105, 115)),
# ((152, 158, 152, 158),
# | (172, 178, 172, 178))] # Copy 1
# ]
#
# import pysam
# bam_fp = pysam.AlignmentFile(per_bam_fname, 'wb')
# for r_pos in read_positions:
# r = pysam.AlignedSegment()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.