| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
"""
Form components for working with trees.
"""
from __future__ import unicode_literals
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
from django.forms.util import ErrorList
try:
from django.utils.encoding import smart_text
except ImportError:
from django.utils.encoding import smart_unicode as smart_text
from django.utils.html import conditional_escape, mark_safe
from django.utils.translation import ugettext_lazy as _
from mptt.exceptions import InvalidMove
__all__ = ('TreeNodeChoiceField', 'TreeNodeMultipleChoiceField', 'TreeNodePositionField', 'MoveNodeForm')
# Fields ######################################################################
class TreeNodeChoiceField(forms.ModelChoiceField):
"""A ModelChoiceField for tree nodes."""
def __init__(self, queryset, *args, **kwargs):
self.level_indicator = kwargs.pop('level_indicator', '---')
# if a queryset is supplied, enforce ordering
if hasattr(queryset, 'model'):
mptt_opts = queryset.model._mptt_meta
queryset = queryset.order_by(mptt_opts.tree_id_attr, mptt_opts.left_attr)
super(TreeNodeChoiceField, self).__init__(queryset, *args, **kwargs)
def _get_level_indicator(self, obj):
level = getattr(obj, obj._mptt_meta.level_attr)
return mark_safe(conditional_escape(self.level_indicator) * level)
def label_from_instance(self, obj):
"""
Creates labels which represent the tree level of each node when
generating option labels.
"""
level_indicator = self._get_level_indicator(obj)
return mark_safe(level_indicator + ' ' + conditional_escape(smart_text(obj)))
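# Illustration (not part of the original module): with the default
# level_indicator of '---', a node at level 2 whose str() is "Grandchild"
# is rendered in the option list as "------ Grandchild".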
class TreeNodeMultipleChoiceField(TreeNodeChoiceField, forms.ModelMultipleChoiceField):
"""A ModelMultipleChoiceField for tree nodes."""
def __init__(self, queryset, *args, **kwargs):
self.level_indicator = kwargs.pop('level_indicator', '---')
# if a queryset is supplied, enforce ordering
if hasattr(queryset, 'model'):
mptt_opts = queryset.model._mptt_meta
queryset = queryset.order_by(mptt_opts.tree_id_attr, mptt_opts.left_attr)
# For some reason ModelMultipleChoiceField constructor passes kwargs
# as args to its super(), which causes 'multiple values for keyword arg'
# error sometimes. So we skip it (that constructor does nothing anyway!)
forms.ModelChoiceField.__init__(self, queryset, *args, **kwargs)
class TreeNodePositionField(forms.ChoiceField):
"""A ChoiceField for specifying position relative to another node."""
FIRST_CHILD = 'first-child'
LAST_CHILD = 'last-child'
LEFT = 'left'
RIGHT = 'right'
DEFAULT_CHOICES = (
(FIRST_CHILD, _('First child')),
(LAST_CHILD, _('Last child')),
(LEFT, _('Left sibling')),
(RIGHT, _('Right sibling')),
)
def __init__(self, *args, **kwargs):
if 'choices' not in kwargs:
kwargs['choices'] = self.DEFAULT_CHOICES
super(TreeNodePositionField, self).__init__(*args, **kwargs)
# Forms #######################################################################
class MoveNodeForm(forms.Form):
"""
A form which allows the user to move a given node from one location
in its tree to another, with optional restriction of the nodes which
are valid target nodes for the move.
"""
target = TreeNodeChoiceField(queryset=None)
position = TreeNodePositionField()
def __init__(self, node, *args, **kwargs):
"""
The ``node`` to be moved must be provided. The following keyword
arguments are also accepted::
``valid_targets``
Specifies a ``QuerySet`` of valid targets for the move. If
not provided, valid targets will consist of every other
node of the same type, apart from the node itself and any
descendants.
For example, if you want to restrict the node to moving
within its own tree, pass a ``QuerySet`` containing
everything in the node's tree except itself and its
descendants (to prevent invalid moves) and the root node (as
a user could choose to make the node a sibling of the root
node).
``target_select_size``
The size of the select element used for the target node.
Defaults to ``10``.
``position_choices``
A tuple of allowed position choices and their descriptions.
Defaults to ``TreeNodePositionField.DEFAULT_CHOICES``.
``level_indicator``
A string which will be used to represent a single tree level
in the target options.
"""
self.node = node
valid_targets = kwargs.pop('valid_targets', None)
target_select_size = kwargs.pop('target_select_size', 10)
position_choices = kwargs.pop('position_choices', None)
level_indicator = kwargs.pop('level_indicator', None)
super(MoveNodeForm, self).__init__(*args, **kwargs)
opts = node._mptt_meta
if valid_targets is None:
valid_targets = node._tree_manager.exclude(**{
opts.tree_id_attr: getattr(node, opts.tree_id_attr),
opts.left_attr + '__gte': getattr(node, opts.left_attr),
opts.right_attr + '__lte': getattr(node, opts.right_attr),
})
self.fields['target'].queryset = valid_targets
self.fields['target'].widget.attrs['size'] = target_select_size
if level_indicator:
self.fields['target'].level_indicator = level_indicator
if position_choices:
self.fields['position'].choices = position_choices
def save(self):
"""
Attempts to move the node using the selected target and
position.
If an invalid move is attempted, the related error message will
be added to the form's non-field errors and the error will be
re-raised. Callers should attempt to catch ``InvalidMove`` to
redisplay the form with the error, should it occur.
"""
try:
self.node.move_to(self.cleaned_data['target'],
self.cleaned_data['position'])
return self.node
except InvalidMove as e:
self.errors[NON_FIELD_ERRORS] = ErrorList(e)
raise
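# Usage sketch (an illustration, not part of the original module): wiring
# MoveNodeForm up in a view. ``Category`` and the surrounding request
# handling are hypothetical placeholders.
#
#     node = Category.objects.get(pk=pk)
#     form = MoveNodeForm(node, request.POST)
#     if form.is_valid():
#         try:
#             form.save()
#         except InvalidMove:
#             pass  # the error is now in form.non_field_errors(); redisplay the form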
class MPTTAdminForm(forms.ModelForm):
"""
A form which validates that the chosen parent for a node isn't one of
its descendants.
"""
def clean(self):
cleaned_data = super(MPTTAdminForm, self).clean()
opts = self._meta.model._mptt_meta
parent = cleaned_data.get(opts.parent_attr)
if self.instance and parent:
if parent.is_descendant_of(self.instance, include_self=True):
if opts.parent_attr not in self._errors:
self._errors[opts.parent_attr] = forms.util.ErrorList()
self._errors[opts.parent_attr].append(_('Invalid parent'))
del self.cleaned_data[opts.parent_attr]
return cleaned_data
| holachek/ecosense | app/mptt/forms.py | Python | mit | 7,159 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.enums",
marshal="google.ads.googleads.v9",
manifest={"ClickTypeEnum",},
)
class ClickTypeEnum(proto.Message):
r"""Container for enumeration of Google Ads click types.
"""
class ClickType(proto.Enum):
r"""Enumerates Google Ads click types."""
UNSPECIFIED = 0
UNKNOWN = 1
APP_DEEPLINK = 2
BREADCRUMBS = 3
BROADBAND_PLAN = 4
CALL_TRACKING = 5
CALLS = 6
CLICK_ON_ENGAGEMENT_AD = 7
GET_DIRECTIONS = 8
LOCATION_EXPANSION = 9
LOCATION_FORMAT_CALL = 10
LOCATION_FORMAT_DIRECTIONS = 11
LOCATION_FORMAT_IMAGE = 12
LOCATION_FORMAT_LANDING_PAGE = 13
LOCATION_FORMAT_MAP = 14
LOCATION_FORMAT_STORE_INFO = 15
LOCATION_FORMAT_TEXT = 16
MOBILE_CALL_TRACKING = 17
OFFER_PRINTS = 18
OTHER = 19
PRODUCT_EXTENSION_CLICKS = 20
PRODUCT_LISTING_AD_CLICKS = 21
SITELINKS = 22
STORE_LOCATOR = 23
URL_CLICKS = 25
VIDEO_APP_STORE_CLICKS = 26
VIDEO_CALL_TO_ACTION_CLICKS = 27
VIDEO_CARD_ACTION_HEADLINE_CLICKS = 28
VIDEO_END_CAP_CLICKS = 29
VIDEO_WEBSITE_CLICKS = 30
VISUAL_SITELINKS = 31
WIRELESS_PLAN = 32
PRODUCT_LISTING_AD_LOCAL = 33
PRODUCT_LISTING_AD_MULTICHANNEL_LOCAL = 34
PRODUCT_LISTING_AD_MULTICHANNEL_ONLINE = 35
PRODUCT_LISTING_ADS_COUPON = 36
PRODUCT_LISTING_AD_TRANSACTABLE = 37
PRODUCT_AD_APP_DEEPLINK = 38
SHOWCASE_AD_CATEGORY_LINK = 39
SHOWCASE_AD_LOCAL_STOREFRONT_LINK = 40
SHOWCASE_AD_ONLINE_PRODUCT_LINK = 42
SHOWCASE_AD_LOCAL_PRODUCT_LINK = 43
PROMOTION_EXTENSION = 44
SWIPEABLE_GALLERY_AD_HEADLINE = 45
SWIPEABLE_GALLERY_AD_SWIPES = 46
SWIPEABLE_GALLERY_AD_SEE_MORE = 47
SWIPEABLE_GALLERY_AD_SITELINK_ONE = 48
SWIPEABLE_GALLERY_AD_SITELINK_TWO = 49
SWIPEABLE_GALLERY_AD_SITELINK_THREE = 50
SWIPEABLE_GALLERY_AD_SITELINK_FOUR = 51
SWIPEABLE_GALLERY_AD_SITELINK_FIVE = 52
HOTEL_PRICE = 53
PRICE_EXTENSION = 54
HOTEL_BOOK_ON_GOOGLE_ROOM_SELECTION = 55
SHOPPING_COMPARISON_LISTING = 56
__all__ = tuple(sorted(__protobuf__.manifest))
| googleads/google-ads-python | google/ads/googleads/v9/enums/types/click_type.py | Python | apache-2.0 | 2,994 |
from setuptools import setup, Extension
from os.path import join as pjoin
from os.path import dirname
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(pjoin(dirname(__file__), fname)).read()
dummy_module = Extension('dummy_ortools_dependency',
sources = ['dummy/dummy_ortools_dependency.cc'],
DELETEUNIX extra_link_args=['/MANIFEST'],
)
setup(
name='py3-ortools',
version='2.VVVV',
packages=[
'ortools',
'ortools.algorithms',
'ortools.constraint_solver',
'ortools.graph',
'ortools.linear_solver',],
ext_modules = [dummy_module],
install_requires = [
'protobuf >= 2.8.0'],
package_data = {
'ortools.constraint_solver' : ['_pywrapcp.dll'],
'ortools.linear_solver' : ['_pywraplp.dll'],
'ortools.graph' : ['_pywrapgraph.dll'],
'ortools.algorithms' : ['_pywrapknapsack_solver.dll'],
DELETEWIN 'ortools' : ['libortools.DLL']
},
license='Apache 2.0',
author = 'Google Inc',
author_email = 'lperron@google.com',
description = 'Google OR-Tools python libraries and modules',
keywords = ('operations research, constraint programming, ' +
'linear programming,' + 'flow algorithms,' +
'python'),
url = 'https://developers.google.com/optimization/',
download_url = 'https://github.com/google/or-tools/releases',
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Topic :: Office/Business :: Scheduling',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules'],
long_description = read('README.txt'),
)
| pombredanne/or-tools | tools/setup_py3.py | Python | apache-2.0 | 2,291 |
import random
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPLocalizer
from toontown.toonbase import TTLocalizer
notify = DirectNotifyGlobal.directNotify.newCategory('SuitDialog')
def getBrushOffIndex(suitName):
if SuitBrushOffs.has_key(suitName):
brushoffs = SuitBrushOffs[suitName]
else:
brushoffs = SuitBrushOffs[None]
num = len(brushoffs)
chunk = 100 / num
randNum = random.randint(0, 99)
count = chunk
for i in range(num):
if randNum < count:
return i
count += chunk
notify.error('getBrushOffIndex() - no brush off found!')
return
def getBrushOffText(suitName, index):
if SuitBrushOffs.has_key(suitName):
brushoffs = SuitBrushOffs[suitName]
else:
brushoffs = SuitBrushOffs[None]
return brushoffs[index]
SuitBrushOffs = OTPLocalizer.SuitBrushOffs
| ksmit799/Toontown-Source | toontown/suit/SuitDialog.py | Python | mit | 892 |
from instal.firstprinciples.TestEngine import InstalSingleShotTestRunner, InstalTestCase
from instal.instalexceptions import InstalTestNotImplemented
class InstalTraceText(InstalTestCase):
def test_trace_text_runs(self):
raise InstalTestNotImplemented
| cblop/tropic | instal-linux/instal/firstprinciples/tracers/TestInstalTraceText.py | Python | epl-1.0 | 266 |
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Time handling for CWMP.
CWMP uses ISO 8601 time strings, and further specifies that UTC time be
used unless otherwise specified (and then, to my knowledge, never
specifies a case where another timezone can be used).
Python datetime objects are suitable for use with CWMP so long as
they contain a tzinfo specifying UTC offset=0. Most Python programmers
create datetime objects with no tzinfo, so we add one.
"""
__author__ = 'dgentry@google.com (Denton Gentry)'
import datetime
def format(arg):
"""Print a datetime with 'Z' for the UTC timezone, as CWMP requires."""
if not arg:
return '0001-01-01T00:00:00Z' # CWMP Unknown Time
elif isinstance(arg, float):
dt = datetime.datetime.utcfromtimestamp(arg)
else:
dt = arg
if not dt.tzinfo or not dt.tzinfo.utcoffset(dt):
if dt.microsecond:
return dt.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
else:
return dt.strftime('%Y-%m-%dT%H:%M:%SZ')
else:
return dt.isoformat()
def parse(arg):
# TODO(dgentry) handle timezone properly
try:
dt = datetime.datetime.strptime(arg, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
dt = datetime.datetime.strptime(arg, '%Y-%m-%dT%H:%M:%S.%fZ')
return dt
def valid(arg):
# pylint: disable-msg=W0702
try:
parse(arg)
except:
return False
return True
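# Example (a sketch, not part of the original module): naive datetimes are
# treated as UTC and serialized with a trailing 'Z', as CWMP expects.
#
#     >>> format(datetime.datetime(2011, 12, 31, 23, 59, 59))
#     '2011-12-31T23:59:59Z'
#     >>> parse('2011-12-31T23:59:59Z')
#     datetime.datetime(2011, 12, 31, 23, 59, 59)
#     >>> valid('not a timestamp')
#     False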
| pombredanne/catawampus | tr/cwmpdate.py | Python | apache-2.0 | 1,898 |
'''
Support for materials
'''
from material import *
| andykee/pyoptools | pyoptools/raytrace/mat_lib/__init__.py | Python | bsd-3-clause | 52 |
import logging
from yatcobot.plugins.notifiers import NotifierABC
logger = logging.getLogger(__name__)
class NotificationService:
def __init__(self):
self.active_notifiers = NotifierABC.get_enabled()
def send_notification(self, title, message):
"""Sends a message to all enabled notifiers"""
for notifier in self.active_notifiers:
notifier.notify(title, message)
def is_enabled(self):
"""Checks if any notifier is enabled"""
if len(self.active_notifiers) > 0:
return True
return False
| buluba89/Yatcobot | yatcobot/notifier.py | Python | gpl-2.0 | 577 |
# -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary
~~~~~~~~~~~~~~~~~~~~~~
Sphinx extension that adds an autosummary:: directive, which can be
used to generate function/method/attribute/etc. summary lists, similar
to those output eg. by Epydoc and other API doc generation tools.
An :autolink: role is also provided.
autosummary directive
---------------------
The autosummary directive has the form::
.. autosummary::
:nosignatures:
:toctree: generated/
module.function_1
module.function_2
...
and it generates an output table (containing signatures, optionally)
======================== =============================================
module.function_1(args) Summary line from the docstring of function_1
module.function_2(args) Summary line from the docstring
...
======================== =============================================
If the :toctree: option is specified, files matching the function names
are inserted to the toctree with the given prefix:
generated/module.function_1
generated/module.function_2
...
Note: The file names contain the module:: or currentmodule:: prefixes.
.. seealso:: autosummary_generate.py
autolink role
-------------
The autolink role functions as ``:obj:`` when the name referred can be
resolved to a Python object, and otherwise it becomes simple emphasis.
This can be used as the default role to make links 'smart'.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import inspect
import posixpath
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from docutils import nodes
from sphinx import addnodes
from sphinx.util.compat import Directive
# -- autosummary_toc node ------------------------------------------------------
class autosummary_toc(nodes.comment):
pass
def process_autosummary_toc(app, doctree):
"""Insert items described in autosummary:: to the TOC tree, but do
not generate the toctree:: list.
"""
env = app.builder.env
crawled = {}
def crawl_toc(node, depth=1):
crawled[node] = True
for j, subnode in enumerate(node):
try:
if (isinstance(subnode, autosummary_toc)
and isinstance(subnode[0], addnodes.toctree)):
env.note_toctree(env.docname, subnode[0])
continue
except IndexError:
continue
if not isinstance(subnode, nodes.section):
continue
if subnode not in crawled:
crawl_toc(subnode, depth+1)
crawl_toc(doctree)
def autosummary_toc_visit_html(self, node):
"""Hide autosummary toctree list in HTML output."""
raise nodes.SkipNode
def autosummary_noop(self, node):
pass
# -- autosummary_table node ----------------------------------------------------
class autosummary_table(nodes.comment):
pass
def autosummary_table_visit_html(self, node):
"""Make the first column of the table non-breaking."""
try:
tbody = node[0][0][-1]
for row in tbody:
col1_entry = row[0]
par = col1_entry[0]
for j, subnode in enumerate(list(par)):
if isinstance(subnode, nodes.Text):
new_text = unicode(subnode.astext())
new_text = new_text.replace(u" ", u"\u00a0")
par[j] = nodes.Text(new_text)
except IndexError:
pass
# -- autodoc integration -------------------------------------------------------
class FakeDirective:
env = {}
genopt = {}
def get_documenter(obj, parent):
"""Get an autodoc.Documenter class suitable for documenting the given
object.
*obj* is the Python object to be documented, and *parent* is
another Python object (e.g. a module or a class) to which *obj*
belongs.
"""
from sphinx.ext.autodoc import AutoDirective, DataDocumenter, \
ModuleDocumenter
if inspect.ismodule(obj):
# ModuleDocumenter.can_document_member always returns False
return ModuleDocumenter
# Construct a fake documenter for *parent*
if parent is not None:
parent_doc_cls = get_documenter(parent, None)
else:
parent_doc_cls = ModuleDocumenter
if hasattr(parent, '__name__'):
parent_doc = parent_doc_cls(FakeDirective(), parent.__name__)
else:
parent_doc = parent_doc_cls(FakeDirective(), "")
# Get the correct documenter class for *obj*
classes = [cls for cls in AutoDirective._registry.values()
if cls.can_document_member(obj, '', False, parent_doc)]
if classes:
classes.sort(key=lambda cls: cls.priority)
return classes[-1]
else:
return DataDocumenter
# -- .. autosummary:: ----------------------------------------------------------
class Autosummary(Directive):
"""
Pretty table containing short signatures and summaries of functions etc.
autosummary can also optionally generate a hidden toctree:: node.
"""
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
has_content = True
option_spec = {
'toctree': directives.unchanged,
'nosignatures': directives.flag,
'template': directives.unchanged,
}
def warn(self, msg):
self.warnings.append(self.state.document.reporter.warning(
msg, line=self.lineno))
def run(self):
self.env = env = self.state.document.settings.env
self.genopt = {}
self.warnings = []
names = [x.strip().split()[0] for x in self.content
if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
items = self.get_items(names)
nodes = self.get_table(items)
if 'toctree' in self.options:
suffix = env.config.source_suffix
dirname = posixpath.dirname(env.docname)
tree_prefix = self.options['toctree'].strip()
docnames = []
for name, sig, summary, real_name in items:
docname = posixpath.join(tree_prefix, real_name)
if docname.endswith(suffix):
docname = docname[:-len(suffix)]
docname = posixpath.normpath(posixpath.join(dirname, docname))
if docname not in env.found_docs:
self.warn('toctree references unknown document %r'
% docname)
docnames.append(docname)
tocnode = addnodes.toctree()
tocnode['includefiles'] = docnames
tocnode['entries'] = [(None, docname) for docname in docnames]
tocnode['maxdepth'] = -1
tocnode['glob'] = None
tocnode = autosummary_toc('', '', tocnode)
nodes.append(tocnode)
return self.warnings + nodes
def get_items(self, names):
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
env = self.state.document.settings.env
prefixes = get_import_prefixes_from_env(env)
items = []
max_item_chars = 50
for name in names:
display_name = name
if name.startswith('~'):
name = name[1:]
display_name = name.split('.')[-1]
try:
real_name, obj, parent = import_by_name(name, prefixes=prefixes)
except ImportError:
self.warn('failed to import %s' % name)
items.append((name, '', '', name))
continue
# NB. using real_name here is important, since Documenters
# handle module prefixes slightly differently
documenter = get_documenter(obj, parent)(self, real_name)
if not documenter.parse_name():
self.warn('failed to parse name %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if not documenter.import_object():
self.warn('failed to import object %s' % real_name)
items.append((display_name, '', '', real_name))
continue
# -- Grab the signature
sig = documenter.format_signature()
if not sig:
sig = ''
else:
max_chars = max(10, max_item_chars - len(display_name))
sig = mangle_signature(sig, max_chars=max_chars)
sig = sig.replace('*', r'\*')
# -- Grab the summary
doc = list(documenter.process_doc(documenter.get_doc()))
while doc and not doc[0].strip():
doc.pop(0)
m = re.search(r"^([A-Z][^A-Z]*?\.\s)", " ".join(doc).strip())
if m:
summary = m.group(1).strip()
elif doc:
summary = doc[0].strip()
else:
summary = ''
items.append((display_name, sig, summary, real_name))
return items
def get_table(self, items):
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
"""
table_spec = addnodes.tabular_col_spec()
table_spec['spec'] = 'll'
table = autosummary_table('')
real_table = nodes.table('', classes=['longtable'])
table.append(real_table)
group = nodes.tgroup('', cols=2)
real_table.append(group)
group.append(nodes.colspec('', colwidth=10))
group.append(nodes.colspec('', colwidth=90))
body = nodes.tbody('')
group.append(body)
def append_row(*column_texts):
row = nodes.row('')
for text in column_texts:
node = nodes.paragraph('')
vl = ViewList()
vl.append(text, '<autosummary>')
self.state.nested_parse(vl, 0, node)
try:
if isinstance(node[0], nodes.paragraph):
node = node[0]
except IndexError:
pass
row.append(nodes.entry('', node))
body.append(row)
for name, sig, summary, real_name in items:
qualifier = 'obj'
if 'nosignatures' not in self.options:
col1 = ':%s:`%s <%s>`\ %s' % (qualifier, name, real_name, sig)
else:
col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
col2 = summary
append_row(col1, col2)
return [table_spec, table]
def mangle_signature(sig, max_chars=30):
"""Reformat a function signature to a more compact form."""
s = re.sub(r"^\((.*)\)$", r"\1", sig).strip()
# Strip strings (which can contain things that confuse the code below)
s = re.sub(r"\\\\", "", s)
s = re.sub(r"\\'", "", s)
s = re.sub(r"'[^']*'", "", s)
# Parse the signature to arguments + options
args = []
opts = []
opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
while s:
m = opt_re.search(s)
if not m:
# The rest are arguments
args = s.split(', ')
break
opts.insert(0, m.group(2))
s = m.group(1)[:-2]
# Produce a more compact signature
sig = limited_join(", ", args, max_chars=max_chars-2)
if opts:
if not sig:
sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars-4)
elif len(sig) < max_chars - 4 - 2 - 3:
sig += "[, %s]" % limited_join(", ", opts,
max_chars=max_chars-len(sig)-4-2)
return u"(%s)" % sig
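# Example (a sketch, not from the original source): keyword arguments are
# folded into a bracketed group.
#
#     >>> mangle_signature("(a, b, c=1, d=2)")
#     u'(a, b[, c, d])'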
def limited_join(sep, items, max_chars=30, overflow_marker="..."):
"""Join a number of strings to one, limiting the length to *max_chars*.
If the string overflows this limit, replace the last fitting item by
*overflow_marker*.
Returns: joined_string
"""
full_str = sep.join(items)
if len(full_str) < max_chars:
return full_str
n_chars = 0
n_items = 0
for j, item in enumerate(items):
n_chars += len(item) + len(sep)
if n_chars < max_chars - len(overflow_marker):
n_items += 1
else:
break
return sep.join(list(items[:n_items]) + [overflow_marker])
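# Example (a sketch, not from the original source): items that do not fit
# within max_chars are replaced by the overflow marker.
#
#     >>> limited_join(", ", ["alpha", "beta", "gamma"], max_chars=12)
#     'alpha, ...'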
# -- Importing items -----------------------------------------------------------
def get_import_prefixes_from_env(env):
"""
Obtain current Python import prefixes (for `import_by_name`)
from ``document.env``
"""
prefixes = [None]
currmodule = env.temp_data.get('py:module')
if currmodule:
prefixes.insert(0, currmodule)
currclass = env.temp_data.get('py:class')
if currclass:
if currmodule:
prefixes.insert(0, currmodule + "." + currclass)
else:
prefixes.insert(0, currclass)
return prefixes
def import_by_name(name, prefixes=[None]):
"""Import a Python object that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
tried = []
for prefix in prefixes:
try:
if prefix:
prefixed_name = '.'.join([prefix, name])
else:
prefixed_name = name
obj, parent = _import_by_name(prefixed_name)
return prefixed_name, obj, parent
except ImportError:
tried.append(prefixed_name)
raise ImportError('no module named %s' % ' or '.join(tried))
def _import_by_name(name):
"""Import a Python object given its full name."""
try:
name_parts = name.split('.')
# first try to interpret `name` as MODNAME.OBJ
modname = '.'.join(name_parts[:-1])
if modname:
try:
__import__(modname)
mod = sys.modules[modname]
return getattr(mod, name_parts[-1]), mod
except (ImportError, IndexError, AttributeError):
pass
# ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ...
last_j = 0
modname = None
for j in reversed(range(1, len(name_parts)+1)):
last_j = j
modname = '.'.join(name_parts[:j])
try:
__import__(modname)
except:# ImportError:
continue
if modname in sys.modules:
break
if last_j < len(name_parts):
parent = None
obj = sys.modules[modname]
for obj_name in name_parts[last_j:]:
parent = obj
obj = getattr(obj, obj_name)
return obj, parent
else:
return sys.modules[modname], None
except (ValueError, ImportError, AttributeError, KeyError), e:
raise ImportError(*e.args)
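# Example (a sketch, not from the original source): resolving a dotted name
# to an object and its parent module.
#
#     >>> import_by_name('os.path.join')[0]
#     'os.path.join'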
# -- :autolink: (smart default role) -------------------------------------------
def autolink_role(typ, rawtext, etext, lineno, inliner,
options={}, content=[]):
"""Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
"""
env = inliner.document.settings.env
r = env.get_domain('py').role('obj')(
'obj', rawtext, etext, lineno, inliner, options, content)
pnode = r[0][0]
prefixes = get_import_prefixes_from_env(env)
try:
name, obj, parent = import_by_name(pnode['reftarget'], prefixes)
except ImportError:
content = pnode[0]
r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
classes=content['classes'])
return r
def process_generate_options(app):
genfiles = app.config.autosummary_generate
ext = app.config.source_suffix
if genfiles and not hasattr(genfiles, '__len__'):
env = app.builder.env
genfiles = [x + ext for x in env.found_docs
if os.path.isfile(env.doc2path(x))]
if not genfiles:
return
from generate import generate_autosummary_docs
genfiles = [genfile + (not genfile.endswith(ext) and ext or '')
for genfile in genfiles]
generate_autosummary_docs(genfiles, builder=app.builder,
warn=app.warn, info=app.info, suffix=ext,
base_path=app.srcdir)
def setup(app):
# I need autodoc
app.setup_extension('sphinx.ext.autodoc')
app.add_node(autosummary_toc,
html=(autosummary_toc_visit_html, autosummary_noop),
latex=(autosummary_noop, autosummary_noop),
text=(autosummary_noop, autosummary_noop),
man=(autosummary_noop, autosummary_noop),
texinfo=(autosummary_noop, autosummary_noop))
app.add_node(autosummary_table,
html=(autosummary_table_visit_html, autosummary_noop),
latex=(autosummary_noop, autosummary_noop),
text=(autosummary_noop, autosummary_noop),
man=(autosummary_noop, autosummary_noop),
texinfo=(autosummary_noop, autosummary_noop))
app.add_directive('autosummary', Autosummary)
app.add_role('autolink', autolink_role)
app.connect('doctree-read', process_autosummary_toc)
app.connect('builder-inited', process_generate_options)
app.add_config_value('autosummary_generate', [], True)
| shadowmint/nwidget | lib/pyglet-1.4.4/doc/ext/autosummary/__init__.py | Python | apache-2.0 | 17,735 |
# -*- coding: utf-8 -*-
import unittest
from dialogue_system.language_understanding.attribute_extraction.rule_based_extractor import RuleBasedAttributeExtractor
class AttributeExtractorTest(unittest.TestCase):
def setUp(self):
self.extractor = RuleBasedAttributeExtractor()
def tearDown(self):
pass
def test_extract(self):
attribute = self.extractor.extract(text='ラーメンを食べたい')
self.assertEqual(attribute, {'LOCATION': '', 'GENRE': 'ラーメン', 'MAXIMUM_AMOUNT': ''})
attribute = self.extractor.extract(text='西新宿のあたり')
self.assertEqual(attribute, {'LOCATION': '西新宿', 'GENRE': '', 'MAXIMUM_AMOUNT': ''})
attribute = self.extractor.extract(text='1000円以下で')
self.assertEqual(attribute, {'LOCATION': '', 'GENRE': '', 'MAXIMUM_AMOUNT': '1000'})
| syugoing/communication | tests/language_understanding/attribute_extraction/extractor_test.py | Python | mit | 868 |
# Copyright 2016 Casey Jaymes
# This file is part of Expatriate.
#
# Expatriate is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Expatriate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Expatriate. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
from ..decorators import *
from .DurationType import DurationType
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class DayTimeDurationType(DurationType):
def parse_value(self, value):
m = re.fullmatch(r'-?P(\d+D)?(T(\d+H)?(\d+M)?(\d+(\.\d+)?S)?)?', value)
if not m or not re.fullmatch(r'.*[DHMS].*', value) or not re.fullmatch(r'.*[^T]', value):
raise ValueError('Unable to parse xs:DayTimeDurationType value')
return super().parse_value(value)
def produce_value(self, value):
months, seconds = value
if months != 0:
raise ValueError('xs:DayTimeDurationType requires 0 for months value')
return super().produce_value(value)
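# Example (a sketch, not part of the original module): values such as 'P1DT2H'
# or 'PT0.5S' pass the lexical check, while a bare 'P' or a year/month
# duration like 'P1Y' raises ValueError in parse_value.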
| cjaymes/expatriate | src/expatriate/model/xs/DayTimeDurationType.py | Python | lgpl-3.0 | 1,473 |
"""
============================================================
Reconstruction of the diffusion signal with the Tensor model
============================================================
The diffusion tensor model is a model that describes the diffusion within a
voxel. First proposed by Basser and colleagues [Basser1994]_, it has been very
influential in demonstrating the utility of diffusion MRI in characterizing the
micro-structure of white matter tissue and of the biophysical properties of
tissue, inferred from local diffusion properties and it is still very commonly
used.
The diffusion tensor models the diffusion signal as:
.. math::
\frac{S(\mathbf{g}, b)}{S_0} = e^{-b\mathbf{g}^T \mathbf{D} \mathbf{g}}
Where $\mathbf{g}$ is a unit vector in 3 space indicating the direction of
measurement and b are the parameters of measurement, such as the strength and
duration of diffusion-weighting gradient. $S(\mathbf{g}, b)$ is the
diffusion-weighted signal measured and $S_0$ is the signal conducted in a
measurement with no diffusion weighting. $\mathbf{D}$ is a positive-definite quadratic
form, which contains six free parameters to be fit. These six parameters are:
.. math::
\mathbf{D} = \begin{pmatrix} D_{xx} & D_{xy} & D_{xz} \\
D_{yx} & D_{yy} & D_{yz} \\
D_{zx} & D_{zy} & D_{zz} \\ \end{pmatrix}
This matrix is a variance/covariance matrix of the diffusivity along the three
spatial dimensions. Note that we can assume that diffusivity has antipodal
symmetry, so elements across the diagonal are equal. For example:
$D_{xy} = D_{yx}$. This is why there are only 6 free parameters to estimate
here.
In the following example we show how to reconstruct your diffusion datasets
using a single tensor model.
First import the necessary modules:
``numpy`` is for numerical computation
"""
import numpy as np
"""
``nibabel`` is for loading imaging datasets
"""
import nibabel as nib
"""
``dipy.reconst`` is for the reconstruction algorithms which we use to create
voxel models from the raw data.
"""
import dipy.reconst.dti as dti
"""
``dipy.data`` is used for small datasets that we use in tests and examples.
"""
from dipy.data import fetch_stanford_hardi
"""
Fetch will download the raw dMRI dataset of a single subject. The size of the
dataset is 87 MBytes. You only need to fetch once.
"""
fetch_stanford_hardi()
"""
Next, we read the saved dataset
"""
from dipy.data import read_stanford_hardi
img, gtab = read_stanford_hardi()
"""
img contains a nibabel Nifti1Image object (with the data) and gtab contains a
GradientTable object (information about the gradients e.g. b-values and
b-vectors).
"""
data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
data.shape ``(81, 106, 76, 160)``
First of all, we mask and crop the data. This is a quick way to avoid
calculating Tensors on the background of the image. This is done using dipy's
mask module.
"""
from dipy.segment.mask import median_otsu
maskdata, mask = median_otsu(data, 3, 1, True,
vol_idx=range(10, 50), dilate=2)
print('maskdata.shape (%d, %d, %d, %d)' % maskdata.shape)
"""
maskdata.shape ``(72, 87, 59, 160)``
Now that we have prepared the datasets we can go forward with the voxel
reconstruction. First, we instantiate the Tensor model in the following way.
"""
tenmodel = dti.TensorModel(gtab)
"""
Fitting the data is very simple. We just need to call the fit method of the
TensorModel in the following way:
"""
tenfit = tenmodel.fit(maskdata)
"""
The fit method creates a TensorFit object which contains the fitting parameters
and other attributes of the model. For example we can generate fractional
anisotropy (FA) from the eigen-values of the tensor. FA is used to characterize
the degree to which the distribution of diffusion in a voxel is
directional. That is, whether there is relatively unrestricted diffusion in one
particular direction.
Mathematically, FA is defined as the normalized variance of the eigen-values of
the tensor:
.. math::
FA = \sqrt{\frac{1}{2}\frac{(\lambda_1-\lambda_2)^2+(\lambda_1-
\lambda_3)^2+(\lambda_2-\lambda_3)^2}{\lambda_1^2+
\lambda_2^2+\lambda_3^2}}
Note that FA should be interpreted carefully. It may be an indication of the
density of packing of fibers in a voxel, and the amount of myelin wrapping these
axons, but it is not always a measure of "tissue integrity". For example, FA
may decrease in locations in which there is fanning of white matter fibers, or
where more than one population of white matter fibers crosses.
"""
print('Computing anisotropy measures (FA, MD, RGB)')
from dipy.reconst.dti import fractional_anisotropy, color_fa, lower_triangular
FA = fractional_anisotropy(tenfit.evals)
"""
In the background of the image the fitting will not be accurate because there is no
signal and possibly we will find FA values with nans (not a number). We can
easily remove these in the following way.
"""
FA[np.isnan(FA)] = 0
"""
Saving the FA images is very easy using nibabel. We need the FA volume and the
affine matrix which transform the image's coordinates to the world coordinates.
Here, we choose to save the FA in float32.
"""
fa_img = nib.Nifti1Image(FA.astype(np.float32), img.get_affine())
nib.save(fa_img, 'tensor_fa.nii.gz')
"""
You can now see the result with any nifti viewer or check it slice by slice
using matplotlib_'s imshow. In the same way you can save the eigen values, the
eigen vectors or any other properties of the Tensor.
"""
evecs_img = nib.Nifti1Image(tenfit.evecs.astype(np.float32), img.get_affine())
nib.save(evecs_img, 'tensor_evecs.nii.gz')
"""
Other tensor statistics can be calculated from the `tenfit` object. For example,
a commonly calculated statistic is the mean diffusivity (MD). This is simply the
mean of the eigenvalues of the tensor. Since FA is a normalized
measure of variance and MD is the mean, they are often used as complementary
measures. In `dipy`, there are two equivalent ways to calculate the mean
diffusivity. One is by calling the `mean_diffusivity` module function on the
eigen-values of the TensorFit class instance:
"""
MD1 = dti.mean_diffusivity(tenfit.evals)
nib.save(nib.Nifti1Image(MD1.astype(np.float32), img.get_affine()), 'tensors_md.nii.gz')
"""
The other is to call the TensorFit class method:
"""
MD2 = tenfit.md
"""
Obviously, the quantities are identical.
We can also compute the colored FA or RGB-map [Pajevic1999]_. First, we make sure
that the FA is scaled between 0 and 1, we compute the RGB map and save it.
"""
FA = np.clip(FA, 0, 1)
RGB = color_fa(FA, tenfit.evecs)
nib.save(nib.Nifti1Image(np.array(255 * RGB, 'uint8'), img.get_affine()), 'tensor_rgb.nii.gz')
"""
Let's try to visualize the tensor ellipsoids of a small rectangular
area in an axial slice of the splenium of the corpus callosum (CC).
"""
print('Computing tensor ellipsoids in a part of the splenium of the CC')
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
from dipy.viz import fvtk
ren = fvtk.ren()
evals = tenfit.evals[13:43, 44:74, 28:29]
evecs = tenfit.evecs[13:43, 44:74, 28:29]
"""
We can color the ellipsoids using the ``color_fa`` values that we calculated
above. In this example we additionally normalize the values to increase the
contrast.
"""
cfa = RGB[13:43, 44:74, 28:29]
cfa /= cfa.max()
fvtk.add(ren, fvtk.tensor(evals, evecs, cfa, sphere))
print('Saving illustration as tensor_ellipsoids.png')
fvtk.record(ren, n_frames=1, out_path='tensor_ellipsoids.png', size=(600, 600))
"""
.. figure:: tensor_ellipsoids.png
:align: center
**Tensor Ellipsoids**.
"""
fvtk.clear(ren)
"""
Finally, we can visualize the tensor orientation distribution functions
for the same area as we did with the ellipsoids.
"""
tensor_odfs = tenmodel.fit(data[20:50, 55:85, 38:39]).odf(sphere)
fvtk.add(ren, fvtk.sphere_funcs(tensor_odfs, sphere, colormap=None))
#fvtk.show(r)
print('Saving illustration as tensor_odfs.png')
fvtk.record(ren, n_frames=1, out_path='tensor_odfs.png', size=(600, 600))
"""
.. figure:: tensor_odfs.png
:align: center
**Tensor ODFs**.
Note that while the tensor model is an accurate and reliable model of the
diffusion signal in the white matter, it has the drawback that it only has one
principal diffusion direction. Therefore, in locations in the brain that
contain multiple fiber populations crossing each other, the tensor model may
indicate that the principal diffusion direction is intermediate to these
directions. Therefore, using the principal diffusion direction for tracking in
these locations may be misleading and may lead to errors in defining the
tracks. Fortunately, other reconstruction methods can be used to represent the
diffusion and fiber orientations in those locations. These are presented in
other examples.
.. [Basser1994] Basser PJ, Mattielo J, LeBihan (1994). MR diffusion tensor
spectroscopy and imaging.
.. [Pajevic1999] Pajevic S, Pierpaoli (1999). Color schemes to represent
the orientation of anisotropic tissues from diffusion tensor
data: application to white matter fiber tract mapping in
the human brain.
.. include:: ../links_names.inc
"""
| StongeEtienne/dipy | doc/examples/reconst_dti.py | Python | bsd-3-clause | 9,303 |
from enigma import eEPGCache
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.Converter.genre import getGenreStringSub
class EventName(Converter, object):
NAME = 0
SHORT_DESCRIPTION = 1
EXTENDED_DESCRIPTION = 2
FULL_DESCRIPTION = 3
ID = 4
NAME_NOW = 5
NAME_NEXT = 6
NAME_NEXT2 = 7
GENRE = 8
RATING = 9
SRATING = 10
PDC = 12
PDCTIME = 13
PDCTIMESHORT = 14
ISRUNNINGSTATUS = 15
NEXT_DESCRIPTION = 21
THIRD_NAME = 22
THIRD_DESCRIPTION = 23
def __init__(self, type):
Converter.__init__(self, type)
self.epgcache = eEPGCache.getInstance()
if type == "Description":
self.type = self.SHORT_DESCRIPTION
elif type == "ExtendedDescription":
self.type = self.EXTENDED_DESCRIPTION
elif type == "FullDescription":
self.type = self.FULL_DESCRIPTION
elif type == "ID":
self.type = self.ID
elif type == "NameNow" or type == "NowName":
self.type = self.NAME_NOW
elif type == "NameNext" or type == "NextName":
self.type = self.NAME_NEXT
elif type == "NameNextOnly" or type == "NextNameOnly":
self.type = self.NAME_NEXT2
elif type == "Genre":
self.type = self.GENRE
elif type == "Rating":
self.type = self.RATING
elif type == "SmallRating":
self.type = self.SRATING
elif type == "Pdc":
self.type = self.PDC
elif type == "PdcTime":
self.type = self.PDCTIME
elif type == "PdcTimeShort":
self.type = self.PDCTIMESHORT
elif type == "IsRunningStatus":
self.type = self.ISRUNNINGSTATUS
elif type == "NextDescription":
self.type = self.NEXT_DESCRIPTION
elif type == "ThirdName":
self.type = self.THIRD_NAME
elif type == "ThirdDescription":
self.type = self.THIRD_DESCRIPTION
else:
self.type = self.NAME
@cached
def getBoolean(self):
event = self.source.event
if event is None:
return False
if self.type == self.PDC:
if event.getPdcPil():
return True
return False
boolean = property(getBoolean)
@cached
def getText(self):
event = self.source.event
if event is None:
return ""
if self.type == self.NAME:
return event.getEventName()
elif self.type == self.SRATING:
rating = event.getParentalData()
if rating is None:
return ""
else:
country = rating.getCountryCode()
age = rating.getRating()
if age == 0:
return _("All ages")
elif age > 15:
return _("bc%s") % age
else:
age += 3
return " %d+" % age
elif self.type == self.RATING:
rating = event.getParentalData()
if rating is None:
return ""
else:
country = rating.getCountryCode()
age = rating.getRating()
if age == 0:
return _("Rating undefined")
elif age > 15:
return _("Rating defined by broadcaster - %d") % age
else:
age += 3
return _("Minimum age %d years") % age
elif self.type == self.GENRE:
genre = event.getGenreData()
if genre is None:
return ""
else:
return getGenreStringSub(genre.getLevel1(), genre.getLevel2())
elif self.type == self.NAME_NOW:
return pgettext("now/next: 'now' event label", "Now") + ": " + event.getEventName()
elif self.type == self.SHORT_DESCRIPTION:
return event.getShortDescription()
elif self.type == self.EXTENDED_DESCRIPTION:
return event.getExtendedDescription() or event.getShortDescription()
elif self.type == self.FULL_DESCRIPTION:
description = event.getShortDescription()
extended = event.getExtendedDescription()
if description and extended:
description += '\n'
return description + extended
elif self.type == self.ID:
return str(event.getEventId())
elif self.type == self.PDC:
if event.getPdcPil():
return _("PDC")
return ""
elif self.type in (self.PDCTIME, self.PDCTIMESHORT):
pil = event.getPdcPil()
if pil:
if self.type == self.PDCTIMESHORT:
return _("%02d:%02d") % ((pil & 0x7C0) >> 6, (pil & 0x3F))
return _("%d.%02d. %02d:%02d") % ((pil & 0xF8000) >> 15, (pil & 0x7800) >> 11, (pil & 0x7C0) >> 6, (pil & 0x3F))
return ""
elif self.type == self.ISRUNNINGSTATUS:
if event.getPdcPil():
running_status = event.getRunningStatus()
if running_status == 1:
return "not running"
if running_status == 2:
return "starts in a few seconds"
if running_status == 3:
return "pausing"
if running_status == 4:
return "running"
if running_status == 5:
return "service off-air"
if running_status in (6,7):
return "reserved for future use"
return "undefined"
return ""
elif int(self.type) in (6,7) or int(self.type) >= 21:
try:
reference = self.source.service
info = reference and self.source.info
if info is None:
return
test = [ 'ITSECX', (reference.toString(), 1, -1, 1440) ] # search next 24 hours
self.list = [] if self.epgcache is None else self.epgcache.lookupEvent(test)
if self.list:
if self.type == self.NAME_NEXT and self.list[1][1]:
return pgettext("now/next: 'next' event label", "Next") + ": " + self.list[1][1]
elif self.type == self.NAME_NEXT2 and self.list[1][1]:
return self.list[1][1]
elif self.type == self.NEXT_DESCRIPTION and (self.list[1][2] or self.list[1][3]):
description = self.list[1][2]
extended = self.list[1][3]
if (description and extended) and (description[0:20] != extended[0:20]):
description += '\n'
return description + extended
elif self.type == self.THIRD_NAME and self.list[2][1]:
return pgettext("third event: 'third' event label", "Later") + ": " + self.list[2][1]
elif self.type == self.THIRD_DESCRIPTION and (self.list[2][2] or self.list[2][3]):
description = self.list[2][2]
extended = self.list[2][3]
if (description and extended) and (description[0:20] != extended[0:20]):
description += '\n'
return description + extended
else:
# failed to return any epg data.
return ""
except:
# failed to return any epg data.
if self.type == self.NAME_NEXT:
return pgettext("now/next: 'next' event label", "Next") + ": " + event.getEventName()
return ""
text = property(getText)
| mrnamingo/vix4-34-enigma2-bcm | lib/python/Components/Converter/EventName.py | Python | gpl-2.0 | 6,169 |
from .models import Experiment
from django.test import TestCase
class ExperimentTest(TestCase):
def setUp(self):
self.experiment = Experiment.objects.create(
name = 'Example Experiment',
slug = 'example')
self.event_1 = self.experiment.event_set.create(
extra_data = '{"foo": 1, "bar": 2}',
)
self.event_2 = self.experiment.event_set.create(
extra_data = '{"foo": 3, "quux": 2}'
)
def test_extra_fields(self):
self.assertEqual(
self.experiment.extra_fields,
set(['foo', 'bar', 'quux']))
def tearDown(self):
self.event_1.delete()
self.event_2.delete()
self.experiment.delete()
| mysociety/pombola | pombola/experiments/tests.py | Python | agpl-3.0 | 737 |
import ew
class Include(ew.Widget):
template='jinja:allura:templates/widgets/include.html'
params=['artifact', 'attrs']
artifact=None
attrs = {
'style':'width:270px;float:right;background-color:#ccc'
}
class DownloadButton(ew.Widget):
template='jinja:allura:templates/widgets/download_button.html'
params=['project']
project=None
def resources(self):
yield ew.jinja2_ew.JSScript('''
$(function(){$(".download-button-%s").load("%s");
});''' % (self.project._id,self.project.best_download_url()))
| Bitergia/allura | Allura/allura/lib/widgets/macros.py | Python | apache-2.0 | 574 |
# -*- coding: utf-8 -*-
__author__ = 'eso'
import sys
sys.path.append('../../')
from tools.petscan import PetScan
import re
import requests
import pywikibot
test_sites = ['Edda',
'Edda/Erläuterungen/Anmerkungen']
sites = ['Edda/Erläuterungen/Einleitung',
'Edda/Register',
'Edda/Snorra-Edda/Aus der Skalda',
'Edda/Snorra-Edda/Bragarœdhur',
'Edda/Snorra-Edda/Gylfaginnîng',
'Edda/Snorra-Edda/Sôlarliôth',
'Edda/Ältere Edda/Alvîssmâl',
'Edda/Ältere Edda/Atlakvidha',
'Edda/Ältere Edda/Atlamâl',
'Edda/Ältere Edda/Brot af Brynhildarkvidhu',
'Edda/Ältere Edda/Drâp Niflunga',
'Edda/Ältere Edda/Fafnismâl',
'Edda/Ältere Edda/Fiölsvinnsmâl',
'Edda/Ältere Edda/Grimnismâl',
'Edda/Ältere Edda/Grôgaldr',
'Edda/Ältere Edda/Gudhrûnarhvöt',
'Edda/Ältere Edda/Gudhrûnarkvidha fyrsta',
'Edda/Ältere Edda/Gudhrûnarkvidha thridhja',
'Edda/Ältere Edda/Gudhrûnarkvidha önnur',
'Edda/Ältere Edda/Hamdismâl',
'Edda/Ältere Edda/Harbardhsliodh',
'Edda/Ältere Edda/Helgakvidha Hjörvardhssonar',
'Edda/Ältere Edda/Helgakvidha Hundingsbana fyrri',
'Edda/Ältere Edda/Helgakvidha Hundingsbana önnur',
'Edda/Ältere Edda/Helreidh Brynhildar',
'Edda/Ältere Edda/Hrafnagaldr Ôdhins',
'Edda/Ältere Edda/Hyndluliod',
'Edda/Ältere Edda/Hâvamâl',
'Edda/Ältere Edda/Hŷmiskvidha',
'Edda/Ältere Edda/Oddrûnargrâtr',
'Edda/Ältere Edda/Oegisdrecka',
'Edda/Ältere Edda/Rîgsmâl',
'Edda/Ältere Edda/Sigrdrîfumâl',
'Edda/Ältere Edda/Sigurdharkvidha Fafnisbana fyrsta edha Grîpisspâ',
'Edda/Ältere Edda/Sigurdharkvidha Fafnisbana thridhja',
'Edda/Ältere Edda/Sigurdharkvidha Fafnisbana önnur',
'Edda/Ältere Edda/Sinfiötlalok',
'Edda/Ältere Edda/Skîrnisför',
'Edda/Ältere Edda/Thrymskvidha oder Hamarsheimt',
'Edda/Ältere Edda/Vafthrûdhnismâl',
'Edda/Ältere Edda/Vegtamskvidha',
'Edda/Ältere Edda/Völundarkvidha',
'Edda/Ältere Edda/Völuspâ']
site = pywikibot.Site('de', 'wikisource')
for lemma in sites:
page = pywikibot.Page(site, lemma)
page.delete(reason= 'Verschieberest', prompt=False)
| the-it/WS_THEbotIT | archive/online/2015/150810_Edda_delete.py | Python | mit | 2,429 |
"""
LandmarkTransformationTool (TransformationTool)
:Authors:
Berend Klein Haneveld
"""
from Landmark import Landmark
from TransformationTool import TransformationTool
from ui.widgets.PointsWidget import PointsWidget
from ui.widgets.StatusWidget import StatusWidget
from ui.transformations.TwoStepPicker import TwoStepPicker
from ui.transformations.SurfacePicker import SurfacePicker
from ui.transformations import Transformation
from core.decorators import overrides
from core.vtkDrawing import TransformWithMatrix
from core.project import ProjectController
from vtk import vtkPoints
from vtk import vtkLandmarkTransform
from vtk import vtkTransform
from PySide.QtGui import QWidget
from PySide.QtGui import QGridLayout
from PySide.QtGui import QComboBox
from PySide.QtGui import QLabel
from PySide.QtCore import Signal
from PySide.QtCore import Slot
from PySide.QtCore import Qt
# Define picker types
SurfaceType = "SurfaceType"
TwoStepType = "TwoStepType"
class LandmarkTransformationTool(TransformationTool):
"""
LandmarkTransformationTool
"""
updatedLandmarks = Signal(list)
def __init__(self):
super(LandmarkTransformationTool, self).__init__()
self.fixedPickerType = SurfaceType
self.movingPickerType = SurfaceType
self.fixedPicker = self._pickerForType(self.fixedPickerType)
self.movingPicker = self._pickerForType(self.movingPickerType)
self.landmarkPointSets = [] # Sets of points
self.landmarkIndicators = [] # All the landmark indicator objects
self.originalTransform = None
self.originalScalingTransform = None
self.activeIndex = 0
self.landmarkTransformType = 0 # Rigid, Similarity or Affine
@overrides(TransformationTool)
def getParameterWidget(self):
self.pointsWidget = PointsWidget()
self.landmarkComboBox = QComboBox()
self.landmarkComboBox.addItem("Rigid body")
self.landmarkComboBox.addItem("Similarity")
self.landmarkComboBox.addItem("Affine")
layout = QGridLayout()
layout.setAlignment(Qt.AlignTop)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(QLabel("Transform type:"), 0, 0)
layout.addWidget(self.landmarkComboBox, 0, 1)
layout.addWidget(self.pointsWidget, 1, 0, 1, 2)
self.updatedLandmarks.connect(self.pointsWidget.setPoints)
self.landmarkComboBox.currentIndexChanged.connect(self.landmarkTransformTypeChanged)
self.pointsWidget.activeLandmarkChanged.connect(self.setActiveLandmark)
self.pointsWidget.landmarkDeleted.connect(self.deleteLandmark)
widget = QWidget()
widget.setLayout(layout)
return widget
@overrides(TransformationTool)
def setRenderWidgets(self, fixed=None, moving=None, multi=None):
self.fixedWidget = fixed
self.movingWidget = moving
self.multiWidget = multi
self.fixedPicker.setWidget(self.fixedWidget)
self.movingPicker.setWidget(self.movingWidget)
self.fixedPicker.pickedLocation.connect(self.pickedFixedLocation)
self.movingPicker.pickedLocation.connect(self.pickedMovingLocation)
# Save the original complete transform
self.originalTransform = self.multiWidget.transformations.completeTransform()
self.originalScalingTransform = self.multiWidget.transformations.scalingTransform()
# Add a new transform on top of the others
currentProject = ProjectController.Instance().currentProject
transform = Transformation(vtkTransform(), Transformation.TypeLandmark, currentProject.movingData)
self.multiWidget.transformations.append(transform)
statusWidget = StatusWidget.Instance()
statusWidget.setText("Place landmarks in both volumes to create a landmark transform. "
"Available methods for placing landmarks are the surface type and the two-step type.")
def setLandmarkWidgets(self, fixed, moving):
self.fixedLandmarkWidget = fixed
self.movingLandmarkWidget = moving
self.fixedLandmarkWidget.landmarkTypeChanged.connect(self.landmarkToolTypeChanged)
self.movingLandmarkWidget.landmarkTypeChanged.connect(self.landmarkToolTypeChanged)
self.fixedPicker.setPropertiesWidget(self.fixedLandmarkWidget)
self.movingPicker.setPropertiesWidget(self.movingLandmarkWidget)
@overrides(TransformationTool)
def cancelTransform(self):
del self.multiWidget.transformations[-1]
@overrides(TransformationTool)
def applyTransform(self):
# Add the landmark point sets to the transformation
transformation = self.multiWidget.transformations[-1]
transformation.landmarks = self.landmarkPointSets
@overrides(TransformationTool)
def cleanUp(self):
self.fixedPicker.cleanUp()
self.movingPicker.cleanUp()
for landmarkIndicator in self.landmarkIndicators:
landmarkIndicator.cleanUp()
self.landmarkPointSets = []
self.fixedWidget.render()
self.movingWidget.render()
self.multiWidget.render()
self.fixedLandmarkWidget.setVisible(False)
self.movingLandmarkWidget.setVisible(False)
self.toolFinished.emit()
@Slot(int)
def setActiveLandmark(self, index):
self.activeIndex = index
self._update()
if self.activeIndex < len(self.landmarkPointSets):
landmarkSet = self.landmarkPointSets[self.activeIndex]
self._focusCamera(self.fixedWidget, landmarkSet[0])
self._focusCamera(self.movingWidget, landmarkSet[1])
self.fixedWidget.render()
self.movingWidget.render()
self.multiWidget.render()
@Slot(int)
def deleteLandmark(self, index):
if index < len(self.landmarkPointSets):
del self.landmarkPointSets[index]
indices = []
for i in range(len(self.landmarkIndicators)):
indicator = self.landmarkIndicators[i]
if indicator.id == index:
indicator.cleanUp()
indices.append(i)
indices.reverse()
for i in indices:
del self.landmarkIndicators[i]
for indicator in self.landmarkIndicators:
if indicator.id > index:
indicator.id -= 1
self.activeIndex = len(self.landmarkPointSets)
self.pointsWidget.activeIndex = self.activeIndex
# self.activeIndex = -1
self._updateTransform()
self._update()
self.updatedLandmarks.emit(self.landmarkPointSets)
self.fixedWidget.render()
self.movingWidget.render()
self.multiWidget.render()
@Slot(int)
def landmarkTransformTypeChanged(self, value):
"""
Called when the transformation type is changed
from the combo box. Rigid, Similarity or Affine.
"""
if value == 2 and len(self.landmarkPointSets) < 3:
self.landmarkComboBox.setCurrentIndex(self.landmarkTransformType)
# TODO: let the user know that some more landmark point sets are needed...
# Or: solve in another way by only calculating the affine transform when
# there are actually 3 or more complete landmark point sets
return
self.landmarkTransformType = value
self._updateTransform()
self.multiWidget.render()
@Slot(list)
def pickedFixedLocation(self, location):
"""
Place spheres in fixed widget and in multi-widget.
The input location should be in local data coordinates.
"""
self._pickedLocation(location, "fixed")
@Slot(list)
def pickedMovingLocation(self, location):
"""
Place spheres in moving widget and in multi-widget.
The input location should be in local data coordinates.
"""
self._pickedLocation(location, "moving")
@Slot(object)
def landmarkToolTypeChanged(self, widget):
if widget is self.fixedLandmarkWidget:
self.fixedPickerType = widget.landmarkType
self.fixedPicker.cleanUp()
self.fixedPicker.pickedLocation.disconnect()
self.fixedPicker = self._pickerForType(self.fixedPickerType)
self.fixedPicker.setWidget(self.fixedWidget)
self.fixedPicker.pickedLocation.connect(self.pickedFixedLocation)
self.fixedPicker.setPropertiesWidget(self.fixedLandmarkWidget)
if type(self.fixedPicker) == TwoStepPicker:
self.fixedPicker.pickedLocation.connect(self.fixedLandmarkWidget.twoStepWidget.pickedLocation)
self.fixedWidget.render()
elif widget is self.movingLandmarkWidget:
self.movingPickerType = widget.landmarkType
self.movingPicker.cleanUp()
self.movingPicker.pickedLocation.disconnect()
self.movingPicker = self._pickerForType(self.movingPickerType)
self.movingPicker.setWidget(self.movingWidget)
self.movingPicker.pickedLocation.connect(self.pickedMovingLocation)
self.movingPicker.setPropertiesWidget(self.movingLandmarkWidget)
if type(self.movingPicker) == TwoStepPicker:
self.movingPicker.pickedLocation.connect(self.movingLandmarkWidget.twoStepWidget.pickedLocation)
self.movingWidget.render()
# Private methods
def _pickerForType(self, pickerType):
"""
Returns a picker object depending on the given picker type.
"""
if pickerType == SurfaceType:
return SurfacePicker()
elif pickerType == TwoStepType:
return TwoStepPicker()
def _pickedLocation(self, location, landmarkType):
if self.activeIndex < len(self.landmarkPointSets):
# Just update the landmark
landmarks = [x for x in self.landmarkIndicators if (x.id == self.activeIndex and x.flag == landmarkType)]
for landmark in landmarks:
landmark.position = location
index = 0 if landmarkType == "fixed" else 1
if not self.landmarkPointSets[self.activeIndex][index]:
# Add another landmark indicator if there was no landmark
self._addLandmarkIndicator(location, landmarkType)
self.landmarkPointSets[self.activeIndex][index] = location
else:
# Add the location to the landmark points as a set
landmarkSet = [location, None] if (landmarkType == "fixed") else [None, location]
self.landmarkPointSets.append(landmarkSet)
self._addLandmarkIndicator(location, landmarkType)
self._updateTransform()
self._update()
self.updatedLandmarks.emit(self.landmarkPointSets)
self.multiWidget.render()
self.movingWidget.render()
def _updateTransform(self):
"""
Update the landmark transform
"""
if PointsSetsIsEmpty(self.landmarkPointSets):
return
numberOfSets = NumberOfSets(self.landmarkPointSets)
fixedPoints = vtkPoints()
movingPoints = vtkPoints()
fixedPoints.SetNumberOfPoints(numberOfSets)
movingPoints.SetNumberOfPoints(numberOfSets)
pointsetIndex = 0
for index in range(len(self.landmarkPointSets)):
pointset = self.landmarkPointSets[index]
if pointset[0] and pointset[1]:
fixedPoint = pointset[0]
movingPoint = pointset[1]
# Transform the point from the moving landmark with the original transform
transPoint = self.originalTransform.TransformPoint(movingPoint)
fixedPoints.SetPoint(pointsetIndex, fixedPoint)
movingPoints.SetPoint(pointsetIndex, transPoint)
pointsetIndex += 1
landmarkTransform = vtkLandmarkTransform()
if self.landmarkTransformType == 0:
landmarkTransform.SetModeToRigidBody()
elif self.landmarkTransformType == 1:
landmarkTransform.SetModeToSimilarity()
elif self.landmarkTransformType == 2:
landmarkTransform.SetModeToAffine()
landmarkTransform.SetSourceLandmarks(fixedPoints)
landmarkTransform.SetTargetLandmarks(movingPoints)
landmarkTransform.Update()
transform = TransformWithMatrix(landmarkTransform.GetMatrix())
transform.Inverse()
transformation = self.multiWidget.transformations[-1]
assert transformation.transformType == Transformation.TypeLandmark
transformation.transform = transform
self.multiWidget.transformations[-1] = transformation
self._updateLandmarkTransforms()
def _update(self):
for landmark in self.landmarkIndicators:
landmark.active = landmark.id == self.activeIndex
landmark.update()
self._updateLandmarkTransforms()
def _updateLandmarkTransforms(self):
# Update the transforms
for landmarkIndicator in self.landmarkIndicators:
if landmarkIndicator.flag == "moving" and landmarkIndicator.renderer == self.movingWidget.renderer:
# landmarkIndicator.transform = self.multiWidget.transformations.scalingTransform()
# landmarkIndicator.update()
pass
elif landmarkIndicator.flag == "moving" and landmarkIndicator.renderer == self.multiWidget.renderer:
landmarkIndicator.transform = self.multiWidget.transformations.completeTransform()
landmarkIndicator.update()
def _addLandmarkIndicator(self, location, landmarkType):
imageData = self.fixedWidget.imageData if landmarkType == "fixed" else self.movingWidget.imageData
bounds = imageData.GetBounds()
sizes = [bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]]
smallest = min(sizes)
scale = smallest / 30.0
# Create landmark for the correct widget
widget = self.fixedWidget if landmarkType == "fixed" else self.movingWidget
landmark = self._landmarkForWidget(widget, landmarkType)
landmark.id = self.activeIndex
landmark.position = location
landmark.scale = scale
# Create landmark for multi widget
landmarkMulti = self._landmarkForWidget(self.multiWidget, landmarkType)
landmarkMulti.id = self.activeIndex
landmarkMulti.position = location
landmarkMulti.scale = scale
self.landmarkIndicators.append(landmark)
self.landmarkIndicators.append(landmarkMulti)
def _focusCamera(self, widget, location):
if not location:
return
transform = TransformWithMatrix(widget.volume.GetMatrix())
worldPoint = transform.TransformPoint(location)
camera = widget.renderer.GetActiveCamera()
camera.SetFocalPoint(worldPoint)
def _landmarkForWidget(self, widget, landmarkType):
return Landmark(index=self.activeIndex,
renderer=widget.renderer,
overlay=widget.rendererOverlay,
flag=landmarkType)
def PointsSetsIsEmpty(points):
"""
Returns True when the given collection contains no point sets
for which both the fixed and moving landmarks are set.
"""
return NumberOfSets(points) == 0
def NumberOfSets(points):
"""
Returns the total number of landmark sets in the
given collection of pointsets.
"""
count = 0
for pointset in points:
if pointset[0] is not None and pointset[1] is not None:
count += 1
return count
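# ---- Hedged usage sketch (editor's addition; not part of the original file) ----
# Illustrates how the two helper functions above count landmark sets: a point
# set is only counted once both its fixed ([0]) and moving ([1]) locations are
# filled in. The coordinates below are made up purely for illustration.
if __name__ == '__main__':
    _exampleSets = [
        [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]],  # complete pair: counted
        [[2.0, 1.0, 0.0], None],             # fixed landmark only: ignored
        [None, [0.5, 0.5, 0.5]],             # moving landmark only: ignored
    ]
    assert NumberOfSets(_exampleSets) == 1
    assert not PointsSetsIsEmpty(_exampleSets)
    assert PointsSetsIsEmpty([[None, None]])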
berendkleinhaneveld/Registrationshop | ui/transformations/LandmarkTransformationTool.py | Python | mit | 13,675
"""
Solve the catalysis problem using mcmc.
Author:
Ilias Bilionis
Date:
6/26/2014
"""
import matplotlib
matplotlib.use('Agg')
import numpy as np
import mpi4py.MPI as mpi
import pymc as pm
import pysmc as ps
import sys
import warnings
warnings.filterwarnings('ignore')
sys.path.insert(0,'.')
sys.path.insert(0,'demos/')
from catalysis import CatalysisModelDMNLESS
def make_model():
gamma = 1.
kappa = pm.Gamma('kappa', 4., 1., size=5)
sigma2 = pm.Gamma('sigma2', 0.1, 1., value=100.)
data = np.loadtxt('data.txt').reshape((7, 6))
y = data[:, 1:]
y = y.reshape((1, y.shape[0] * y.shape[1])) / 500.
f = CatalysisModelDMNLESS()
@pm.deterministic
def model_output(kappa=kappa):
return f(kappa)['f']
@pm.stochastic(observed=True)
def output(value=y, model_output=model_output, sigma2=sigma2, gamma=gamma):
return gamma * pm.normal_like(y, model_output, 1. / sigma2)
return locals()
if __name__ == '__main__':
model = make_model()
mcmc = pm.MCMC(model)
mcmc.use_step_method(ps.RandomWalk, model['kappa'])
mcmc.use_step_method(ps.RandomWalk, model['sigma2'])
smc_sampler = ps.SMC(mcmc, num_particles=10240, num_mcmc=1, verbose=4,
db_filename='demos/smc_catalysis.pcl',
gamma_is_an_exponent=True, mpi=mpi,
update_db=True)
smc_sampler.initialize(0.)
smc_sampler.move_to(1.)
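# ---- Hedged sketch (editor's addition; not part of the original file) ----
# The `output` stochastic above returns gamma * log N(y | model_output, sigma2),
# which is what lets pysmc treat gamma as a tempering exponent (gamma = 0 gives
# the prior, gamma = 1 the full posterior, matching initialize(0.)/move_to(1.)).
# A plain-numpy version of that quantity, written out for clarity; all names
# below are illustrative only.
import numpy as np

def tempered_gaussian_loglike(y, mu, sigma2, gamma):
    """Return gamma times the Gaussian log-likelihood of y given mean mu."""
    y = np.asarray(y, dtype=float)
    mu = np.asarray(mu, dtype=float)
    resid = y - mu
    loglike = -0.5 * np.sum(np.log(2.0 * np.pi * sigma2) + resid ** 2 / sigma2)
    return gamma * loglike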
ebilionis/variational-reformulation-of-inverse-problems | unittests/mcmc_catalysis_dmnl.py | Python | gpl-2.0 | 1,449
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe, erpnext
from frappe.utils import cint, cstr, formatdate, flt, getdate, nowdate, get_link_to_form
from frappe import _, throw
import frappe.defaults
from erpnext.assets.doctype.asset_category.asset_category import get_asset_category_account
from erpnext.controllers.buying_controller import BuyingController
from erpnext.accounts.party import get_party_account, get_due_date
from erpnext.accounts.utils import get_account_currency, get_fiscal_year
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import update_billed_amount_based_on_po
from erpnext.stock import get_warehouse_account_map
from erpnext.accounts.general_ledger import make_gl_entries, merge_similar_entries, make_reverse_gl_entries
from erpnext.accounts.doctype.gl_entry.gl_entry import update_outstanding_amt
from erpnext.buying.utils import check_on_hold_or_closed_status
from erpnext.accounts.general_ledger import get_round_off_account_and_cost_center
from erpnext.assets.doctype.asset.asset import get_asset_account, is_cwip_accounting_enabled
from frappe.model.mapper import get_mapped_doc
from six import iteritems
from erpnext.accounts.doctype.sales_invoice.sales_invoice import validate_inter_company_party, update_linked_doc,\
unlink_inter_company_doc
from erpnext.accounts.doctype.tax_withholding_category.tax_withholding_category import get_party_tax_withholding_details
from erpnext.accounts.deferred_revenue import validate_service_stop_date
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import get_item_account_wise_additional_cost
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class PurchaseInvoice(BuyingController):
def __init__(self, *args, **kwargs):
super(PurchaseInvoice, self).__init__(*args, **kwargs)
self.status_updater = [{
'source_dt': 'Purchase Invoice Item',
'target_dt': 'Purchase Order Item',
'join_field': 'po_detail',
'target_field': 'billed_amt',
'target_parent_dt': 'Purchase Order',
'target_parent_field': 'per_billed',
'target_ref_field': 'amount',
'source_field': 'amount',
'percent_join_field': 'purchase_order',
'overflow_type': 'billing'
}]
def onload(self):
super(PurchaseInvoice, self).onload()
supplier_tds = frappe.db.get_value("Supplier", self.supplier, "tax_withholding_category")
self.set_onload("supplier_tds", supplier_tds)
def before_save(self):
if not self.on_hold:
self.release_date = ''
def invoice_is_blocked(self):
return self.on_hold and (not self.release_date or self.release_date > getdate(nowdate()))
def validate(self):
if not self.is_opening:
self.is_opening = 'No'
self.validate_posting_time()
super(PurchaseInvoice, self).validate()
# apply tax withholding only if checked and applicable
self.set_tax_withholding()
if not self.is_return:
self.po_required()
self.pr_required()
self.validate_supplier_invoice()
# validate cash purchase
if (self.is_paid == 1):
self.validate_cash()
# validate that the service stop date lies between the service start and end dates
validate_service_stop_date(self)
if self._action=="submit" and self.update_stock:
self.make_batches('warehouse')
self.validate_release_date()
self.check_conversion_rate()
self.validate_credit_to_acc()
self.clear_unallocated_advances("Purchase Invoice Advance", "advances")
self.check_on_hold_or_closed_status()
self.validate_with_previous_doc()
self.validate_uom_is_integer("uom", "qty")
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.set_expense_account(for_validate=True)
self.set_against_expense_account()
self.validate_write_off_account()
self.validate_multiple_billing("Purchase Receipt", "pr_detail", "amount", "items")
self.create_remarks()
self.set_status()
self.validate_purchase_receipt_if_update_stock()
validate_inter_company_party(self.doctype, self.supplier, self.company, self.inter_company_invoice_reference)
def validate_release_date(self):
if self.release_date and getdate(nowdate()) >= getdate(self.release_date):
frappe.throw(_('Release date must be in the future'))
def validate_cash(self):
if not self.cash_bank_account and flt(self.paid_amount):
frappe.throw(_("Cash or Bank Account is mandatory for making payment entry"))
if (flt(self.paid_amount) + flt(self.write_off_amount)
- flt(self.get("rounded_total") or self.grand_total)
> 1/(10**(self.precision("base_grand_total") + 1))):
frappe.throw(_("""Paid amount + Write Off Amount can not be greater than Grand Total"""))
def create_remarks(self):
if not self.remarks:
if self.bill_no and self.bill_date:
self.remarks = _("Against Supplier Invoice {0} dated {1}").format(self.bill_no,
formatdate(self.bill_date))
else:
self.remarks = _("No Remarks")
def set_missing_values(self, for_validate=False):
if not self.credit_to:
self.credit_to = get_party_account("Supplier", self.supplier, self.company)
self.party_account_currency = frappe.db.get_value("Account", self.credit_to, "account_currency", cache=True)
if not self.due_date:
self.due_date = get_due_date(self.posting_date, "Supplier", self.supplier, self.company, self.bill_date)
tds_category = frappe.db.get_value("Supplier", self.supplier, "tax_withholding_category")
if tds_category and not for_validate:
self.apply_tds = 1
self.tax_withholding_category = tds_category
super(PurchaseInvoice, self).set_missing_values(for_validate)
def check_conversion_rate(self):
default_currency = erpnext.get_company_currency(self.company)
if not default_currency:
throw(_('Please enter default currency in Company Master'))
if (self.currency == default_currency and flt(self.conversion_rate) != 1.00) or not self.conversion_rate or (self.currency != default_currency and flt(self.conversion_rate) == 1.00):
throw(_("Conversion rate cannot be 0 or 1"))
def validate_credit_to_acc(self):
if not self.credit_to:
self.credit_to = get_party_account("Supplier", self.supplier, self.company)
if not self.credit_to:
self.raise_missing_debit_credit_account_error("Supplier", self.supplier)
account = frappe.db.get_value("Account", self.credit_to,
["account_type", "report_type", "account_currency"], as_dict=True)
if account.report_type != "Balance Sheet":
frappe.throw(
_("Please ensure {} account is a Balance Sheet account. You can change the parent account to a Balance Sheet account or select a different account.")
.format(frappe.bold("Credit To")), title=_("Invalid Account")
)
if self.supplier and account.account_type != "Payable":
frappe.throw(
_("Please ensure {} account is a Payable account. Change the account type to Payable or select a different account.")
.format(frappe.bold("Credit To")), title=_("Invalid Account")
)
self.party_account_currency = account.account_currency
def check_on_hold_or_closed_status(self):
check_list = []
for d in self.get('items'):
if d.purchase_order and not d.purchase_order in check_list and not d.purchase_receipt:
check_list.append(d.purchase_order)
check_on_hold_or_closed_status('Purchase Order', d.purchase_order)
def validate_with_previous_doc(self):
super(PurchaseInvoice, self).validate_with_previous_doc({
"Purchase Order": {
"ref_dn_field": "purchase_order",
"compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
},
"Purchase Order Item": {
"ref_dn_field": "po_detail",
"compare_fields": [["project", "="], ["item_code", "="], ["uom", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
"Purchase Receipt": {
"ref_dn_field": "purchase_receipt",
"compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
},
"Purchase Receipt Item": {
"ref_dn_field": "pr_detail",
"compare_fields": [["project", "="], ["item_code", "="], ["uom", "="]],
"is_child_table": True
}
})
if cint(frappe.db.get_single_value('Buying Settings', 'maintain_same_rate')) and not self.is_return:
self.validate_rate_with_reference_doc([
["Purchase Order", "purchase_order", "po_detail"],
["Purchase Receipt", "purchase_receipt", "pr_detail"]
])
def validate_warehouse(self, for_validate=True):
if self.update_stock and for_validate:
for d in self.get('items'):
if not d.warehouse:
frappe.throw(_("Warehouse required at Row No {0}, please set default warehouse for the item {1} for the company {2}").
format(d.idx, d.item_code, self.company))
super(PurchaseInvoice, self).validate_warehouse()
def validate_item_code(self):
for d in self.get('items'):
if not d.item_code:
frappe.msgprint(_("Item Code required at Row No {0}").format(d.idx), raise_exception=True)
def set_expense_account(self, for_validate=False):
auto_accounting_for_stock = erpnext.is_perpetual_inventory_enabled(self.company)
if auto_accounting_for_stock:
stock_not_billed_account = self.get_company_default("stock_received_but_not_billed")
stock_items = self.get_stock_items()
asset_items = [d.is_fixed_asset for d in self.items if d.is_fixed_asset]
if len(asset_items) > 0:
asset_received_but_not_billed = self.get_company_default("asset_received_but_not_billed")
if self.update_stock:
self.validate_item_code()
self.validate_warehouse(for_validate)
if auto_accounting_for_stock:
warehouse_account = get_warehouse_account_map(self.company)
for item in self.get("items"):
# in case of auto inventory accounting,
# expense account is always "Stock Received But Not Billed" for a stock item
# except opening entry, drop-ship entry and fixed asset items
if item.item_code:
asset_category = frappe.get_cached_value("Item", item.item_code, "asset_category")
if auto_accounting_for_stock and item.item_code in stock_items \
and self.is_opening == 'No' and not item.is_fixed_asset \
and (not item.po_detail or
not frappe.db.get_value("Purchase Order Item", item.po_detail, "delivered_by_supplier")):
if self.update_stock and (not item.from_warehouse):
if for_validate and item.expense_account and item.expense_account != warehouse_account[item.warehouse]["account"]:
msg = _("Row {}: Expense Head changed to {} ").format(item.idx, frappe.bold(warehouse_account[item.warehouse]["account"]))
msg += _("because account {} is not linked to warehouse {} ").format(frappe.bold(item.expense_account), frappe.bold(item.warehouse))
msg += _("or it is not the default inventory account")
frappe.msgprint(msg, title=_("Expense Head Changed"))
item.expense_account = warehouse_account[item.warehouse]["account"]
else:
# check if 'Stock Received But Not Billed' account is credited in Purchase receipt or not
if item.purchase_receipt:
negative_expense_booked_in_pr = frappe.db.sql("""select name from `tabGL Entry`
where voucher_type='Purchase Receipt' and voucher_no=%s and account = %s""",
(item.purchase_receipt, stock_not_billed_account))
if negative_expense_booked_in_pr:
if for_validate and item.expense_account and item.expense_account != stock_not_billed_account:
msg = _("Row {}: Expense Head changed to {} ").format(item.idx, frappe.bold(stock_not_billed_account))
msg += _("because expense is booked against this account in Purchase Receipt {}").format(frappe.bold(item.purchase_receipt))
frappe.msgprint(msg, title=_("Expense Head Changed"))
item.expense_account = stock_not_billed_account
else:
# If no purchase receipt present then book expense in 'Stock Received But Not Billed'
# This is done in cases when Purchase Invoice is created before Purchase Receipt
if for_validate and item.expense_account and item.expense_account != stock_not_billed_account:
msg = _("Row {}: Expense Head changed to {} ").format(item.idx, frappe.bold(stock_not_billed_account))
msg += _("as no Purchase Receipt is created against Item {}. ").format(frappe.bold(item.item_code))
msg += _("This is done to handle accounting for cases when Purchase Receipt is created after Purchase Invoice")
frappe.msgprint(msg, title=_("Expense Head Changed"))
item.expense_account = stock_not_billed_account
elif item.is_fixed_asset and not is_cwip_accounting_enabled(asset_category):
item.expense_account = get_asset_category_account('fixed_asset_account', item=item.item_code,
company = self.company)
elif item.is_fixed_asset and item.pr_detail:
item.expense_account = asset_received_but_not_billed
elif not item.expense_account and for_validate:
throw(_("Expense account is mandatory for item {0}").format(item.item_code or item.item_name))
def set_against_expense_account(self):
against_accounts = []
for item in self.get("items"):
if item.expense_account and (item.expense_account not in against_accounts):
against_accounts.append(item.expense_account)
self.against_expense_account = ",".join(against_accounts)
def po_required(self):
if frappe.db.get_value("Buying Settings", None, "po_required") == 'Yes':
if frappe.get_value('Supplier', self.supplier, 'allow_purchase_invoice_creation_without_purchase_order'):
return
for d in self.get('items'):
if not d.purchase_order:
msg = _("Purchase Order Required for item {}").format(frappe.bold(d.item_code))
msg += "<br><br>"
msg += _("To submit the invoice without purchase order please set {} ").format(frappe.bold(_('Purchase Order Required')))
msg += _("as {} in {}").format(frappe.bold('No'), get_link_to_form('Buying Settings', 'Buying Settings', 'Buying Settings'))
throw(msg, title=_("Mandatory Purchase Order"))
def pr_required(self):
stock_items = self.get_stock_items()
if frappe.db.get_value("Buying Settings", None, "pr_required") == 'Yes':
if frappe.get_value('Supplier', self.supplier, 'allow_purchase_invoice_creation_without_purchase_receipt'):
return
for d in self.get('items'):
if not d.purchase_receipt and d.item_code in stock_items:
msg = _("Purchase Receipt Required for item {}").format(frappe.bold(d.item_code))
msg += "<br><br>"
msg += _("To submit the invoice without purchase receipt please set {} ").format(frappe.bold(_('Purchase Receipt Required')))
msg += _("as {} in {}").format(frappe.bold('No'), get_link_to_form('Buying Settings', 'Buying Settings', 'Buying Settings'))
throw(msg, title=_("Mandatory Purchase Receipt"))
def validate_write_off_account(self):
if self.write_off_amount and not self.write_off_account:
throw(_("Please enter Write Off Account"))
def check_prev_docstatus(self):
for d in self.get('items'):
if d.purchase_order:
submitted = frappe.db.sql("select name from `tabPurchase Order` where docstatus = 1 and name = %s", d.purchase_order)
if not submitted:
frappe.throw(_("Purchase Order {0} is not submitted").format(d.purchase_order))
if d.purchase_receipt:
submitted = frappe.db.sql("select name from `tabPurchase Receipt` where docstatus = 1 and name = %s", d.purchase_receipt)
if not submitted:
frappe.throw(_("Purchase Receipt {0} is not submitted").format(d.purchase_receipt))
def update_status_updater_args(self):
if cint(self.update_stock):
self.status_updater.append({
'source_dt': 'Purchase Invoice Item',
'target_dt': 'Purchase Order Item',
'join_field': 'po_detail',
'target_field': 'received_qty',
'target_parent_dt': 'Purchase Order',
'target_parent_field': 'per_received',
'target_ref_field': 'qty',
'source_field': 'received_qty',
'second_source_dt': 'Purchase Receipt Item',
'second_source_field': 'received_qty',
'second_join_field': 'purchase_order_item',
'percent_join_field':'purchase_order',
'overflow_type': 'receipt',
'extra_cond': """ and exists(select name from `tabPurchase Invoice`
where name=`tabPurchase Invoice Item`.parent and update_stock = 1)"""
})
if cint(self.is_return):
self.status_updater.append({
'source_dt': 'Purchase Invoice Item',
'target_dt': 'Purchase Order Item',
'join_field': 'po_detail',
'target_field': 'returned_qty',
'source_field': '-1 * qty',
'second_source_dt': 'Purchase Receipt Item',
'second_source_field': '-1 * qty',
'second_join_field': 'purchase_order_item',
'overflow_type': 'receipt',
'extra_cond': """ and exists (select name from `tabPurchase Invoice`
where name=`tabPurchase Invoice Item`.parent and update_stock=1 and is_return=1)"""
})
def validate_purchase_receipt_if_update_stock(self):
if self.update_stock:
for item in self.get("items"):
if item.purchase_receipt:
frappe.throw(_("Stock cannot be updated against Purchase Receipt {0}")
.format(item.purchase_receipt))
def on_submit(self):
super(PurchaseInvoice, self).on_submit()
self.check_prev_docstatus()
self.update_status_updater_args()
self.update_prevdoc_status()
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype,
self.company, self.base_grand_total)
if not self.is_return:
self.update_against_document_in_jv()
self.update_billing_status_for_zero_amount_refdoc("Purchase Receipt")
self.update_billing_status_for_zero_amount_refdoc("Purchase Order")
self.update_billing_status_in_pr()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating ordered qty in bin depends upon updated ordered qty in PO
if self.update_stock == 1:
self.update_stock_ledger()
from erpnext.stock.doctype.serial_no.serial_no import update_serial_nos_after_submit
update_serial_nos_after_submit(self, "items")
# GL entries must be made in this sequence because the outstanding amount may otherwise become negative
self.make_gl_entries()
if self.update_stock == 1:
self.repost_future_sle_and_gle()
self.update_project()
update_linked_doc(self.doctype, self.name, self.inter_company_invoice_reference)
def make_gl_entries(self, gl_entries=None, from_repost=False):
if not gl_entries:
gl_entries = self.get_gl_entries()
if gl_entries:
update_outstanding = "No" if (cint(self.is_paid) or self.write_off_account) else "Yes"
if self.docstatus == 1:
make_gl_entries(gl_entries, update_outstanding=update_outstanding, merge_entries=False, from_repost=from_repost)
elif self.docstatus == 2:
make_reverse_gl_entries(voucher_type=self.doctype, voucher_no=self.name)
if update_outstanding == "No":
update_outstanding_amt(self.credit_to, "Supplier", self.supplier,
self.doctype, self.return_against if cint(self.is_return) and self.return_against else self.name)
elif self.docstatus == 2 and cint(self.update_stock) and self.auto_accounting_for_stock:
make_reverse_gl_entries(voucher_type=self.doctype, voucher_no=self.name)
def get_gl_entries(self, warehouse_account=None):
self.auto_accounting_for_stock = erpnext.is_perpetual_inventory_enabled(self.company)
if self.auto_accounting_for_stock:
self.stock_received_but_not_billed = self.get_company_default("stock_received_but_not_billed")
self.expenses_included_in_valuation = self.get_company_default("expenses_included_in_valuation")
else:
self.stock_received_but_not_billed = None
self.expenses_included_in_valuation = None
self.negative_expense_to_be_booked = 0.0
gl_entries = []
self.make_supplier_gl_entry(gl_entries)
self.make_item_gl_entries(gl_entries)
if self.check_asset_cwip_enabled():
self.get_asset_gl_entry(gl_entries)
self.make_tax_gl_entries(gl_entries)
self.make_internal_transfer_gl_entries(gl_entries)
gl_entries = make_regional_gl_entries(gl_entries, self)
gl_entries = merge_similar_entries(gl_entries)
self.make_payment_gl_entries(gl_entries)
self.make_write_off_gl_entry(gl_entries)
self.make_gle_for_rounding_adjustment(gl_entries)
return gl_entries
def check_asset_cwip_enabled(self):
# Check if there exists any item with cwip accounting enabled in its asset category
for item in self.get("items"):
if item.item_code and item.is_fixed_asset:
asset_category = frappe.get_cached_value("Item", item.item_code, "asset_category")
if is_cwip_accounting_enabled(asset_category):
return 1
return 0
def make_supplier_gl_entry(self, gl_entries):
# Checked both rounding_adjustment and rounded_total
# because rounded_total had a value even before the introduction of posting GLE based on rounded total
grand_total = self.rounded_total if (self.rounding_adjustment and self.rounded_total) else self.grand_total
if grand_total and not self.is_internal_transfer():
# Did not use base_grand_total to book rounding loss gle
grand_total_in_company_currency = flt(grand_total * self.conversion_rate,
self.precision("grand_total"))
gl_entries.append(
self.get_gl_dict({
"account": self.credit_to,
"party_type": "Supplier",
"party": self.supplier,
"due_date": self.due_date,
"against": self.against_expense_account,
"credit": grand_total_in_company_currency,
"credit_in_account_currency": grand_total_in_company_currency \
if self.party_account_currency==self.company_currency else grand_total,
"against_voucher": self.return_against if cint(self.is_return) and self.return_against else self.name,
"against_voucher_type": self.doctype,
"project": self.project,
"cost_center": self.cost_center
}, self.party_account_currency, item=self)
)
def make_item_gl_entries(self, gl_entries):
# item gl entries
stock_items = self.get_stock_items()
if self.update_stock and self.auto_accounting_for_stock:
warehouse_account = get_warehouse_account_map(self.company)
landed_cost_entries = get_item_account_wise_additional_cost(self.name)
voucher_wise_stock_value = {}
if self.update_stock:
for d in frappe.get_all('Stock Ledger Entry',
fields = ["voucher_detail_no", "stock_value_difference", "warehouse"], filters={'voucher_no': self.name}):
voucher_wise_stock_value.setdefault((d.voucher_detail_no, d.warehouse), d.stock_value_difference)
valuation_tax_accounts = [d.account_head for d in self.get("taxes")
if d.category in ('Valuation', 'Total and Valuation')
and flt(d.base_tax_amount_after_discount_amount)]
for item in self.get("items"):
if flt(item.base_net_amount):
account_currency = get_account_currency(item.expense_account)
if item.item_code:
asset_category = frappe.get_cached_value("Item", item.item_code, "asset_category")
if self.update_stock and self.auto_accounting_for_stock and item.item_code in stock_items:
# warehouse account
warehouse_debit_amount = self.make_stock_adjustment_entry(gl_entries,
item, voucher_wise_stock_value, account_currency)
if item.from_warehouse:
gl_entries.append(self.get_gl_dict({
"account": warehouse_account[item.warehouse]['account'],
"against": warehouse_account[item.from_warehouse]["account"],
"cost_center": item.cost_center,
"project": item.project or self.project,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"debit": warehouse_debit_amount,
}, warehouse_account[item.warehouse]["account_currency"], item=item))
# Intentionally passed negative debit amount to avoid incorrect GL Entry validation
gl_entries.append(self.get_gl_dict({
"account": warehouse_account[item.from_warehouse]['account'],
"against": warehouse_account[item.warehouse]["account"],
"cost_center": item.cost_center,
"project": item.project or self.project,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"debit": -1 * flt(item.base_net_amount, item.precision("base_net_amount")),
}, warehouse_account[item.from_warehouse]["account_currency"], item=item))
# Do not book expense for transfers within the same company
if not self.is_internal_transfer():
gl_entries.append(
self.get_gl_dict({
"account": item.expense_account,
"against": self.supplier,
"debit": flt(item.base_net_amount, item.precision("base_net_amount")),
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"cost_center": item.cost_center,
"project": item.project
}, account_currency, item=item)
)
else:
if not self.is_internal_transfer():
gl_entries.append(
self.get_gl_dict({
"account": item.expense_account,
"against": self.supplier,
"debit": warehouse_debit_amount,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"cost_center": item.cost_center,
"project": item.project or self.project
}, account_currency, item=item)
)
# Amount added through landed-cost-voucher
if landed_cost_entries:
for account, amount in iteritems(landed_cost_entries[(item.item_code, item.name)]):
gl_entries.append(self.get_gl_dict({
"account": account,
"against": item.expense_account,
"cost_center": item.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"credit": flt(amount["base_amount"]),
"credit_in_account_currency": flt(amount["amount"]),
"project": item.project or self.project
}, item=item))
# sub-contracting warehouse
if flt(item.rm_supp_cost):
supplier_warehouse_account = warehouse_account[self.supplier_warehouse]["account"]
if not supplier_warehouse_account:
frappe.throw(_("Please set account in Warehouse {0}")
.format(self.supplier_warehouse))
gl_entries.append(self.get_gl_dict({
"account": supplier_warehouse_account,
"against": item.expense_account,
"cost_center": item.cost_center,
"project": item.project or self.project,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"credit": flt(item.rm_supp_cost)
}, warehouse_account[self.supplier_warehouse]["account_currency"], item=item))
elif not item.is_fixed_asset or (item.is_fixed_asset and not is_cwip_accounting_enabled(asset_category)):
expense_account = (item.expense_account
if (not item.enable_deferred_expense or self.is_return) else item.deferred_expense_account)
if not item.is_fixed_asset:
amount = flt(item.base_net_amount, item.precision("base_net_amount"))
else:
amount = flt(item.base_net_amount + item.item_tax_amount, item.precision("base_net_amount"))
auto_accounting_for_non_stock_items = cint(frappe.db.get_value('Company', self.company, 'enable_perpetual_inventory_for_non_stock_items'))
if auto_accounting_for_non_stock_items:
service_received_but_not_billed_account = self.get_company_default("service_received_but_not_billed")
if item.purchase_receipt:
# Post reverse entry for Service Received But Not Billed if it is booked in Purchase Receipt
expense_booked_in_pr = frappe.db.get_value('GL Entry', {'is_cancelled': 0,
'voucher_type': 'Purchase Receipt', 'voucher_no': item.purchase_receipt, 'voucher_detail_no': item.pr_detail,
'account':service_received_but_not_billed_account}, ['name'])
if expense_booked_in_pr:
expense_account = service_received_but_not_billed_account
if not self.is_internal_transfer():
gl_entries.append(self.get_gl_dict({
"account": expense_account,
"against": self.supplier,
"debit": amount,
"cost_center": item.cost_center,
"project": item.project or self.project
}, account_currency, item=item))
# If asset is bought through this document and not linked to PR
if self.update_stock and item.landed_cost_voucher_amount:
expenses_included_in_asset_valuation = self.get_company_default("expenses_included_in_asset_valuation")
# Amount added through landed-cost-voucher
gl_entries.append(self.get_gl_dict({
"account": expenses_included_in_asset_valuation,
"against": expense_account,
"cost_center": item.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"credit": flt(item.landed_cost_voucher_amount),
"project": item.project or self.project
}, item=item))
gl_entries.append(self.get_gl_dict({
"account": expense_account,
"against": expenses_included_in_asset_valuation,
"cost_center": item.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"debit": flt(item.landed_cost_voucher_amount),
"project": item.project or self.project
}, item=item))
# update gross amount of asset bought through this document
assets = frappe.db.get_all('Asset',
filters={ 'purchase_invoice': self.name, 'item_code': item.item_code }
)
for asset in assets:
frappe.db.set_value("Asset", asset.name, "gross_purchase_amount", flt(item.valuation_rate))
frappe.db.set_value("Asset", asset.name, "purchase_receipt_amount", flt(item.valuation_rate))
if self.auto_accounting_for_stock and self.is_opening == "No" and \
item.item_code in stock_items and item.item_tax_amount:
# Post reverse entry for Stock-Received-But-Not-Billed if it is booked in Purchase Receipt
if item.purchase_receipt and valuation_tax_accounts:
negative_expense_booked_in_pr = frappe.db.sql("""select name from `tabGL Entry`
where voucher_type='Purchase Receipt' and voucher_no=%s and account in %s""",
(item.purchase_receipt, valuation_tax_accounts))
if not negative_expense_booked_in_pr:
gl_entries.append(
self.get_gl_dict({
"account": self.stock_received_but_not_billed,
"against": self.supplier,
"debit": flt(item.item_tax_amount, item.precision("item_tax_amount")),
"remarks": self.remarks or "Accounting Entry for Stock",
"cost_center": self.cost_center,
"project": item.project or self.project
}, item=item)
)
self.negative_expense_to_be_booked += flt(item.item_tax_amount, \
item.precision("item_tax_amount"))
def get_asset_gl_entry(self, gl_entries):
arbnb_account = self.get_company_default("asset_received_but_not_billed")
eiiav_account = self.get_company_default("expenses_included_in_asset_valuation")
for item in self.get("items"):
if item.is_fixed_asset:
asset_amount = flt(item.net_amount) + flt(item.item_tax_amount/self.conversion_rate)
base_asset_amount = flt(item.base_net_amount + item.item_tax_amount)
item_exp_acc_type = frappe.db.get_value('Account', item.expense_account, 'account_type')
if (not item.expense_account or item_exp_acc_type not in ['Asset Received But Not Billed', 'Fixed Asset']):
item.expense_account = arbnb_account
if not self.update_stock:
arbnb_currency = get_account_currency(item.expense_account)
gl_entries.append(self.get_gl_dict({
"account": item.expense_account,
"against": self.supplier,
"remarks": self.get("remarks") or _("Accounting Entry for Asset"),
"debit": base_asset_amount,
"debit_in_account_currency": (base_asset_amount
if arbnb_currency == self.company_currency else asset_amount),
"cost_center": item.cost_center,
"project": item.project or self.project
}, item=item))
if item.item_tax_amount:
asset_eiiav_currency = get_account_currency(eiiav_account)
gl_entries.append(self.get_gl_dict({
"account": eiiav_account,
"against": self.supplier,
"remarks": self.get("remarks") or _("Accounting Entry for Asset"),
"cost_center": item.cost_center,
"project": item.project or self.project,
"credit": item.item_tax_amount,
"credit_in_account_currency": (item.item_tax_amount
if asset_eiiav_currency == self.company_currency else
item.item_tax_amount / self.conversion_rate)
}, item=item))
else:
cwip_account = get_asset_account("capital_work_in_progress_account",
asset_category=item.asset_category,company=self.company)
cwip_account_currency = get_account_currency(cwip_account)
gl_entries.append(self.get_gl_dict({
"account": cwip_account,
"against": self.supplier,
"remarks": self.get("remarks") or _("Accounting Entry for Asset"),
"debit": base_asset_amount,
"debit_in_account_currency": (base_asset_amount
if cwip_account_currency == self.company_currency else asset_amount),
"cost_center": self.cost_center,
"project": item.project or self.project
}, item=item))
if item.item_tax_amount and not cint(erpnext.is_perpetual_inventory_enabled(self.company)):
asset_eiiav_currency = get_account_currency(eiiav_account)
gl_entries.append(self.get_gl_dict({
"account": eiiav_account,
"against": self.supplier,
"remarks": self.get("remarks") or _("Accounting Entry for Asset"),
"cost_center": item.cost_center,
"credit": item.item_tax_amount,
"project": item.project or self.project,
"credit_in_account_currency": (item.item_tax_amount
if asset_eiiav_currency == self.company_currency else
item.item_tax_amount / self.conversion_rate)
}, item=item))
# When 'Update Stock' is checked and assets are bought through this document,
# the assets are linked to this document
if self.update_stock:
if flt(item.landed_cost_voucher_amount):
gl_entries.append(self.get_gl_dict({
"account": eiiav_account,
"against": cwip_account,
"cost_center": item.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"credit": flt(item.landed_cost_voucher_amount),
"project": item.project or self.project
}, item=item))
gl_entries.append(self.get_gl_dict({
"account": cwip_account,
"against": eiiav_account,
"cost_center": item.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"debit": flt(item.landed_cost_voucher_amount),
"project": item.project or self.project
}, item=item))
# update gross amount of assets bought through this document
assets = frappe.db.get_all('Asset',
filters={ 'purchase_invoice': self.name, 'item_code': item.item_code }
)
for asset in assets:
frappe.db.set_value("Asset", asset.name, "gross_purchase_amount", flt(item.valuation_rate))
frappe.db.set_value("Asset", asset.name, "purchase_receipt_amount", flt(item.valuation_rate))
return gl_entries
def make_stock_adjustment_entry(self, gl_entries, item, voucher_wise_stock_value, account_currency):
net_amt_precision = item.precision("base_net_amount")
val_rate_db_precision = 6 if cint(item.precision("valuation_rate")) <= 6 else 9
warehouse_debit_amount = flt(flt(item.valuation_rate, val_rate_db_precision)
* flt(item.qty) * flt(item.conversion_factor), net_amt_precision)
# Stock ledger value does not match the warehouse debit amount
if (self.update_stock and voucher_wise_stock_value.get(item.name) and
warehouse_debit_amount != flt(voucher_wise_stock_value.get((item.name, item.warehouse)), net_amt_precision)):
cost_of_goods_sold_account = self.get_company_default("default_expense_account")
stock_amount = flt(voucher_wise_stock_value.get((item.name, item.warehouse)), net_amt_precision)
stock_adjustment_amt = warehouse_debit_amount - stock_amount
gl_entries.append(
self.get_gl_dict({
"account": cost_of_goods_sold_account,
"against": item.expense_account,
"debit": stock_adjustment_amt,
"remarks": self.get("remarks") or _("Stock Adjustment"),
"cost_center": item.cost_center,
"project": item.project or self.project
}, account_currency, item=item)
)
warehouse_debit_amount = stock_amount
return warehouse_debit_amount
def make_tax_gl_entries(self, gl_entries):
# tax table gl entries
valuation_tax = {}
for tax in self.get("taxes"):
if tax.category in ("Total", "Valuation and Total") and flt(tax.base_tax_amount_after_discount_amount):
account_currency = get_account_currency(tax.account_head)
dr_or_cr = "debit" if tax.add_deduct_tax == "Add" else "credit"
gl_entries.append(
self.get_gl_dict({
"account": tax.account_head,
"against": self.supplier,
dr_or_cr: tax.base_tax_amount_after_discount_amount,
dr_or_cr + "_in_account_currency": tax.base_tax_amount_after_discount_amount \
if account_currency==self.company_currency \
else tax.tax_amount_after_discount_amount,
"cost_center": tax.cost_center
}, account_currency, item=tax)
)
# accumulate valuation tax
if self.is_opening == "No" and tax.category in ("Valuation", "Valuation and Total") and flt(tax.base_tax_amount_after_discount_amount) \
and not self.is_internal_transfer():
if self.auto_accounting_for_stock and not tax.cost_center:
frappe.throw(_("Cost Center is required in row {0} in Taxes table for type {1}").format(tax.idx, _(tax.category)))
valuation_tax.setdefault(tax.name, 0)
valuation_tax[tax.name] += \
(tax.add_deduct_tax == "Add" and 1 or -1) * flt(tax.base_tax_amount_after_discount_amount)
if self.is_opening == "No" and self.negative_expense_to_be_booked and valuation_tax:
# credit valuation tax amount in "Expenses Included In Valuation"
# this will balance out valuation amount included in cost of goods sold
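# (editor's note, added for clarity) Worked example, figures illustrative only:
# if negative_expense_to_be_booked is 100.00 and valuation_tax holds two rows
# of 30.00 and 70.00, the first row is credited 100.00 * (30.00 / 100.00) = 30.00
# and the last row receives the running remainder (70.00), so any divisional
# rounding loss always lands on the final row.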
total_valuation_amount = sum(valuation_tax.values())
amount_including_divisional_loss = self.negative_expense_to_be_booked
i = 1
for tax in self.get("taxes"):
if valuation_tax.get(tax.name):
if i == len(valuation_tax):
applicable_amount = amount_including_divisional_loss
else:
applicable_amount = self.negative_expense_to_be_booked * (valuation_tax[tax.name] / total_valuation_amount)
amount_including_divisional_loss -= applicable_amount
gl_entries.append(
self.get_gl_dict({
"account": tax.account_head,
"cost_center": tax.cost_center,
"against": self.supplier,
"credit": applicable_amount,
"remarks": self.remarks or _("Accounting Entry for Stock"),
}, item=tax)
)
i += 1
if self.auto_accounting_for_stock and self.update_stock and valuation_tax:
for tax in self.get("taxes"):
if valuation_tax.get(tax.name):
gl_entries.append(
self.get_gl_dict({
"account": tax.account_head,
"cost_center": tax.cost_center,
"against": self.supplier,
"credit": valuation_tax[tax.name],
"remarks": self.remarks or "Accounting Entry for Stock"
}, item=tax))
def make_internal_transfer_gl_entries(self, gl_entries):
if self.is_internal_transfer() and flt(self.base_total_taxes_and_charges):
account_currency = get_account_currency(self.unrealized_profit_loss_account)
gl_entries.append(
self.get_gl_dict({
"account": self.unrealized_profit_loss_account,
"against": self.supplier,
"credit": flt(self.total_taxes_and_charges),
"credit_in_account_currency": flt(self.base_total_taxes_and_charges),
"cost_center": self.cost_center
}, account_currency, item=self))
def make_payment_gl_entries(self, gl_entries):
# Make Cash GL Entries
if cint(self.is_paid) and self.cash_bank_account and self.paid_amount:
bank_account_currency = get_account_currency(self.cash_bank_account)
# CASH, make payment entries
gl_entries.append(
self.get_gl_dict({
"account": self.credit_to,
"party_type": "Supplier",
"party": self.supplier,
"against": self.cash_bank_account,
"debit": self.base_paid_amount,
"debit_in_account_currency": self.base_paid_amount \
if self.party_account_currency==self.company_currency else self.paid_amount,
"against_voucher": self.return_against if cint(self.is_return) and self.return_against else self.name,
"against_voucher_type": self.doctype,
"cost_center": self.cost_center,
"project": self.project
}, self.party_account_currency, item=self)
)
gl_entries.append(
self.get_gl_dict({
"account": self.cash_bank_account,
"against": self.supplier,
"credit": self.base_paid_amount,
"credit_in_account_currency": self.base_paid_amount \
if bank_account_currency==self.company_currency else self.paid_amount,
"cost_center": self.cost_center
}, bank_account_currency, item=self)
)
def make_write_off_gl_entry(self, gl_entries):
# the write-off account absorbs the petty difference between the invoice amount
# and the amount that is actually paid
if self.write_off_account and flt(self.write_off_amount):
write_off_account_currency = get_account_currency(self.write_off_account)
gl_entries.append(
self.get_gl_dict({
"account": self.credit_to,
"party_type": "Supplier",
"party": self.supplier,
"against": self.write_off_account,
"debit": self.base_write_off_amount,
"debit_in_account_currency": self.base_write_off_amount \
if self.party_account_currency==self.company_currency else self.write_off_amount,
"against_voucher": self.return_against if cint(self.is_return) and self.return_against else self.name,
"against_voucher_type": self.doctype,
"cost_center": self.cost_center,
"project": self.project
}, self.party_account_currency, item=self)
)
gl_entries.append(
self.get_gl_dict({
"account": self.write_off_account,
"against": self.supplier,
"credit": flt(self.base_write_off_amount),
"credit_in_account_currency": self.base_write_off_amount \
if write_off_account_currency==self.company_currency else self.write_off_amount,
"cost_center": self.cost_center or self.write_off_cost_center
}, item=self)
)
def make_gle_for_rounding_adjustment(self, gl_entries):
# if the rounding adjustment is small and the conversion rate is also small then
# base_rounding_adjustment may become zero due to small precision
# eg: rounding_adjustment = 0.01 and exchange rate = 0.05 and precision of base_rounding_adjustment is 2
# then base_rounding_adjustment becomes zero and error is thrown in GL Entry
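# (editor's note, added for clarity) With those figures: 0.01 * 0.05 = 0.0005,
# which rounds to 0.00 at a precision of 2, hence the extra check on
# base_rounding_adjustment below.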
if not self.is_internal_transfer() and self.rounding_adjustment and self.base_rounding_adjustment:
round_off_account, round_off_cost_center = \
get_round_off_account_and_cost_center(self.company)
gl_entries.append(
self.get_gl_dict({
"account": round_off_account,
"against": self.supplier,
"debit_in_account_currency": self.rounding_adjustment,
"debit": self.base_rounding_adjustment,
"cost_center": self.cost_center or round_off_cost_center,
}, item=self))
def on_cancel(self):
super(PurchaseInvoice, self).on_cancel()
self.check_on_hold_or_closed_status()
self.update_status_updater_args()
self.update_prevdoc_status()
if not self.is_return:
self.update_billing_status_for_zero_amount_refdoc("Purchase Receipt")
self.update_billing_status_for_zero_amount_refdoc("Purchase Order")
self.update_billing_status_in_pr()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating ordered qty in bin depends upon updated ordered qty in PO
if self.update_stock == 1:
self.update_stock_ledger()
self.delete_auto_created_batches()
self.make_gl_entries_on_cancel()
if self.update_stock == 1:
self.repost_future_sle_and_gle()
self.update_project()
frappe.db.set(self, 'status', 'Cancelled')
unlink_inter_company_doc(self.doctype, self.name, self.inter_company_invoice_reference)
self.ignore_linked_doctypes = ('GL Entry', 'Stock Ledger Entry', 'Repost Item Valuation')
def update_project(self):
project_list = []
for d in self.items:
if d.project and d.project not in project_list:
project = frappe.get_doc("Project", d.project)
project.update_purchase_costing()
project.db_update()
project_list.append(d.project)
def validate_supplier_invoice(self):
if self.bill_date:
if getdate(self.bill_date) > getdate(self.posting_date):
frappe.throw(_("Supplier Invoice Date cannot be greater than Posting Date"))
if self.bill_no:
if cint(frappe.db.get_single_value("Accounts Settings", "check_supplier_invoice_uniqueness")):
fiscal_year = get_fiscal_year(self.posting_date, company=self.company, as_dict=True)
pi = frappe.db.sql('''select name from `tabPurchase Invoice`
where
bill_no = %(bill_no)s
and supplier = %(supplier)s
and name != %(name)s
and docstatus < 2
and posting_date between %(year_start_date)s and %(year_end_date)s''', {
"bill_no": self.bill_no,
"supplier": self.supplier,
"name": self.name,
"year_start_date": fiscal_year.year_start_date,
"year_end_date": fiscal_year.year_end_date
})
if pi:
pi = pi[0][0]
frappe.throw(_("Supplier Invoice No exists in Purchase Invoice {0}").format(pi))
def update_billing_status_in_pr(self, update_modified=True):
updated_pr = []
for d in self.get("items"):
if d.pr_detail:
billed_amt = frappe.db.sql("""select sum(amount) from `tabPurchase Invoice Item`
where pr_detail=%s and docstatus=1""", d.pr_detail)
billed_amt = billed_amt and billed_amt[0][0] or 0
frappe.db.set_value("Purchase Receipt Item", d.pr_detail, "billed_amt", billed_amt, update_modified=update_modified)
updated_pr.append(d.purchase_receipt)
elif d.po_detail:
updated_pr += update_billed_amount_based_on_po(d.po_detail, update_modified)
for pr in set(updated_pr):
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import update_billing_percentage
pr_doc = frappe.get_doc("Purchase Receipt", pr)
update_billing_percentage(pr_doc, update_modified=update_modified)
def on_recurring(self, reference_doc, auto_repeat_doc):
self.due_date = None
def block_invoice(self, hold_comment=None, release_date=None):
self.db_set('on_hold', 1)
self.db_set('hold_comment', cstr(hold_comment))
self.db_set('release_date', release_date)
def unblock_invoice(self):
self.db_set('on_hold', 0)
self.db_set('release_date', None)
def set_tax_withholding(self):
if not self.apply_tds:
return
tax_withholding_details = get_party_tax_withholding_details(self, self.tax_withholding_category)
if not tax_withholding_details:
return
accounts = []
for d in self.taxes:
if d.account_head == tax_withholding_details.get("account_head"):
d.update(tax_withholding_details)
accounts.append(d.account_head)
if not accounts or tax_withholding_details.get("account_head") not in accounts:
self.append("taxes", tax_withholding_details)
to_remove = [d for d in self.taxes
if not d.tax_amount and d.account_head == tax_withholding_details.get("account_head")]
for d in to_remove:
self.remove(d)
# calculate totals again after applying TDS
self.calculate_taxes_and_totals()
def set_status(self, update=False, status=None, update_modified=True):
if self.is_new():
if self.get('amended_from'):
self.status = 'Draft'
return
precision = self.precision("outstanding_amount")
outstanding_amount = flt(self.outstanding_amount, precision)
due_date = getdate(self.due_date)
nowdate = getdate()
if not status:
if self.docstatus == 2:
status = "Cancelled"
elif self.docstatus == 1:
if self.is_internal_transfer():
self.status = 'Internal Transfer'
elif outstanding_amount > 0 and due_date < nowdate:
self.status = "Overdue"
elif outstanding_amount > 0 and due_date >= nowdate:
self.status = "Unpaid"
# Check if the outstanding amount is 0 due to a debit note issued against the invoice
elif outstanding_amount <= 0 and self.is_return == 0 and frappe.db.get_value('Purchase Invoice', {'is_return': 1, 'return_against': self.name, 'docstatus': 1}):
self.status = "Debit Note Issued"
elif self.is_return == 1:
self.status = "Return"
elif outstanding_amount<=0:
self.status = "Paid"
else:
self.status = "Submitted"
else:
self.status = "Draft"
if update:
self.db_set('status', self.status, update_modified = update_modified)
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context.update({
'show_sidebar': True,
'show_search': True,
'no_breadcrumbs': True,
'title': _('Purchase Invoices'),
})
return list_context
@erpnext.allow_regional
def make_regional_gl_entries(gl_entries, doc):
return gl_entries
@frappe.whitelist()
def make_debit_note(source_name, target_doc=None):
from erpnext.controllers.sales_and_purchase_return import make_return_doc
return make_return_doc("Purchase Invoice", source_name, target_doc)
@frappe.whitelist()
def make_stock_entry(source_name, target_doc=None):
doc = get_mapped_doc("Purchase Invoice", source_name, {
"Purchase Invoice": {
"doctype": "Stock Entry",
"validation": {
"docstatus": ["=", 1]
}
},
"Purchase Invoice Item": {
"doctype": "Stock Entry Detail",
"field_map": {
"stock_qty": "transfer_qty",
"batch_no": "batch_no"
},
}
}, target_doc)
return doc
@frappe.whitelist()
def change_release_date(name, release_date=None):
if frappe.db.exists('Purchase Invoice', name):
pi = frappe.get_doc('Purchase Invoice', name)
pi.db_set('release_date', release_date)
@frappe.whitelist()
def unblock_invoice(name):
if frappe.db.exists('Purchase Invoice', name):
pi = frappe.get_doc('Purchase Invoice', name)
pi.unblock_invoice()
@frappe.whitelist()
def block_invoice(name, release_date, hold_comment=None):
if frappe.db.exists('Purchase Invoice', name):
pi = frappe.get_doc('Purchase Invoice', name)
pi.block_invoice(hold_comment, release_date)
@frappe.whitelist()
def make_inter_company_sales_invoice(source_name, target_doc=None):
from erpnext.accounts.doctype.sales_invoice.sales_invoice import make_inter_company_transaction
return make_inter_company_transaction("Purchase Invoice", source_name, target_doc)
def on_doctype_update():
frappe.db.add_index("Purchase Invoice", ["supplier", "is_return", "return_against"])
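# ---- Hedged sketch (editor's addition; not part of the original file) ----
# A minimal, frappe-free model of the invoice hold logic implemented by
# block_invoice(), unblock_invoice() and invoice_is_blocked() above, using
# plain datetime.date objects. Class and method names here are illustrative.
import datetime

class _HeldInvoiceSketch(object):
    def __init__(self):
        self.on_hold = 0
        self.release_date = None

    def block(self, release_date=None):
        self.on_hold = 1
        self.release_date = release_date

    def unblock(self):
        self.on_hold = 0
        self.release_date = None

    def is_blocked(self, today=None):
        today = today or datetime.date.today()
        # Blocked while on hold, and either no release date is set or the
        # release date still lies in the future (mirrors invoice_is_blocked).
        return bool(self.on_hold and
                    (not self.release_date or self.release_date > today))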
ESS-LLP/erpnext | erpnext/accounts/doctype/purchase_invoice/purchase_invoice.py | Python | gpl-3.0 | 50,618
# coding: utf-8
from __future__ import unicode_literals
import datetime
from decimal import Decimal
from hashlib import sha1
from time import time
from uuid import UUID
from django.contrib.postgres.functions import TransactionNow
from django.db import connections
from django.db.models import QuerySet, Subquery, Exists
from django.db.models.functions import Now
from django.db.models.sql import Query, AggregateQuery
from django.db.models.sql.where import ExtraWhere, WhereNode
from django.utils.six import text_type, binary_type, integer_types
from .settings import ITERABLES, cachalot_settings
from .transaction import AtomicCache
class UncachableQuery(Exception):
pass
class IsRawQuery(Exception):
pass
CACHABLE_PARAM_TYPES = {
bool, int, float, Decimal, bytearray, binary_type, text_type, type(None),
datetime.date, datetime.time, datetime.datetime, datetime.timedelta, UUID,
}
CACHABLE_PARAM_TYPES.update(integer_types) # Adds long for Python 2
UNCACHABLE_FUNCS = {Now, TransactionNow}
try:
from psycopg2 import Binary
from psycopg2.extras import (
NumericRange, DateRange, DateTimeRange, DateTimeTZRange, Inet, Json)
from django.contrib.postgres.fields.jsonb import JsonAdapter
except ImportError:
pass
else:
CACHABLE_PARAM_TYPES.update((
Binary, NumericRange, DateRange, DateTimeRange, DateTimeTZRange, Inet,
Json, JsonAdapter))
def check_parameter_types(params):
for p in params:
cl = p.__class__
if cl not in CACHABLE_PARAM_TYPES:
if cl in ITERABLES:
check_parameter_types(p)
elif cl is dict:
check_parameter_types(p.items())
else:
raise UncachableQuery
def get_query_cache_key(compiler):
"""
Generates a cache key from a SQLCompiler.
This cache key is specific to the SQL query and its context
(which database is used). The same query in the same context
(= the same database) must generate the same cache key.
:arg compiler: A SQLCompiler that will generate the SQL query
:type compiler: django.db.models.sql.compiler.SQLCompiler
:return: A cache key
:rtype: str
"""
sql, params = compiler.as_sql()
check_parameter_types(params)
cache_key = '%s:%s:%s' % (compiler.using, sql,
[text_type(p) for p in params])
return sha1(cache_key.encode('utf-8')).hexdigest()
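# ---- Hedged illustration (editor's addition; not part of the original file) ----
# Shows the shape of the key computed above without needing a SQLCompiler:
# database alias, SQL text and stringified parameters are joined, then hashed.
# It reuses the module's existing sha1/text_type imports; the SQL and
# parameters below are made up for illustration.
def _example_query_cache_key():
    example_sql = ('SELECT "auth_user"."id" FROM "auth_user" '
                   'WHERE "auth_user"."id" = %s')
    example_params = [42]
    raw = '%s:%s:%s' % ('default', example_sql,
                        [text_type(p) for p in example_params])
    return sha1(raw.encode('utf-8')).hexdigest()  # 40-character hex digest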
def get_table_cache_key(db_alias, table):
"""
Generates a cache key from a SQL table.
:arg db_alias: Alias of the used database
:type db_alias: str or unicode
:arg table: Name of the SQL table
:type table: str or unicode
:return: A cache key
:rtype: str
"""
cache_key = '%s:%s' % (db_alias, table)
return sha1(cache_key.encode('utf-8')).hexdigest()
def _get_tables_from_sql(connection, lowercased_sql):
return {t for t in connection.introspection.django_table_names()
if t in lowercased_sql}
def _find_subqueries_in_where(children):
for child in children:
child_class = child.__class__
if child_class is WhereNode:
for grand_child in _find_subqueries_in_where(child.children):
yield grand_child
elif child_class is ExtraWhere:
raise IsRawQuery
else:
rhs = child.rhs
rhs_class = rhs.__class__
if rhs_class is Query:
yield rhs
elif rhs_class is QuerySet:
yield rhs.query
elif rhs_class is Subquery or rhs_class is Exists:
yield rhs.queryset.query
elif rhs_class in UNCACHABLE_FUNCS:
raise UncachableQuery
def is_cachable(table):
whitelist = cachalot_settings.CACHALOT_ONLY_CACHABLE_TABLES
if whitelist and table not in whitelist:
return False
return table not in cachalot_settings.CACHALOT_UNCACHABLE_TABLES
def are_all_cachable(tables):
whitelist = cachalot_settings.CACHALOT_ONLY_CACHABLE_TABLES
if whitelist and not tables.issubset(whitelist):
return False
return tables.isdisjoint(cachalot_settings.CACHALOT_UNCACHABLE_TABLES)
def filter_cachable(tables):
whitelist = cachalot_settings.CACHALOT_ONLY_CACHABLE_TABLES
tables = tables.difference(cachalot_settings.CACHALOT_UNCACHABLE_TABLES)
if whitelist:
return tables.intersection(whitelist)
return tables
def _get_tables(db_alias, query):
if query.select_for_update or (
not cachalot_settings.CACHALOT_CACHE_RANDOM
and '?' in query.order_by):
raise UncachableQuery
try:
if query.extra_select:
raise IsRawQuery
# Gets all tables already found by the ORM.
tables = set(query.table_map)
tables.add(query.get_meta().db_table)
# Gets tables in subquery annotations.
for annotation in query.annotations.values():
if isinstance(annotation, Subquery):
tables.update(_get_tables(db_alias, annotation.queryset.query))
# Gets tables in WHERE subqueries.
for subquery in _find_subqueries_in_where(query.where.children):
tables.update(_get_tables(db_alias, subquery))
# Gets tables in HAVING subqueries.
if isinstance(query, AggregateQuery):
tables.update(
_get_tables_from_sql(connections[db_alias], query.subquery))
# Gets tables in combined queries
        # using `.union`, `.intersection`, or `.difference`.
if query.combined_queries:
for combined_query in query.combined_queries:
tables.update(_get_tables(db_alias, combined_query))
except IsRawQuery:
sql = query.get_compiler(db_alias).as_sql()[0].lower()
tables = _get_tables_from_sql(connections[db_alias], sql)
if not are_all_cachable(tables):
raise UncachableQuery
return tables
def _get_table_cache_keys(compiler):
db_alias = compiler.using
get_table_cache_key = cachalot_settings.CACHALOT_TABLE_KEYGEN
return [get_table_cache_key(db_alias, t)
for t in _get_tables(db_alias, compiler.query)]
def _invalidate_tables(cache, db_alias, tables):
tables = filter_cachable(set(tables))
if not tables:
return
now = time()
get_table_cache_key = cachalot_settings.CACHALOT_TABLE_KEYGEN
cache.set_many(
{get_table_cache_key(db_alias, t): now for t in tables},
cachalot_settings.CACHALOT_TIMEOUT)
if isinstance(cache, AtomicCache):
cache.to_be_invalidated.update(tables)
|
BertrandBordage/django-cachalot
|
cachalot/utils.py
|
Python
|
bsd-3-clause
| 6,617
|
# This file is part of jenkins-epo
#
# jenkins-epo is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or any later version.
#
# jenkins-epo is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# jenkins-epo. If not, see <http://www.gnu.org/licenses/>.
import asyncio
from collections import OrderedDict
import logging
from aiohttp.errors import HttpProcessingError
from ..bot import Extension, Error, SkipHead
from ..jenkins import Build, JENKINS, NotOnJenkins, UnknownJob
from ..repository import Commit, CommitStatus
from ..utils import log_context, match
logger = logging.getLogger(__name__)
class JenkinsExtension(Extension):
def is_enabled(self, settings):
return bool(settings.JENKINS_URL)
class BackedExtension(JenkinsExtension):
stage = '20'
@asyncio.coroutine
def run(self):
missing_contextes = [
c
for spec in self.current.job_specs.values()
for c in self.current.jobs[spec.name].list_contexts(spec)
if c not in self.current.statuses
]
loop = asyncio.get_event_loop()
tasks = [
loop.create_task(
self.current.last_commit.maybe_update_status(
dict(
context=context,
description='Backed',
state='pending',
)
)
)
for context in missing_contextes
]
yield from asyncio.gather(*tasks)
class BuilderExtension(JenkinsExtension):
"""
jenkins: rebuild # Retry failed jobs
"""
DEFAULTS = {
'rebuild_failed': None,
}
def process_instruction(self, instruction):
if instruction == 'rebuild':
logger.info("Retrying jobs failed before %s.", instruction.date)
self.current.rebuild_failed = instruction.date
@asyncio.coroutine
def process_job_spec(self, spec):
log_context(self.current.head)
update_status = self.current.last_commit.maybe_update_status
logger.debug("Processing %s.", spec)
job = self.current.jobs[spec.name]
not_built = self.current.last_commit.filter_not_built_contexts(
job.list_contexts(spec),
rebuild_failed=self.current.rebuild_failed
)
queue_empty = yield from JENKINS.is_queue_empty()
toqueue_contexts = []
for context in not_built:
logger.debug("Computing next state for %s.", context)
new_status = self.status_for_new_context(
job, context, queue_empty,
)
yield from update_status(new_status)
if new_status.get('description') == 'Queued':
toqueue_contexts.append(context)
if toqueue_contexts and queue_empty:
try:
yield from job.build(self.current.head, spec, toqueue_contexts)
except Exception as e:
if self.current.SETTINGS.DEBUG:
raise
logger.exception("Failed to queue job %s: %s.", job, e)
for context in toqueue_contexts:
new_status = CommitStatus(
context=context, state='error',
description='Failed to queue job.',
target_url=job.baseurl,
)
yield from update_status(new_status)
def status_for_new_context(self, job, context, queue_empty):
new_status = CommitStatus(target_url=job.baseurl, context=context)
if not job.enabled:
new_status.update({
'description': 'Disabled on Jenkins.',
'state': 'success',
})
else:
current_status = self.current.statuses.get(context, {})
already_queued = 'Queued' == current_status.get('description')
queued = queue_empty or already_queued
new_status.update({
'description': 'Queued' if queued else 'Backed',
'state': 'pending',
})
return new_status
class CancellerExtension(JenkinsExtension):
stage = '49'
def aggregate_queues(self, cancel_queue, poll_queue):
for commit, status in cancel_queue:
yield commit, status, True
for commit, status in poll_queue:
yield commit, status, False
@asyncio.coroutine
def poll_build(self, commit, status, cancel):
log_context(self.current.head)
logger.debug("Query Jenkins %s status for %s.", status, commit)
try:
build = yield from Build.from_url(status['target_url'])
except HttpProcessingError as e:
logger.warn(
"Failed to get %s: %s %s",
status['target_url'], e.code, e.message,
)
return
except NotOnJenkins as e:
logger.debug("%s not on this Jenkins", status['target_url'])
return
if cancel and build.is_running:
if self.current.SETTINGS.DRY_RUN:
logger.warn("Would cancel %s.", build)
else:
logger.warn("Cancelling %s.", build)
yield from build.stop()
last_status = self.current.statuses.get(status['context'], {})
if last_status.get('state') != 'success':
new_status = status.__class__(
status, state='error', description='Cancelled after push.'
)
else:
new_status = last_status
else:
new_status = CommitStatus(status, **build.commit_status)
yield from commit.maybe_update_status(new_status)
@asyncio.coroutine
def run(self):
aggregated_queue = self.aggregate_queues(
self.current.cancel_queue, self.current.poll_queue
)
logger.info("Polling job statuses on Jenkins.")
loop = asyncio.get_event_loop()
tasks = [
loop.create_task(self.poll_build(*args))
for args in aggregated_queue
]
yield from asyncio.gather(*tasks)
class CreateJobsExtension(JenkinsExtension):
"""
jenkins: refresh-jobs # Refresh job definition on Jenkins.
"""
stage = '05'
DEFAULTS = {
'jobs': {},
'job_specs': {},
'refresh_jobs': {},
}
JOB_ERROR_COMMENT = """\
Failed to create or update Jenkins job `%(name)s`.
```
%(error)s
%(detail)s
```
"""
def process_instruction(self, instruction):
if instruction == 'refresh-jobs':
self.current.refresh_jobs = instruction.date
def process_job_specs(self):
for spec in self.current.job_specs.values():
current_job = self.current.jobs.get(spec.name)
if not current_job:
yield JENKINS.create_job, spec
continue
update = False
if self.current.refresh_jobs:
update = (
not current_job.updated_at or
self.current.refresh_jobs >= current_job.updated_at
)
if not current_job.spec.contains(spec):
spec = current_job.spec.merge(spec)
update = True
if update:
yield current_job.update, spec
@asyncio.coroutine
def fetch_job(self, name):
log_context(self.current.head)
if name in self.current.jobs:
return
try:
self.current.jobs[name] = yield from JENKINS.aget_job(name)
except UnknownJob:
pass
@asyncio.coroutine
def process_job(self, action, spec):
log_context(self.current.head)
job = None
try:
job = yield from action(spec)
except Exception as e:
self.current.errors.append(self.process_error(spec, e))
if not job:
return
self.current.jobs[job.name] = job
if spec.config.get('periodic'):
yield from self.current.last_commit.push_status(CommitStatus(
context=job.name, state='success',
target_url=job.baseurl, description='Created!',
))
@asyncio.coroutine
def run(self):
logger.info("Fetching jobs from Jenkins.")
loop = asyncio.get_event_loop()
tasks = [
loop.create_task(self.fetch_job(name))
for name in self.current.job_specs
]
yield from asyncio.gather(*tasks)
tasks = [
loop.create_task(self.process_job(action, spec))
for action, spec in self.process_job_specs()
]
yield from asyncio.gather(*tasks)
def process_error(self, spec, e):
detail = (
e.args[0]
.replace('\\n', '\n')
.replace('\\t', '\t')
)
logger.error(
"Failed to manage job %r:\n%s", spec.name, detail
)
return Error(
self.JOB_ERROR_COMMENT % dict(
name=spec.name, error=e, detail=detail,
),
self.current.last_commit.date,
)
class PollExtension(JenkinsExtension):
stage = '30'
def iter_preset_statuses(self, contextes, build):
for context in contextes:
default_status = CommitStatus(
context=context, state='pending', description='Backed',
)
status = self.current.statuses.get(
context, default_status,
)
new_url = status.get('target_url') == build.url
if status.is_queueable or new_url:
status = CommitStatus(status, **build.commit_status)
yield status
@asyncio.coroutine
def poll_job(self, spec):
log_context(self.current.head)
asyncio.Task.current_task().logging_id = self.current.head.sha[:4]
job = self.current.jobs[spec.name]
payload = yield from job.fetch_builds()
contextes = job.list_contexts(spec)
for build in job.process_builds(payload):
if not build.is_running:
continue
if build.is_outdated:
break
if build.ref != self.current.head.ref:
continue
try:
build_sha = build.sha
except Exception:
build_sha = self.current.head.sha
if build_sha == self.current.head.sha:
commit = self.current.last_commit
preset_statuses = self.iter_preset_statuses(
contextes, build,
)
for status in preset_statuses:
logger.info(
"Preset pending status for %s.", status['context'],
)
yield from commit.maybe_update_status(status)
continue
else:
commit = Commit(self.current.head.repository, build.sha)
status = CommitStatus(context=job.name, **build.commit_status)
logger.info("Queuing %s for cancel.", build)
self.current.cancel_queue.append((commit, status))
logger.debug("Polling %s done.", spec.name)
@asyncio.coroutine
def run(self):
logger.info("Polling running builds on Jenkins.")
tasks = []
loop = asyncio.get_event_loop()
for name, spec in self.current.job_specs.items():
tasks.append(
loop.create_task(self.poll_job(spec))
)
yield from asyncio.gather(*tasks)
class Stage(object):
@classmethod
def factory(cls, entry):
if isinstance(entry, str):
entry = dict(name=entry)
return cls(**entry)
def __init__(self, name, external=None, **kw):
self.name = name
self.job_specs = []
self.external_contextes = external or []
self.statuses = []
def __bool__(self):
return bool(self.job_specs or self.external_contextes)
def __str__(self):
return self.name
def is_complete(self, jobs, statuses):
for context in self.external_contextes:
state = statuses.get(context, {}).get('state')
if state != 'success':
logger.debug("Missing context %s for stage %s.", context, self)
return False
for spec in self.job_specs:
try:
job = jobs[spec.name]
except KeyError:
continue
for context in job.list_contexts(spec):
state = statuses.get(context, {}).get('state')
if state != 'success':
logger.debug("Missing job %s for stage %s.", spec, self)
return False
return True
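# Illustrative note (not in the original source): stage entries from
# SETTINGS.STAGES (see StagesExtension below) go through Stage.factory(), so a
# stage can be declared either as a plain string or as a dict that also lists
# external commit-status contexts which must be 'success' before the stage is
# considered complete, e.g. (hypothetical values):
#
#     STAGES = ['build', 'test', {'name': 'deploy', 'external': ['ci/approval']}]
#
# is_complete() then checks those external contexts as well as every context of
# the stage's own job specs.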
class StagesExtension(JenkinsExtension):
stage = '10'
SETTINGS = {
'STAGES': ['build', 'test', 'deploy'],
}
@asyncio.coroutine
def run(self):
stages = [Stage.factory(i) for i in self.current.SETTINGS.STAGES]
# First, group jobs by stages
self.current.stages = stages = OrderedDict(
[(s.name, s) for s in stages],
)
default_stage = 'test' if 'test' in stages else list(stages.keys())[0]
for spec in self.current.job_specs.values():
if spec.config.get('periodic') and not spec.config.get('stage'):
logger.debug("Skipping %s with no explicit stage.", spec)
continue
stage = spec.config.get('stage', default_stage)
stages[stage].job_specs.append(spec)
stage = None
# Search current stage to build.
for stage in [s for s in stages.values() if bool(s)]:
complete = stage.is_complete(
self.current.jobs, self.current.statuses
)
if not complete:
break
if not stage:
logger.warn("Not in any stage. Skipping.")
raise SkipHead()
self.current.current_stage = stage
# Filter job specs to the current stage ones.
current_ref = self.current.head.ref
self.current.job_specs = {}
for spec in stage.job_specs:
branches = spec.config.get('branches', ['*'])
if not isinstance(branches, list):
branches = [branches]
if not match(current_ref, branches):
logger.debug("Ignore job %s on this branch.", spec)
continue
if spec.config.get('periodic'):
logger.debug("Ignore periodic job %s.", spec)
continue
self.current.job_specs[spec.name] = spec
logger.info(
"Current stage is %s. Completed=%s. Jobs: %s.",
stage, complete, ', '.join(self.current.job_specs) or 'None',
)
|
novafloss/jenkins-github-poller
|
jenkins_epo/extensions/jenkins.py
|
Python
|
gpl-3.0
| 15,293
|
###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case5_2(Case):
DESCRIPTION = """Send Pong fragmented into 2 fragments."""
EXPECTATION = """Connection is failed immediately, since control message MUST NOT be fragmented."""
def onOpen(self):
self.expected[Case.OK] = []
self.expectedClose = {"closedByMe":False,"closeCode":[self.p.CLOSE_STATUS_CODE_PROTOCOL_ERROR],"requireClean":False}
self.p.sendFrame(opcode = 10, fin = False, payload = "fragment1")
self.p.sendFrame(opcode = 0, fin = True, payload = "fragment2")
self.p.killAfter(1)
|
tavendo/AutobahnTestSuite
|
autobahntestsuite/autobahntestsuite/case/case5_2.py
|
Python
|
apache-2.0
| 1,372
|
# -*- coding: utf-8 -*-
"""
conpaas.core.expose
===================
ConPaaS core: expose http methods.
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
exposed_functions = {}
def expose(http_method):
"""
    Exposes methods under the given HTTP method.
    :param http_method: HTTP method (e.g. 'GET' or 'POST') under which the decorated function is exposed
    :type http_method: str
:returns: A decorator to be used in the source code.
"""
def decorator(func):
if http_method not in exposed_functions:
exposed_functions[http_method] = {}
exposed_functions[http_method][func.__name__] = func
def wrapped(self, *args, **kwargs):
return func(self, *args, **kwargs)
return wrapped
return decorator
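# Minimal usage sketch (illustrative only, not part of the original module): the
# decorator stores the undecorated function in ``exposed_functions`` keyed by
# HTTP method and function name, so a dispatcher can look handlers up later.
if __name__ == '__main__':
    class DemoService(object):
        @expose('GET')
        def get_state(self):
            return {'state': 'RUNNING'}

    handler = exposed_functions['GET']['get_state']
    print(handler(DemoService()))  # -> {'state': 'RUNNING'}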
|
mihaisoloi/conpaas
|
conpaas-services/src/conpaas/core/expose.py
|
Python
|
bsd-3-clause
| 715
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
# URL patterns from the root.
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^aboutme/', 'blog.views.aboutme'),
url(r'^contact/', 'blog.views.contact'),
url(r'^$', 'blog.views.index'),
url(r'^(?P<slug>[\w\-]+)/$', 'blog.views.post'),
)
|
vikrampriya/portfolio
|
portfolio/urls.py
|
Python
|
mit
| 372
|
"""
Utils.
"""
import copy
from collections import OrderedDict
import inflection
from django.conf import settings
from django.utils import six
from django.utils.module_loading import import_string as import_class_from_dotted_path
from django.utils.translation import ugettext_lazy as _
from rest_framework.exceptions import APIException
try:
from rest_framework.serializers import ManyRelatedField
except ImportError:
ManyRelatedField = type(None)
try:
from rest_framework_nested.relations import HyperlinkedRouterField
except ImportError:
HyperlinkedRouterField = type(None)
def get_resource_name(context):
"""
Return the name of a resource.
"""
view = context.get('view')
# Sanity check to make sure we have a view.
if not view:
raise APIException(_('Could not find view.'))
# Check to see if there is a status code and return early
# with the resource_name value of `errors`.
try:
code = str(view.response.status_code)
except (AttributeError, ValueError):
pass
else:
if code.startswith('4') or code.startswith('5'):
return 'errors'
try:
resource_name = getattr(view, 'resource_name')
except AttributeError:
try:
serializer = view.get_serializer_class()
return get_resource_type_from_serializer(serializer)
except AttributeError:
try:
resource_name = get_resource_type_from_model(view.model)
except AttributeError:
resource_name = view.__class__.__name__
if not isinstance(resource_name, six.string_types):
# The resource name is not a string - return as is
return resource_name
# the name was calculated automatically from the view > pluralize and format
resource_name = format_relation_name(resource_name)
return resource_name
def get_serializer_fields(serializer):
fields = None
if hasattr(serializer, 'child'):
fields = getattr(serializer.child, 'fields')
meta = getattr(serializer.child, 'Meta', None)
if hasattr(serializer, 'fields'):
fields = getattr(serializer, 'fields')
meta = getattr(serializer, 'Meta', None)
if fields:
meta_fields = getattr(meta, 'meta_fields', {})
for field in meta_fields:
try:
fields.pop(field)
except KeyError:
pass
return fields
def format_keys(obj, format_type=None):
"""
Takes either a dict or list and returns it with camelized keys only if
JSON_API_FORMAT_KEYS is set.
    :format_type: Either 'dasherize', 'camelize', 'capitalize' or 'underscore'
"""
if format_type is None:
format_type = getattr(settings, 'JSON_API_FORMAT_KEYS', False)
if format_type in ('dasherize', 'camelize', 'underscore', 'capitalize'):
if isinstance(obj, dict):
formatted = OrderedDict()
for key, value in obj.items():
if format_type == 'dasherize':
# inflection can't dasherize camelCase
key = inflection.underscore(key)
formatted[inflection.dasherize(key)] \
= format_keys(value, format_type)
elif format_type == 'camelize':
formatted[inflection.camelize(key, False)] \
= format_keys(value, format_type)
elif format_type == 'capitalize':
formatted[inflection.camelize(key)] \
= format_keys(value, format_type)
elif format_type == 'underscore':
formatted[inflection.underscore(key)] \
= format_keys(value, format_type)
return formatted
if isinstance(obj, list):
return [format_keys(item, format_type) for item in obj]
else:
return obj
else:
return obj
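# Illustrative example (not in the original source): with an explicit
# format_type the settings lookup above is skipped and keys are rewritten
# recursively while values are left untouched, e.g.
#
#     format_keys({'first_name': 'Ada', 'known_for': [{'project_name': 'engine'}]},
#                 format_type='camelize')
#
# returns an OrderedDict equivalent to
# {'firstName': 'Ada', 'knownFor': [{'projectName': 'engine'}]}.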
def format_value(value, format_type=None):
if format_type is None:
format_type = getattr(settings, 'JSON_API_FORMAT_KEYS', False)
if format_type == 'dasherize':
# inflection can't dasherize camelCase
value = inflection.underscore(value)
value = inflection.dasherize(value)
elif format_type == 'camelize':
value = inflection.camelize(value, False)
elif format_type == 'capitalize':
value = inflection.camelize(value)
elif format_type == 'underscore':
value = inflection.underscore(value)
return value
def format_relation_name(value, format_type=None):
if format_type is None:
format_type = getattr(settings, 'JSON_API_FORMAT_RELATION_KEYS', False)
pluralize = getattr(settings, 'JSON_API_PLURALIZE_RELATION_TYPE', False)
if format_type:
# format_type will never be None here so we can use format_value
value = format_value(value, format_type)
return inflection.pluralize(value) if pluralize else value
def get_related_resource_type(relation):
if hasattr(relation, '_meta'):
relation_model = relation._meta.model
elif hasattr(relation, 'model'):
# the model type was explicitly passed as a kwarg to ResourceRelatedField
relation_model = relation.model
elif hasattr(relation, 'get_queryset') and relation.get_queryset() is not None:
relation_model = relation.get_queryset().model
else:
parent_serializer = relation.parent
if hasattr(parent_serializer, 'Meta'):
parent_model = parent_serializer.Meta.model
else:
parent_model = parent_serializer.parent.Meta.model
if relation.source:
if relation.source != '*':
parent_model_relation = getattr(parent_model, relation.source)
else:
parent_model_relation = getattr(parent_model, relation.field_name)
else:
parent_model_relation = getattr(parent_model, parent_serializer.field_name)
if hasattr(parent_model_relation, 'related'):
try:
relation_model = parent_model_relation.related.related_model
except AttributeError:
# Django 1.7
relation_model = parent_model_relation.related.model
elif hasattr(parent_model_relation, 'field'):
relation_model = parent_model_relation.field.related.model
else:
return get_related_resource_type(parent_model_relation)
return get_resource_type_from_model(relation_model)
def get_instance_or_manager_resource_type(resource_instance_or_manager):
if hasattr(resource_instance_or_manager, 'model'):
return get_resource_type_from_manager(resource_instance_or_manager)
if hasattr(resource_instance_or_manager, '_meta'):
return get_resource_type_from_instance(resource_instance_or_manager)
pass
def get_resource_type_from_model(model):
json_api_meta = getattr(model, 'JSONAPIMeta', None)
return getattr(
json_api_meta,
'resource_name',
format_relation_name(model.__name__))
def get_resource_type_from_queryset(qs):
return get_resource_type_from_model(qs.model)
def get_resource_type_from_instance(instance):
return get_resource_type_from_model(instance._meta.model)
def get_resource_type_from_manager(manager):
return get_resource_type_from_model(manager.model)
def get_resource_type_from_serializer(serializer):
return getattr(
serializer.Meta,
'resource_name',
get_resource_type_from_model(serializer.Meta.model))
def get_included_serializers(serializer):
included_serializers = copy.copy(getattr(serializer, 'included_serializers', dict()))
for name, value in six.iteritems(included_serializers):
if not isinstance(value, type):
if value == 'self':
included_serializers[name] = serializer if isinstance(serializer, type) else serializer.__class__
else:
included_serializers[name] = import_class_from_dotted_path(value)
return included_serializers
class Hyperlink(six.text_type):
"""
A string like object that additionally has an associated name.
We use this for hyperlinked URLs that may render as a named link
in some contexts, or render as a plain URL in others.
Comes from Django REST framework 3.2
https://github.com/tomchristie/django-rest-framework
"""
def __new__(self, url, name):
ret = six.text_type.__new__(self, url)
ret.name = name
return ret
is_hyperlink = True
|
leo-naeka/django-rest-framework-json-api
|
rest_framework_json_api/utils.py
|
Python
|
bsd-2-clause
| 8,634
|
from django.db import models
from django.utils import timezone
# For teammate manager and modification of Django's base user auth system
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
# Manager class that instantiates the creation of a new teammate
# source (i think): http://procrastinatingdev.com/django/using-configurable-user-models-in-django-1-5/
class TeammateManager(BaseUserManager):
# Base create teammate method
def _create_user(self, email, first_name, last_name, date_of_birth, password,
is_staff, is_superuser, **extra_fields):
# Define the time
now = timezone.now()
# Check for an email
if not email:
# If none, tell the user
raise ValueError('The teammate must set an email!')
# Clean and assign data
email = self.normalize_email(email)
user = self.model(email = email, is_staff = is_staff,
is_active = True, is_superuser = is_superuser,
last_login = now, first_name = first_name,
last_name = last_name, date_of_birth = date_of_birth,
date_joined = now, **extra_fields)
# Ready to insert into database
user.set_password(password)
user.save(using = self._db)
return user
    # Method to create a normal teammate; has no access to the admin panel
def create_user(self, email, first_name, last_name, date_of_birth, password, **extra_fields):
# Utilizes the base creation method
return self._create_user(email, first_name, last_name, date_of_birth, password, False, False, **extra_fields)
# Method to create a 'superuser'; has access to EVERYTHING
def create_superuser(self, email, first_name, last_name, date_of_birth, password, **extra_fields):
# Utilize the base creation method
return self._create_user(email, first_name, last_name, date_of_birth, password, True, True, **extra_fields)
# Model that holds the teammates' data
class Teammate(AbstractBaseUser, PermissionsMixin):
email = models.EmailField('email address', max_length = 100, unique = True, db_index = True)
first_name = models.CharField('first name', max_length = 35)
last_name = models.CharField('last name', max_length = 35)
title = models.CharField('title', max_length = 100, default = 'Rider')
ROUTE_CHOICES = (
( 'sierra', 'Sierra' ),
( 'rockies', 'Rockies' ),
( 'ozarks', 'Ozarks' )
)
route = models.CharField(max_length=20, choices=ROUTE_CHOICES)
date_of_birth = models.DateField('date of birth', help_text = 'YYYY-MM-DD format')
is_staff = models.BooleanField('leadership status', default = False,
help_text = 'Designates who may login to the admin area')
is_active = models.BooleanField('active status', default = True,
help_text = 'Designates whether or not the teammate can login')
date_joined = models.DateTimeField('date added', default = timezone.now)
first_login = models.BooleanField('has logged in', default = True)
# Defines the class that manages the teammate
objects = TeammateManager()
# Defines what field will be used as the username
USERNAME_FIELD = 'email'
# Defines what field(s) will be required
REQUIRED_FIELDS = ['first_name', 'last_name', 'date_of_birth']
# Metadata about the model
class Meta:
verbose_name = 'teammate'
verbose_name_plural = 'teammates'
# Methods to return values about the teammate
def get_full_name(self):
full_name = '%s %s' % (self.first_name, self.last_name)
# Returns the full name
return full_name.strip()
get_full_name.short_description = 'Teammate'
def get_short_name(self):
# Returns the first name
return self.first_name
def get_email(self):
# Returns the email address
return self.email
def get_title(self):
# Returns the title
return self.title
def get_route(self):
# Returns the route
return self.route
def get_dob(self):
# Returns the DOB
return self.date_of_birth
    # Default string representation of the teammate
def __unicode__(self):
return self.get_full_name()
|
ethanperez/t4k-rms
|
riders/models.py
|
Python
|
mit
| 4,366
|
def main():
pass
def handle_result(args, result, target_window_id, boss):
boss.active_tab.neighboring_window(args[1])
handle_result.no_ui = True
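# Context note (assumption, not from the original file): this appears to be a
# kitty "kitten" — main() is a no-op because the work happens on the kitty side
# in handle_result(), which focuses the neighboring window given by the first
# argument (presumably 'left', 'right', 'top' or 'bottom'); no_ui = True
# presumably tells kitty that no overlay window is needed. A kitty.conf mapping
# along these lines would invoke it (exact syntax may vary by kitty version):
#
#     map ctrl+h kitten neighboring_window.py left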
|
sethwoodworth/XDG_CONFIG_HOME
|
kitty/neighboring_window.py
|
Python
|
gpl-2.0
| 157
|
import sys
from csv_data import Data
from csv_utilities import readIntegerCSV
from csv_utilities import convertToZeroOne
from statistics import mean
from math import log
def dotProduct( arr1, arr2 ):
return sum( [ arr1[idx] * arr2[idx] for idx in range( len( arr1 ) ) ] )
#
def dataSubsetWithY( data, y ):
return [ row for row in data.rows if row.Y == y ]
#
def probabilityXisZero( data, idx, beta ):
return ( 1 - probabilityXisOne( data, idx, beta ) )
#
def probabilityXisOne ( data, idx, beta ):
return ( mean( ( [ 1, 0 ] * ( beta - 1 ) ) + [ row.X[ idx ] for row in data ] ) )
#
def probabilityXY( data, x, idx, y, beta ):
return ( probabilityXisOne( dataSubsetWithY( data, y ), idx, beta ) if x == 1 else probabilityXisZero( dataSubsetWithY( data, y ), idx, beta ) )
#
def probabilityYisZero( data, beta ):
return ( 1 - probabilityYisOne( data, beta ) )
#
def probabilityYisOne ( data, beta ):
return ( mean( ( [ 1, 0 ] * ( beta - 1 ) ) + [ row.Y for row in data.rows ] ) )
#
def findBias( data, beta ):
return ( log( probabilityYisZero( data, beta ) / probabilityYisOne( data, beta ), 2 )
+ sum( [ log( probabilityXY( data, 0, idx, 1, beta ) / probabilityXY( data, 0, idx, 0, beta ), 2 ) for idx in range( data.attributeCount ) ] ) )
#
def findWeights( data, beta ):
return ( [ log( probabilityXY( data, 1, idx, 1, beta ) / probabilityXY( data, 1, idx, 0, beta ), 2 )
- log( probabilityXY( data, 0, idx, 1, beta ) / probabilityXY( data, 0, idx, 0, beta ), 2 ) for idx in range( data.attributeCount ) ] )
#
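# Background note (added for clarity, not in the original script): the model is a
# Bernoulli naive Bayes classifier expressed as a linear rule. With smoothed
# estimates p1_i = P(X_i=1 | Y=1) and p0_i = P(X_i=1 | Y=0), findWeights() returns
#     w_i = log2(p1_i / p0_i) - log2((1 - p1_i) / (1 - p0_i)),
# findBias() returns
#     b = log2(P(Y=0) / P(Y=1)) + sum_i log2((1 - p1_i) / (1 - p0_i)),
# and rowPrediction() predicts Y=1 whenever w.X + b >= 0. The beta argument adds
# beta-1 pseudo-observations of each outcome as Laplace-style smoothing.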
def rowPrediction( X, W, b ):
return ( 1 if ( dotProduct( X, W ) + b >= 0 ) else 0 )
#
def getResults( testing_data, W, b ):
return ( len( [ 1 for row in testing_data.rows if row.Y == rowPrediction( row.X, W, b ) ] ) / len( testing_data.rows ) )
#
def printModel( model_stream, attrs, W, b ):
model_stream.write( "{}\n".format( round( b, 4 ) ) )
for idx, attr in enumerate( attrs ):
model_stream.write( "{:16}\t{}\n".format( attr, round( W[ idx ], 4 ) ) )
def main( argv ):
    model = None
    try:
        training_data = Data( argv[ 0 ], readIntegerCSV, convertToZeroOne )
        testing_data = Data( argv[ 1 ], readIntegerCSV, convertToZeroOne )
        beta = int( argv[ 2 ] )
        model = open( argv[ 3 ], 'w+' )
        b = findBias( training_data, beta )
        W = findWeights( training_data, beta )
        rez = getResults( testing_data, W, b )
        print( rez )
        printModel( model, training_data.attributes, W, b )
    except IndexError:
        print( "ERROR: \"python3 nb.py <train> <test> <beta> <model>\"" )
    finally:
        # Guard against closing a file that was never opened (e.g. missing args).
        if model is not None:
            model.close()
#
if __name__=='__main__':
main( sys.argv[ 1: ] )
|
CKPalk/MachineLearning
|
Assignment3/Naive_Bayes/nb.py
|
Python
|
mit
| 2,586
|
#!/usr/bin/env python3
import os
import unittest
import logging
from rdflib import URIRef
from dipper import curie_map
from dipper.graph.RDFGraph import RDFGraph
from dipper.utils.CurieUtil import CurieUtil
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class RDFGraphTestCase(unittest.TestCase):
def setUp(self):
self.graph = RDFGraph()
this_curie_map = curie_map.get()
self.cutil = CurieUtil(this_curie_map)
# stuff to make test triples
self.test_cat_subj = "http://www.google.com"
self.test_cat_default_pred = self.cutil.get_uri("biolink:category")
self.test_cat_nondefault_pred = self.cutil.get_uri("rdf:type")
self.test_cat_default_category = self.cutil.get_uri("biolink:NamedThing")
self.test_cat_nondefault_category = self.cutil.get_uri("biolink:Gene")
self.test_cat_type = self.cutil.get_uri("rdf:type")
self.test_cat_class = self.cutil.get_uri("rdf:class")
def tearDown(self):
self.graph = None
def test_add_triple_makes_triple(self):
"""
test that addTriple() makes at least one triple
"""
self.graph.addTriple(subject_id=self.test_cat_subj,
predicate_id="rdf:type",
obj="rdf:class")
self.assertTrue(len(self.graph) > 0, "addTriples() didn't make >=1 triple")
def test_add_triple_subject_category_assignment(self):
"""
test that addTriple() correctly assigns subject category
"""
self.graph.addTriple(subject_id=self.test_cat_subj,
predicate_id="rdf:comment",
obj="website",
subject_category=self.test_cat_nondefault_category)
triples = list(self.graph.triples((URIRef(self.test_cat_subj),
URIRef(self.test_cat_default_pred),
None)))
self.assertEqual(len(triples), 1,
"addTriples() didn't make exactly one triple subject category")
self.assertEqual(triples[0][2], URIRef(self.test_cat_nondefault_category),
"addTriples() didn't assign the right triple subject category")
def test_add_triple_object_category_assignment(self):
"""
test that addTriple() correctly assigns object category
"""
self.graph.addTriple(subject_id=self.test_cat_subj,
predicate_id=self.test_cat_type,
obj=self.test_cat_class,
object_category=self.test_cat_nondefault_category)
triples = list(self.graph.triples((URIRef(self.test_cat_class),
URIRef(self.test_cat_default_pred),
None)))
self.assertEqual(len(triples), 1,
"addTriples() didn't make exactly one triple object category")
self.assertEqual(triples[0][2], URIRef(self.test_cat_nondefault_category),
"addTriples() didn't assign the right triple object category")
def read_graph_from_turtle_file(self, f):
"""
This will read the specified file into a graph. A simple parsing test.
:param f:
:return:
"""
vg = RDFGraph()
p = os.path.abspath(f)
logger.info("Testing reading turtle file from %s", p)
vg.parse(f, format="turtle")
logger.info('Found %s graph nodes in %s', len(vg), p)
self.assertTrue(len(vg) > 0, "No nodes found in "+p)
return
def read_graph_into_owl(self, f):
"""
test if the ttl can be parsed by owlparser
this expects owltools to be accessible from commandline
:param f: file of ttl
:return:
"""
import subprocess
from subprocess import check_call
status = check_call(["owltools", f], stderr=subprocess.STDOUT)
        # returns zero on success
if status != 0:
logger.error(
'finished verifying with owltools with status %s', status)
self.assertTrue(status == 0)
return
def test_make_category_triple_default(self):
"""
test that method adds category triple to graph correctly (default pred and obj)
"""
self.graph._make_category_triple(self.test_cat_subj)
triples = list(self.graph.triples((None, None, None)))
self.assertEqual(len(triples), 1, "method didn't make exactly one triple")
self.assertEqual(triples[0][0], URIRef(self.test_cat_subj),
"didn't assign correct subject")
self.assertEqual(triples[0][1], URIRef(self.test_cat_default_pred),
"didn't assign correct predicate")
self.assertEqual(triples[0][2], URIRef(self.test_cat_default_category),
"didn't assign correct category")
def test_make_category_triple_non_default_category(self):
"""
test that method adds category triple to graph correctly
"""
self.graph._make_category_triple(self.test_cat_subj,
self.test_cat_nondefault_category)
triples = list(self.graph.triples((None, None, None)))
self.assertEqual(len(triples), 1, "method didn't make exactly one triple")
self.assertEqual(URIRef(self.test_cat_nondefault_category),
triples[0][2],
"didn't assign correct (non-default) category")
def test_make_category_triple_non_default_pred(self):
"""
test that method adds category triple to graph correctly (non default pred)
"""
self.graph._make_category_triple(self.test_cat_subj,
self.test_cat_default_category,
predicate=self.test_cat_nondefault_pred)
triples = list(self.graph.triples((None, None, None)))
self.assertEqual(len(triples), 1, "method didn't make exactly one triple")
self.assertEqual(URIRef(self.test_cat_nondefault_pred),
triples[0][1],
"didn't assign correct (non-default) category")
def test_make_category_triple_category_none_should_emit_named_thing(self):
"""
test that method adds category triple to graph correctly (default pred and obj)
"""
self.graph._make_category_triple(self.test_cat_subj, category=None)
triples = list(self.graph.triples((None, None, None)))
self.assertEqual(len(triples), 1, "method didn't make exactly one triple")
self.assertEqual(URIRef(self.test_cat_default_category),
triples[0][2],
"didn't assign correct default category")
def test_is_literal(self):
"""
test that method infers type (either literal or CURIE) correctly
"""
self.assertTrue(self.graph._is_literal("1"))
self.assertTrue(not self.graph._is_literal("foo:bar"))
self.assertTrue(not self.graph._is_literal("http://www.zombo.com/"))
self.assertTrue(not self.graph._is_literal("https://www.zombo.com/"))
self.assertTrue(not self.graph._is_literal("ftp://ftp.1000genomes.ebi.ac.uk/"))
if __name__ == '__main__':
unittest.main()
|
TomConlin/dipper
|
tests/test_rdfgraph.py
|
Python
|
bsd-3-clause
| 7,490
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateSource
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-securitycenter
# [START securitycenter_v1_generated_SecurityCenter_CreateSource_async]
from google.cloud import securitycenter_v1
async def sample_create_source():
# Create a client
client = securitycenter_v1.SecurityCenterAsyncClient()
# Initialize request argument(s)
request = securitycenter_v1.CreateSourceRequest(
parent="parent_value",
)
# Make the request
response = await client.create_source(request=request)
# Handle the response
print(response)
# [END securitycenter_v1_generated_SecurityCenter_CreateSource_async]
|
googleapis/python-securitycenter
|
samples/generated_samples/securitycenter_v1_generated_security_center_create_source_async.py
|
Python
|
apache-2.0
| 1,502
|
# Copyright (C) 2013 ABRT Team
# Copyright (C) 2013 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
import os
from pyfaf.common import FafError, Plugin, import_dir, load_plugin_types
__all__ = ["Repo", "repo_types"]
# Invalid name "repos" for type constant
# pylint: disable-msg=C0103
repo_types = {}
# pylint: enable-msg=C0103
class Repo(Plugin):
"""
A common superclass for repository plugins.
"""
def __init__(self, *args, **kwargs):
"""
The superclass constructor does not really need to be called, but it
enables a few useful features (like unified logging). If not called
by the child, it just makes sure that Repo class is not instantiated
directly.
"""
if self.__class__.__name__ == "Repo":
raise FafError("You need to subclass the Repo class "
"in order to implement a repository plugin.")
super(Repo, self).__init__()
def list_packages(self):
"""
Return list of packages available in this repository.
"""
raise NotImplementedError
import_dir(__name__, os.path.dirname(__file__))
load_plugin_types(Repo, repo_types)
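# Illustrative sketch (hypothetical, not part of faf): a concrete repository
# plugin subclasses Repo and implements list_packages(), roughly like this:
#
#     class DirRepo(Repo):
#         def __init__(self, name, path):
#             super(DirRepo, self).__init__()
#             self.name = name
#             self.path = path
#
#         def list_packages(self):
#             return [fname for fname in os.listdir(self.path)
#                     if fname.endswith(".rpm")]
#
# Modules placed in this package are imported by import_dir() above, and
# load_plugin_types() is presumably what registers Repo subclasses in repo_types.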
|
patriczek/faf
|
src/pyfaf/repos/__init__.py
|
Python
|
gpl-3.0
| 1,798
|
import json
import string
import random
from proxy.request import send_request, Request
from proxy.response import Response
from resources.base import Resource
class SSPRResource(Resource):
def __init__(self, service):
super().__init__(service)
self.service = service
def multiple_sspr_random_passwords(self, request):
url = self.service.target_url + "/randompassword"
passwords = []
num = request.args.get('num')
if num is not None and num != '':
num = int(num)
else:
num = 10
if num < 1 or num > 50:
num = 10
for i in range(0, num):
r = send_request(url, request.method, request.headers)
passwords.append(r.text)
return Response(json.dumps({"passwords": passwords}), headers={'Content-type':"application/json"})
def multiple_random_passwords(self, request):
passwords = []
num = request.args.get('num')
if num is not None and num != '':
num = int(num)
else:
num = 10
if num < 1 or num > 50:
num = 10
for i in range(0, num):
passwords.append(''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)))
return Response(json.dumps({"passwords": passwords}), headers={'Content-type':"application/json"}, status_code=200)
|
MicroFocus/CX
|
middle_tier/plugins/sspr/password.py
|
Python
|
mit
| 1,409
|
from __future__ import print_function
import numpy as np
import plfit
import pylab as plt
nel = 2000
alpha = 2.5
xmin = 1.0
data_prob = np.random.rand(nel)
data = plfit.plexp_inv(data_prob, xmin, alpha)
inds = np.argsort(data)
data_prob = data_prob[inds]
data = data[inds]
xdata = np.logspace(-2,2,10000)
plt.figure(1).clf()
plt.loglog(xdata, 1-plfit.plexp_cdf(xdata, xmin=xmin, alpha=alpha), 'k', linewidth=5, alpha=0.2, zorder=-1)
plt.loglog(xdata, 1-plfit.plexp_cdf(xdata, xmin=xmin, alpha=alpha, pl_only=True), 'g', linewidth=3, alpha=1, zorder=0)
plt.loglog(xdata, 1-plfit.plexp_cdf(xdata, xmin=xmin, alpha=alpha, exp_only=True), 'b', linewidth=3, alpha=1, zorder=0)
plt.plot(xmin, 1-plfit.plexp_cdf(xmin, xmin=xmin, alpha=alpha, exp_only=True), 'kx', markersize=20, linewidth=3, alpha=1, zorder=1)
plt.ylim(1e-2,1)
plt.figure(2).clf()
plt.loglog(data, 1-plfit.plexp_cdf(data, xmin=xmin, alpha=alpha), 'k', linewidth=10, alpha=0.1, zorder=-1)
plt.loglog(data, 1-plfit.plexp_cdf(data, xmin=xmin, alpha=alpha, pl_only=True), 'g', linewidth=3, alpha=0.5, zorder=0)
plt.loglog(data, 1-plfit.plexp_cdf(data, xmin=xmin, alpha=alpha, exp_only=True), 'b', linewidth=3, alpha=0.5, zorder=0)
plt.plot(xmin, 1-plfit.plexp_cdf(xmin, xmin=xmin, alpha=alpha, exp_only=True), 'kx', markersize=20, linewidth=3, alpha=1, zorder=1)
plt.loglog(data, 1-data_prob, 'r.', zorder=2, alpha=0.1)
plt.ylim((1-data_prob).min(),1)
result_fo = plfit.plfit(data, quiet=False, silent=False, usecy=False, usefortran=True )
plt.plot(result_fo._xmin, 1-plfit.plexp_cdf(result_fo._xmin, xmin=xmin,
alpha=alpha, exp_only=True), 'ko',
markerfacecolor='none', markeredgecolor='k', markersize=20,
linewidth=3, alpha=1, zorder=1)
|
giserh/plfit
|
plfit/tests/distribution_sanity_check.py
|
Python
|
mit
| 1,762
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.launch.launcher Contains the SKIRTLauncher class, which can be used to launch SKIRT simulations
# locally or remotely.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import math
# Import the relevant PTS classes and modules
from ..simulation.execute import SkirtExec
from ..basics.configurable import Configurable
from ..tools import monitoring, introspection
from ..simulation.definition import SingleSimulationDefinition
from .options import LoggingOptions
from .analyser import SimulationAnalyser
from ..simulation.remote import SKIRTRemote
from ..basics.log import log
from .options import SchedulingOptions
from ..advanced.parallelizationtool import determine_parallelization
from ..advanced.memoryestimator import estimate_memory
from ..simulation.parallelization import Parallelization, get_possible_nprocesses_in_memory
from .options import AnalysisOptions
from ..tools import filesystem as fs
from ..tools import parallelization
from ..simulation.skifile import SkiFile
from ..tools import formatting as fmt
from ..remote.host import load_host
from ..tools.utils import lazyproperty
# -----------------------------------------------------------------
class SKIRTLauncher(Configurable):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(SKIRTLauncher, self).__init__(*args, **kwargs)
# -- Attributes --
# Create the local SKIRT execution context
self.skirt = SkirtExec()
# Create the SKIRT remote execution context
self.remote = None
# Create a SimulationAnalyser instance
self.analyser = SimulationAnalyser()
# The simulation definition
self.definition = None
self.remote_input_path = None
self.has_remote_input_files = False
# The logging options
self.logging_options = None
# The analysis options
self.analysis_options = None
# Scheduling options
self.scheduling_options = None
# The parallelization scheme
self.parallelization = None
# The number of processes
self.nprocesses = None
self.nprocesses_per_node = None
# ADVANCED
self.local_script_path = None
self.screen_output_path = None
# The simulation object
self.simulation = None
# Initialize a list to contain the retrieved finished simulations
self.simulations = []
# The specified number of cells
self.ncells = None
# Estimates of the memory requirement
self.memory = None
# -----------------------------------------------------------------
@property
def has_parallelization(self):
"""
This function ...
:return:
"""
# Check whether the number of processes and the number of threads are both defined
#return self.config.arguments.parallel.processes is not None and self.config.arguments.parallel.threads is not None
#return False
return self.parallelization is not None
# -----------------------------------------------------------------
@property
def host_id(self):
"""
This function ...
:return:
"""
return self.config.remote
# -----------------------------------------------------------------
@property
def host(self):
"""
This function returns the Host object
:return:
"""
# If the setup has not been called yet, load the host
if self.remote is None:
if self.host_id is None: return None # local
else: return load_host(self.host_id) # remote
# If the setup has already been called
else: return self.remote.host
# -----------------------------------------------------------------
@property
def cluster_name(self):
"""
This function ...
:return:
"""
# Local execution
if self.host_id is None: return None
# Remote, but setup has not been called yet
elif self.remote is None: # setup has not been called
# Check cluster_name configuration setting
if self.config.cluster_name is not None: return self.config.cluster_name
# Get default cluster for host
else: return self.host.clusters.default
# Remote, and setup has been called (remote has been setup)
else: return self.remote.cluster_name
# -----------------------------------------------------------------
@property
def uses_remote(self):
"""
This function ...
:return:
"""
return self.host is not None
# -----------------------------------------------------------------
@property
def uses_scheduler(self):
"""
This function ...
:return:
"""
if self.host is None: return False
else: return self.host.scheduler
# -----------------------------------------------------------------
@property
def do_retrieve(self):
"""
This function ...
:return:
"""
return self.config.retrieve and self.config.remote
# -----------------------------------------------------------------
@property
def do_show(self):
"""
This function ...
:return:
"""
return self.config.show
# -----------------------------------------------------------------
@property
def do_analyse(self):
"""
This function ...
:return:
"""
return self.config.analyse and self.has_simulations
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# 2. Set or check parallelization scheme
self.set_or_check_parallelization()
# 3. Launch the simulation
self.launch()
# 4. Retrieve the simulations that are finished
if self.do_retrieve: self.retrieve()
# 5. Show
if self.do_show: self.show()
# 6. Analyse the output of the retrieved simulations
if self.do_analyse: self.analyse()
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(SKIRTLauncher, self).setup(**kwargs)
# Check 'remote' input
if "remote" in kwargs:
remote = kwargs.pop("remote")
if not remote.connected:
if self.config.remote is None: raise ValueError("Unconnected remote is passed but host ID is not specified in configuration")
remote.setup(self.config.remote, self.config.cluster_name)
elif self.config.remote is not None and remote.host_id != self.config.remote:
raise ValueError("Remote is passed for host '" + remote.host_id + "' but configured host ID is '" + self.config.remote + "'")
self.remote = SKIRTRemote.from_remote(remote)
# Setup the remote execution context
if self.config.remote is not None:
self.remote = SKIRTRemote()
self.remote.setup(self.config.remote, self.config.cluster_name)
# Create output directory
if self.config.create_output and not fs.is_directory(self.config.simulation_output): fs.create_directory(self.config.simulation_output)
# Create the logging options
if "logging_options" in kwargs and kwargs["logging_options"] is not None: self.logging_options = kwargs.pop("logging_options")
else:
self.logging_options = LoggingOptions()
self.logging_options.set_options(self.config.logging)
# Create the analysis options
if "analysis_options" in kwargs and kwargs["analysis_options"] is not None: self.set_analysis_options(kwargs.pop("analysis_options"))
else: self.create_analysis_options()
# Get scheduling options
if self.uses_scheduler:
if "scheduling_options" in kwargs and kwargs["scheduling_options"] is not None: self.scheduling_options = kwargs.pop("scheduling_options")
# Add the walltime to the scheduling options
if self.config.walltime is not None:
if self.scheduling_options is None: self.scheduling_options = SchedulingOptions()
self.scheduling_options.walltime = self.config.walltime
# Get the memory information passed to this instance
self.memory = kwargs.pop("memory", None)
# Get the definition
if "definition" in kwargs: self.definition = kwargs.pop("definition")
# Has remote input?
if "has_remote_input_files" in kwargs: self.has_remote_input_files = kwargs.pop("has_remote_input_files")
if self.has_remote_input_files and self.remote is None: raise ValueError("Cannot have remote input files when launching simulation locally")
# Has remote input path?
if "remote_input_path" in kwargs: self.remote_input_path = kwargs.pop("remote_input_path")
if self.remote_input_path is not None and self.remote is None: raise ValueError("Cannot have remote input path when launching simulation locally")
        if self.remote_input_path is not None and self.has_remote_input_files: raise ValueError("Cannot have a remote input path and separate remote input files simultaneously")
# Get the parallelization
if "parallelization" in kwargs: self.parallelization = kwargs.pop("parallelization")
# Get the number of processes
if "nprocesses" in kwargs: self.nprocesses = kwargs.pop("nprocesses")
if "nprocesses_per_node" in kwargs: self.nprocesses_per_node = kwargs.pop("nprocesses_per_node")
# ADVANCED
if "local_script_path" in kwargs: self.local_script_path = kwargs.pop("local_script_path")
if "screen_output_path" in kwargs: self.screen_output_path = kwargs.pop("screen_output_path")
# Get the number of dust cells if given
if "ncells" in kwargs: self.ncells = kwargs.pop("ncells")
##
# Create the simulation definition (if necessary)
if self.definition is None: self.create_definition()
# -----------------------------------------------------------------
@property
def has_nprocesses(self):
"""
This function ...
:return:
"""
return self.nprocesses is not None
# -----------------------------------------------------------------
@property
def has_nprocesses_per_node(self):
"""
This function ...
:return:
"""
return self.nprocesses_per_node is not None
# -----------------------------------------------------------------
def create_analysis_options(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the analysis options ...")
# Create the analysis options object
self.analysis_options = AnalysisOptions()
self.analysis_options.set_options(self.config.analysis)
# Check the options
self.analysis_options.check(logging_options=self.logging_options, output_path=self.config.simulation_output, retrieve_types=self.config.retrieve_types)
# -----------------------------------------------------------------
def set_analysis_options(self, options):
"""
This function ...
:param options:
:return:
"""
# Inform the user
log.info("Setting the analysis options ...")
# Set
self.analysis_options = options
# Check the options
self.analysis_options.check(logging_options=self.logging_options, output_path=self.config.simulation_output, retrieve_types=self.config.retrieve_types)
# -----------------------------------------------------------------
def create_definition(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the simulation definition ...")
# Create the simulation definition
self.definition = SingleSimulationDefinition(self.config.ski, self.config.simulation_output, self.config.simulation_input)
# -----------------------------------------------------------------
def set_or_check_parallelization(self):
"""
This function ...
:return:
"""
# Set the parallelization scheme
if not self.has_parallelization: self.set_parallelization()
# Check
elif self.config.check_parallelization: self.check_parallelization()
# -----------------------------------------------------------------
def set_parallelization(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting the parallelization scheme ...")
# Remote
if self.config.remote: self.set_parallelization_remote()
# Local
else: self.set_parallelization_local()
# -----------------------------------------------------------------
def set_parallelization_local(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Determining the optimal parallelization scheme for local execution ...")
# Determine the number of processes
processes = self.get_nprocesses_local()
# Calculate the maximum number of threads per process based on the current cpu load of the system
free_cpus = monitoring.free_cpus()
threads = int(free_cpus / processes)
# If there are too little free cpus for the amount of processes, the number of threads will be smaller than one
if threads < 1:
log.warning("The number of processes was " + str(processes) + " but the number of free CPU's is only " + str(free_cpus))
processes = max(int(free_cpus), 1)
log.warning("Adjusting the number of processes to " + str(processes) + " ...")
threads = 1
# Determine number of cores
cores = processes * threads
threads_per_core = 2
# Debugging
log.debug("The number of cores is " + str(cores))
log.debug("The number of thread per core is " + str(threads_per_core))
log.debug("The number of processes is " + str(processes))
# Set the parallelization scheme
self.parallelization = Parallelization(cores, threads_per_core, processes, data_parallel=self.config.data_parallel_local)
# Debugging
log.debug("The parallelization scheme is " + str(self.parallelization))
# -----------------------------------------------------------------
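    # Worked example (illustrative, not from the original source): if
    # monitoring.free_cpus() reports 9.6 free CPUs and get_nprocesses_local()
    # picks 4 processes, each process gets int(9.6 / 4) = 2 threads, i.e. 8
    # cores in total with 2 threads per core; with fewer free CPUs than
    # processes, the process count is clamped down and a single thread per
    # process is used instead.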
def get_nprocesses_local(self):
"""
This function ...
:return:
"""
# Check whether MPI is available
if not introspection.has_mpi():
# Check nprocesses
if self.has_nprocesses and self.nprocesses > 1: raise ValueError("The number of processes that is specified is not possible: MPI installation not present")
# Set number of processes to 1
processes = 1
# MPI present and number of processes is defined
elif self.has_nprocesses: processes = self.nprocesses
# MPI present and number of processes not defined
else:
# If memory requirement is not set
if self.memory is None: self.memory = estimate_memory(self.definition.ski_path, input_path=self.config.simulation_input, ncells=self.ncells)
# Determine the number of possible nprocesses
processes = get_possible_nprocesses_in_memory(monitoring.free_memory(), self.memory.serial,
self.memory.parallel, data_parallel=self.config.data_parallel_local)
# Return
return processes
# -----------------------------------------------------------------
@lazyproperty
def ski(self):
"""
This function ...
:return:
"""
return SkiFile(self.definition.ski_path)
# -----------------------------------------------------------------
@lazyproperty
def nwavelengths(self):
"""
This function ...
:return:
"""
# No file wavelength grid
if not self.ski.wavelengthsfile(): return self.ski.nwavelengths()
# File wavelength grid
# Remote
elif self.uses_remote:
# Some input files may be remote
if self.has_remote_input_files:
from ..simulation.input import find_input_filepath
filename = self.ski.wavelengthsfilename()
filepath = find_input_filepath(filename, self.definition.input_path)
if fs.is_file(filepath): return self.ski.nwavelengthsfile(self.definition.input_path)
elif self.remote.is_file(filepath):
nwavelengths = int(self.remote.read_first_line(filepath))
return nwavelengths
else: raise ValueError("We shouldn't get here")
# Remote input directory is specified
elif self.remote_input_path is not None:
filename = self.ski.wavelengthsfilename()
filepath = fs.join(self.remote_input_path, filename)
# Check
if not self.remote.is_file(filepath): raise IOError("The remote input file '" + filename + "' does not exist in '" + self.remote_input_path + "'")
# Get the number of wavelengths and return
nwavelengths = int(self.remote.read_first_line(filepath))
return nwavelengths
# Nothing is remote
else: return self.ski.nwavelengthsfile(self.definition.input_path)
# No remote
else: return self.ski.nwavelengthsfile(self.definition.input_path)
# -----------------------------------------------------------------
@lazyproperty
def dustlib_dimension(self):
"""
This function ...
:return:
"""
return self.ski.dustlib_dimension()
# -----------------------------------------------------------------
def set_parallelization_remote(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting the optimal parallelization scheme for remote execution ...")
# If the remote uses a scheduling system
if self.remote.scheduler:
# Set host properties
nnodes = self.config.nnodes
nsockets = self.remote.host.cluster.sockets_per_node
ncores = self.remote.host.cluster.cores_per_socket
memory = self.remote.host.cluster.memory
mpi = True
hyperthreading = self.remote.host.use_hyperthreading
threads_per_core = self.remote.host.cluster.threads_per_core
# Remote does not use a scheduling system
else:
# Get host properties
nnodes = 1
nsockets = int(math.floor(self.remote.free_sockets))
ncores = self.remote.cores_per_socket
memory = self.remote.free_memory
mpi = True
hyperthreading = self.remote.host.use_hyperthreading
threads_per_core = self.remote.threads_per_core
# The number of processes is defined
if self.has_nprocesses:
# Determine cores per node and total number of cores
cores_per_node = nsockets * ncores
total_ncores = nnodes * cores_per_node
# Check number of processes
if self.nprocesses > cores_per_node: raise ValueError("The number of processes cannot be larger than the number of cores per node (" + str(cores_per_node) + ")")
# Determine other parameters
ppn = nsockets * ncores
nprocesses_per_node = int(self.nprocesses / nnodes)
nprocesses = nprocesses_per_node * nnodes
ncores_per_process = ppn / nprocesses_per_node
threads_per_core = threads_per_core if hyperthreading else 1
threads_per_process = threads_per_core * ncores_per_process
# Determine data-parallel flag
if self.config.data_parallel_remote is None:
if self.nwavelengths >= 10 * nprocesses and self.dustlib_dimension == 3: data_parallel = True
else: data_parallel = False
else: data_parallel = self.config.data_parallel_remote
# Create the parallelization object
self.parallelization = Parallelization.from_mode("hybrid", total_ncores, threads_per_core,
threads_per_process=threads_per_process,
data_parallel=data_parallel)
# The number of processes per node is defined
elif self.has_nprocesses_per_node:
# Determine other parameters
ppn = nsockets * ncores
nprocesses = self.nprocesses_per_node * self.config.nnodes
ncores_per_process = ppn / self.nprocesses_per_node
threads_per_core = threads_per_core if hyperthreading else 1
threads_per_process = threads_per_core * ncores_per_process
total_ncores = nnodes * nsockets * ncores
# Determine data-parallel flag
if self.config.data_parallel_remote is None:
if self.nwavelengths >= 10 * nprocesses and self.dustlib_dimension == 3: data_parallel = True
else: data_parallel = False
else: data_parallel = self.config.data_parallel_remote
# Create the parallelization object
self.parallelization = Parallelization.from_mode("hybrid", total_ncores, threads_per_core,
threads_per_process=threads_per_process,
data_parallel=data_parallel)
# Determine the parallelization scheme with the parallelization tool
# ski_path, input_path, memory, nnodes, nsockets, ncores, host_memory, mpi, hyperthreading, threads_per_core, ncells=None
else: self.parallelization = determine_parallelization(self.definition.ski_path, self.definition.input_path, self.memory, nnodes, nsockets, ncores, memory, mpi, hyperthreading, threads_per_core, ncells=self.ncells, nwavelengths=self.nwavelengths)
# Debugging
log.debug("The parallelization scheme is " + str(self.parallelization))
# -----------------------------------------------------------------
def check_parallelization(self):
"""
        This function checks whether the parallelization scheme requested by the user is possible given the
        number of cores and hyperthreads per core on the local or remote host.
        :return:
"""
# Check locally or remotely
if self.config.remote: self.check_parallelization_remote()
else: self.check_parallelization_local()
# -----------------------------------------------------------------
def check_parallelization_remote(self):
"""
        This function checks the parallelization scheme for remote execution.
:return:
"""
# Inform the user
log.info("Checking the parallelization scheme ...")
# If the remote host uses a scheduling system, check whether the parallelization options are possible
# based on the cluster properties defined in the configuration
if self.remote.scheduler:
# Determine the total number of hardware threads that can be used on the remote cluster
hardware_threads_per_node = self.remote.cores_per_node
if self.remote.use_hyperthreading: hardware_threads_per_node *= self.remote.threads_per_core
# Raise an error if the number of requested threads per process exceeds the number of hardware threads
# per node
if self.config.arguments.parallel.threads > hardware_threads_per_node:
raise RuntimeError("The number of requested threads per process exceeds the number of allowed threads per node")
# Determine the number of processes per node (this same calculation is also done in JobScript)
# self.remote.cores = cores per node
processes_per_node = self.remote.cores_per_node // self.parallelization.threads
# Determine the amount of requested nodes based on the total number of processes and the number of processes per node
requested_nodes = math.ceil(self.config.arguments.parallel.processes / processes_per_node)
# Raise an error if the number of requested nodes exceeds the number of nodes of the system
            if requested_nodes > self.remote.nodes: raise RuntimeError("The required number of computing nodes for "
"the requested number of processes and threads "
"exceeds the existing number of nodes")
# No scheduling system
else:
# Determine the total number of requested threads
requested_threads = self.parallelization.processes * self.parallelization.threads
# Determine the total number of hardware threads that can be used on the remote host
hardware_threads = self.remote.cores_per_node
if self.remote.use_hyperthreading: hardware_threads *= self.remote.threads_per_core
# If the number of requested threads is greater than the allowed number of hardware threads, raise
# an error
if requested_threads > hardware_threads: raise RuntimeError("The requested number of processes and threads "
"exceeds the total number of hardware threads")
# -----------------------------------------------------------------
def check_parallelization_local(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Checking the parallelization scheme ...")
# Determine the total number of requested threads
requested_threads = self.parallelization.nthreads
# Determine the total number of hardware threads that can be used on the remote host
hardware_threads = parallelization.ncores()
if parallelization.has_hyperthreading(): hardware_threads *= parallelization.nthreads_per_core()
# If the number of requested threads is greater than the allowed number of hardware threads, raise
# an error
if requested_threads > hardware_threads: raise RuntimeError("The requested number of processes and threads "
"exceeds the total number of hardware threads")
# -----------------------------------------------------------------
def launch(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Launching the simulation ...")
# Launch remotely
if self.config.remote is not None: self.launch_remote()
# Launch locally
else: self.launch_local()
# -----------------------------------------------------------------
def launch_local(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Launching the simulation locally ...")
# Run the simulation
self.simulation = self.skirt.run(self.definition, logging_options=self.logging_options, silent=False, wait=True,
show_progress=self.config.show_progress, parallelization=self.parallelization,
finish_after=self.config.finish_after, finish_at=self.config.finish_at,
debug_output=self.config.debug_output)
# Set the simulation name
self.simulation.name = self.definition.prefix
# Set the analysis options for the simulation
self.simulation.analysis = self.analysis_options
# Add the locally run simulation to the list of simulations to be analysed
self.simulations.append(self.simulation)
# -----------------------------------------------------------------
@property
def remove_remote_input(self):
"""
This function ...
:return:
"""
if self.remote_input_path is not None or self.has_remote_input_files: return False
elif self.config.keep_input: return False
else: return not self.config.keep
# -----------------------------------------------------------------
@property
def remove_remote_output(self):
"""
        This function tells whether the remote output should be removed after retrieval.
:return:
"""
return not self.config.keep
# -----------------------------------------------------------------
@property
def remove_remote_simulation_directory(self):
"""
        This function tells whether the remote simulation directory should be removed after retrieval.
:return:
"""
return not self.config.keep
# -----------------------------------------------------------------
def launch_remote(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Launching the simulation remotely ...")
# Resolve the remote screen output directory path
screen_output_path = self.screen_output_path.replace("$HOME", self.remote.home_directory).replace("$SKIRT", self.remote.skirt_root_path) if self.screen_output_path is not None else None
# Create the necessary directories for the screen output file
#screen_output_dirpath = fs.directory_of(screen_output_path)
#if not self.remote.is_directory(screen_output_dirpath): self.remote.create_directory(screen_output_dirpath, recursive=True)
if screen_output_path is not None and not self.remote.is_directory(screen_output_path): self.remote.create_directory(screen_output_path, recursive=True)
# Run the simulation
self.simulation = self.remote.run(self.definition, self.logging_options, self.parallelization,
scheduling_options=self.scheduling_options, attached=self.config.attached,
analysis_options=self.analysis_options, show_progress=self.config.show_progress,
local_script_path=self.local_script_path, screen_output_path=screen_output_path,
remote_input_path=self.remote_input_path, has_remote_input=self.has_remote_input_files,
debug_output=self.config.debug_output, retrieve_types=self.config.retrieve_types,
remove_remote_input=self.remove_remote_input, remove_remote_output=self.remove_remote_output,
remove_remote_simulation_directory=self.remove_remote_simulation_directory)
# -----------------------------------------------------------------
def retrieve(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Retrieving finished simulations...")
        # Get a list of the simulations that have been successfully retrieved
self.simulations = self.remote.retrieve()
# -----------------------------------------------------------------
@property
def nsimulations(self):
"""
This function ...
:return:
"""
return len(self.simulations)
# -----------------------------------------------------------------
@property
def has_simulations(self):
"""
This function ...
:return:
"""
return self.nsimulations > 0
# -----------------------------------------------------------------
def show(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing ...")
# Show finished simulations
if self.has_simulations and self.config.show_finished: self.show_finished()
# -----------------------------------------------------------------
def show_finished(self):
"""
        This function shows the output of the finished simulations.
:return:
"""
# Inform the user
log.info("Showing the output of finished simulations ...")
# Loop over the simulations
print("")
for simulation in self.simulations:
# Print the simulation name
print(fmt.blue + simulation.prefix() + fmt.reset + ":")
# Show the output
simulation.output.show(line_prefix=" ")
# -----------------------------------------------------------------
def analyse(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Analysing the output of retrieved simulations...")
# Loop over the list of simulations and analyse them
for simulation in self.simulations:
# Run the analyser on the simulation
self.analyser.run(simulation=simulation)
# Clear the analyser
self.analyser.clear()
# -----------------------------------------------------------------
class SingleImageSKIRTLauncher(object):
"""
This class ...
"""
def __init__(self):
"""
The constructor ...
"""
# The SKIRT execution context
self.skirt = SkirtExec()
# -----------------------------------------------------------------
def run(self, ski_path, out_path, wcs, total_flux, kernel, instrument_name=None, show_progress=False):
"""
This function ...
:param ski_path:
:param out_path:
:param wcs:
:param total_flux:
:param kernel:
:param instrument_name:
:param show_progress:
:return:
"""
from ...magic.core.frame import Frame
from ..simulation.arguments import SkirtArguments
# Create a SkirtArguments object
arguments = SkirtArguments()
# Adjust the parameters
arguments.ski_pattern = ski_path
arguments.output_path = out_path
arguments.single = True # we expect a single simulation from the ski pattern
# Inform the user
log.info("Running a SKIRT simulation with " + str(fs.name(ski_path)) + " ...")
# Run the simulation
simulation = self.skirt.run(arguments, silent=False if log.is_debug else True, show_progress=show_progress)
# Get the simulation prefix
prefix = simulation.prefix()
# Get the (frame)instrument name
if instrument_name is None:
# Get the name of the unique instrument (give an error if there are more instruments)
instrument_names = simulation.parameters().get_instrument_names()
assert len(instrument_names) == 1
instrument_name = instrument_names[0]
# Determine the name of the SKIRT output FITS file
fits_name = prefix + "_" + instrument_name + "_total.fits"
# Determine the path to the output FITS file
fits_path = fs.join(out_path, fits_name)
        # Check that the expected output FITS file is present
if not fs.is_file(fits_path): raise RuntimeError("Something went wrong with the " + prefix + " simulation: output FITS file missing")
# Open the simulated frame
simulated_frame = Frame.from_file(fits_path)
# Set the coordinate system of the disk image
simulated_frame.wcs = wcs
# Debugging
log.debug("Rescaling the " + prefix + " image to a flux density of " + str(total_flux) + " ...")
# Rescale to the flux density
simulated_frame.normalize(to=total_flux)
# Debugging
log.debug("Convolving the " + prefix + " image ...")
# Convolve the frame
simulated_frame.convolve(kernel)
# Return the frame
return simulated_frame
# -----------------------------------------------------------------
def launch_single_image():
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
def generate_frame(ski, projection):
"""
This function ...
:param ski:
:param projection:
:return:
"""
# Create instrument
from ...modeling.basics.instruments import FrameInstrument
instrument = FrameInstrument.from_projection(projection)
# -----------------------------------------------------------------
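# A minimal sketch (hypothetical numbers, not part of the launcher itself): the
# arithmetic used by set_parallelization_local above, reproduced as a standalone
# helper so the core-splitting logic can be followed in isolation.
def _example_local_parallelization(free_cpus=6, processes=4):
    threads = int(free_cpus / processes)
    if threads < 1:
        # too few free CPUs: fall back to one thread per process
        processes = max(int(free_cpus), 1)
        threads = 1
    cores = processes * threads
    # with 6 free CPUs and 4 processes this gives 4 cores, 1 thread each
    return cores, processes, threads
# -----------------------------------------------------------------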
| SKIRT/PTS | core/launch/launcher.py | Python | agpl-3.0 | 37,871 |
__all__ = ['Tick']
class Tick(object):
'''Tick Class.
    A new Tick object is instantiated on every tick by BehaviorTree. It is passed
    as a parameter to the nodes through the tree during the traversal.
    The role of the Tick class is to store the instances of tree, debug, target
    and blackboard, so that all nodes can access this information.
    For internal use, the Tick also stores the nodes opened during the tick
    signal, so that `BehaviorTree` can keep track of them and close them when
    necessary.
    This class also makes a bridge between the nodes and the debug, passing the
    node state to the debug if the latter is provided.
'''
def __init__(self, tree=None, target=None, blackboard=None, debug=None):
'''Constructor.
:param tree: a BehaviorTree instance.
:param target: a target object.
:param blackboard: a Blackboard instance.
:param debug: a debug instance.
'''
self.tree = tree
self.target = target
self.blackboard = blackboard
self.debug = debug
self._open_nodes = []
self._node_count = 0
def _enter_node(self, node):
'''Called when entering a node (called by BaseNode).
:param node: a node instance.
'''
self._node_count += 1
self._open_nodes.append(node)
def _open_node(self, node):
'''Called when opening a node (called by BaseNode).
:param node: a node instance.
'''
pass
def _tick_node(self, node):
'''Called when ticking a node (called by BaseNode).
:param node: a node instance.
'''
pass
def _close_node(self, node):
'''Called when closing a node (called by BaseNode).
:param node: a node instance.
'''
self._open_nodes.pop()
def _exit_node(self, node):
'''Called when exiting a node (called by BaseNode).
:param node: a node instance.
'''
pass
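# A minimal usage sketch (hypothetical node object): how BaseNode-style code is
# expected to drive a Tick during a single traversal step. Only the attributes
# and methods defined above are used.
def _example_tick_cycle(node, blackboard=None):
    tick = Tick(tree=None, target=None, blackboard=blackboard, debug=None)
    tick._enter_node(node)   # increments the node count and tracks the open node
    tick._open_node(node)
    tick._tick_node(node)
    tick._close_node(node)   # pops the node from the open-node stack
    tick._exit_node(node)
    return tick._node_count  # 1 after one full enter/exit cycle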
| renatopp/behavior3py | b3/core/tick.py | Python | mit | 2,005 |
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import period_array
@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
def test_astype(dtype):
# We choose to ignore the sign and size of integers for
# Period/Datetime/Timedelta astype
arr = period_array(["2000", "2001", None], freq="D")
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
result = arr.astype(dtype)
if np.dtype(dtype).kind == "u":
expected_dtype = np.dtype("uint64")
else:
expected_dtype = np.dtype("int64")
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
expected = arr.astype(expected_dtype)
assert result.dtype == expected_dtype
tm.assert_numpy_array_equal(result, expected)
def test_astype_copies():
arr = period_array(["2000", "2001", None], freq="D")
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
result = arr.astype(np.int64, copy=False)
# Add the `.base`, since we now use `.asi8` which returns a view.
# We could maybe override it in PeriodArray to return ._data directly.
assert result.base is arr._data
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
result = arr.astype(np.int64, copy=True)
assert result is not arr._data
tm.assert_numpy_array_equal(result, arr._data.view("i8"))
def test_astype_categorical():
arr = period_array(["2000", "2001", "2001", None], freq="D")
result = arr.astype("category")
categories = pd.PeriodIndex(["2000", "2001"], freq="D")
expected = pd.Categorical.from_codes([0, 1, 1, -1], categories=categories)
tm.assert_categorical_equal(result, expected)
def test_astype_period():
arr = period_array(["2000", "2001", None], freq="D")
result = arr.astype(PeriodDtype("M"))
expected = period_array(["2000", "2001", None], freq="M")
tm.assert_period_array_equal(result, expected)
@pytest.mark.parametrize("other", ["datetime64[ns]", "timedelta64[ns]"])
def test_astype_datetime(other):
arr = period_array(["2000", "2001", None], freq="D")
# slice off the [ns] so that the regex matches.
with pytest.raises(TypeError, match=other[:-4]):
arr.astype(other)
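# A small illustrative helper (not one of the original tests): per the comment in
# test_astype_copies, PeriodArray exposes its ordinals through .asi8, which is the
# non-deprecated way to obtain what astype(np.int64) returns above. Assumes a
# pandas version where astype(int) still only warns with a FutureWarning.
def _example_asi8_matches_astype():
    arr = period_array(["2000", "2001", None], freq="D")
    ordinals = arr.asi8  # int64 ordinals; the missing value becomes iNaT
    with tm.assert_produces_warning(FutureWarning):
        converted = arr.astype(np.int64)
    tm.assert_numpy_array_equal(ordinals, converted)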
| rs2/pandas | pandas/tests/arrays/period/test_astype.py | Python | bsd-3-clause | 2,421 |
"""
Testing join operations.
"""
import unittest
class TestJoin(unittest.TestCase):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName)
`Foo(int a, int b).
Bar(int a, int b).`
`Foo(a,b) :- a=$range(0, 5), b=$range(0,3).`
# Foo(a,b) = {(0,0), (0,1), (0,2), (1,0), ...(4, 2)}
def setUp(self):
`clear Bar.`
def test_simple_insert(self):
`clear Foo.`
`Foo(a,b) :- a=$range(0, 1000), b=42.`
count=0
sum = 0
for a,b in `Foo(a,b)`:
count+=1
sum += b
self.assertEqual(count, 1000,
"Unexpected number of tuples in Foo. Expecting # 1000 tuples, but got # "+str(count))
        self.assertEqual(sum, 42*1000,
            "Unexpected sum for 2nd column of Foo. Expecting 42000, but got # "+str(sum))
`Foo2(int a, (int b)) indexby a, indexby b.
Foo2(a,b) :- a=$range(0, 1000), b=$range(0, 420).`
count=0
for a,b in `Foo2(a,b)`:
count+=1
self.assertEqual(count, 420*1000,
"Unexpected number of tuples in Foo2. Expecting # 420000 tuples, but got # "+str(count))
def test_not_contains_with_dontcare(self):
`Bar(a,b) :- a=$range(4, 7), b=$range(0,4), !Foo(_,b).`
l=[]
for a,b in `Bar(a,b)`:
l.append((a,b))
exp = [(4,3), (5,3), (6,3)]
self.assertEqual(l, exp, "Unexpected tuples in Bar(a,b). Expecting "+str(exp)+", but "+str(l))
`clear Bar.`
`Bar(a,b) :- a=$range(4, 7), b=$range(0,4), !Foo(a,_).`
l=set(`Bar(a,b)`)
exp=set([(5,0), (5,1), (5,2), (5,3), (6,0), (6,1), (6,2), (6,3)])
self.assertEqual(l, exp, "Unexpected tuples in Bar(a,b). Expecting "+str(exp)+", but "+str(l))
def test_contains_with_dontcare(self):
`Bar(a,b) :- a=$range(4, 7), b=$range(0,4), Foo(_,b).`
l=set([])
for a,b in `Bar(a,b)`:
l.add((a,b))
exp = set([(4,0), (4,1), (4,2), (5,0), (5,1), (5,2), (6,0), (6,1), (6,2)])
self.assertEqual(l, exp, "Unexpected tuples in Bar(a,b). Expecting "+str(exp)+", but "+str(l))
`clear Bar.`
`Bar(a,b) :- a=$range(4, 7), b=$range(0,4), Foo(a,_).`
l=set(`Bar(a,b)`)
exp=set([(4,0), (4,1), (4,2), (4,3)])
self.assertEqual(l, exp, "Unexpected tuples in Bar(a,b). Expecting "+str(exp)+", but "+str(l))
def test_not_contains(self):
`Bar(a,b) :- a=$range(4, 7), b=$range(0,4), !Foo(a,b).`
l=set()
for a,b in `Bar(a,b)`:
l.add((a,b))
exp=set([(4,3), (5,0), (5,1), (5,2), (5,3), (6,0), (6,1), (6,2), (6,3)])
self.assertEqual(l, exp, "Unexpected tuples in Bar(a,b). Expecting "+str(exp)+", but "+str(l))
`clear Bar.`
`Bar(a,b) :- a=$range(4, 7), b=$range(0,4), Foo(a,b).`
l=set(`Bar(a,b)`)
exp=set([(4,0), (4,1), (4,2)])
self.assertEqual(l, exp, "Unexpected tuples in Bar(a,b). Expecting "+str(exp)+", but "+str(l))
def test_not_contains2(self):
`StopWords(String s).
StopWords(a) :- a=$splitIter("a an and the of in to for with", " ").`
`Titles(String s).
Titles(t) :- t="k-means clustering with scikit-learn".
Titles(t) :- t="gradient boosted regression trees in scikit-learn".
Titles(t) :- t="know thy neighbor: an introduction to scikit-learn and k-nn".
Titles(t) :- t="sentiment classification using scikit-learn".`
`Words(String w, int cnt).
Words(w, $inc(1)) :- Titles(t), w=$splitIter(t, " "), !StopWords(w).`
_,cnt=list(`Words("scikit-learn", cnt)`)[0]
self.assertEqual(cnt, 4)
def test_outjoin(self):
`Qux(int a, (int b)).`
`Bar(a, b) :- a=$range(0, 2), b=1.
Qux(a, b) :- Foo(a, c), Bar(b, c).`
l=set(`Qux(a,b)`)
exp=set([(0,0), (0,1), (1,0), (1,1), (2,0), (2,1), (3,0), (3,1), (4,0), (4,1)])
self.assertEqual(l, exp, "Unexpected tuples in Qux(a,b). Expecting "+str(exp)+", but "+str(l))
def test_binarySearch(self):
`FooSorted(int a, int b) indexby a.
FooSorted(a,b) :- a=10, b=20.
FooSorted(a,b) :- a=11, b=21.
FooSorted(a,b) :- a=12, b=22.
Bar(a,b) :- FooSorted(a, b), a>=11.`
l=set(`Bar(a,b)`)
exp=set([(11,21), (12,22)])
self.assertEqual(l, exp, "Unexpected tuples in Bar(a,b). Expecting "+str(exp)+", but "+str(l))
`FooSorted2(int a, float b, long c) indexby c.
FooSorted2(a,b,c) :- a=10, b=10.0f, c=10L.
FooSorted2(a,b,c) :- a=11, b=11.0f, c=11L.
FooSorted2(a,b,c) :- a=12, b=12.0f, c=12L.
Bar2(int a, long b).
Bar2(a,c) :- FooSorted2(a, b, c), c>=11.`
l=set(`Bar2(a,b)`)
exp=set([(11,11.0), (12,12.0)])
self.assertEqual(l, exp, "Unexpected tuples in Bar2(a,b). Expecting "+str(exp)+", but "+str(l))
def test_nested_binarySearch(self):
`FooNested1(int a, (int b)) indexby a, indexby b.
FooNested1(a,b) :- a=10, b=20.
FooNested1(a,b) :- a=11, b=21.
FooNested1(a,b) :- a=11, b=22.
FooNested1(a,b) :- a=12, b=23.
Bar(a,b) :- FooNested1(a, b), a>=11.`
l=set(`Bar(a,b)`)
exp=set([(11,21), (11,22), (12,23)])
self.assertEqual(l, exp, "Unexpected tuples in Bar(a,b). Expecting "+str(exp)+", but "+str(l))
`clear Bar.`
`Bar(a,b) :- FooNested1(a, b), b>=22.`
l=set(`Bar(a,b)`)
exp=set([(11,22), (12,23)])
self.assertEqual(l, exp, "Unexpected tuples in Bar(a,b). Expecting "+str(exp)+", but "+str(l))
def test_nested_binarySearch2(self):
`FooNested2(int a, (long b, float c)) indexby a, indexby b, indexby c.
FooNested2(a,b,c) :- a=10, b=10L, c=10.0f.
FooNested2(a,b,c) :- a=10, b=11L, c=11.0f.
FooNested2(a,b,c) :- a=20, b=20L, c=20.0f.
FooNested2(a,b,c) :- a=20, b=21L, c=21.0f.`
# # binary search with (nested) float type column
# `clear Bar.`
# `Bar(a,(int)b) :- FooNested2(a, b, c), c>10.0f.`
# l=set(`Bar(a,b)`)
# exp=set([(10,11), (20,20), (20, 21)])
# self.assertEqual(l, exp, "Unexpected tuples in Bar(a,b). Expecting "+str(exp)+", but "+str(l))
#
# # binary search with (nested) long type column
# `clear Bar.`
# `Bar(a,(int)b) :- FooNested2(a, b, c), b>10L.`
# l=set(`Bar(a,b)`)
# exp=set([(10,11), (20,20), (20, 21)])
# self.assertEqual(l, exp, "Unexpected tuples in Bar(a,b). Expecting "+str(exp)+", but "+str(l))
# binary search with (nested) long type column with indexby
`clear Bar.`
`FooNested2(a,b,c) :- a=$range(21, 10000), b=42L, c=42.0f.`
`Bar(20,(int)b) :- FooNested2(20, b, c), b>10L.`
l=set(`Bar(a,b)`)
exp=set([(20, 20), (20, 21)])
self.assertEqual(l, exp, "Unexpected tuples in Bar(a,b). Expecting "+str(exp)+", but "+str(l))
if __name__ == '__main__':
unittest.main()
| ofermend/medicare-demo | socialite/test/test_joins.py | Python | apache-2.0 | 7,084 |
#bolttools - a framework for creation of part libraries
#Copyright (C) 2013 Johannes Reinhardt <jreinhardt@ist-dein-freund.de>
#
#This library is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 2.1 of the License, or any later version.
#
#This library is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#Lesser General Public License for more details.
#
#You should have received a copy of the GNU Lesser General Public
#License along with this library; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from os import makedirs, listdir
from os.path import join, exists
from shutil import copyfile
import xlwt
from errors import *
from common import BackendExporter
class SolidWorksExporter(BackendExporter):
def __init__(self,repo,databases):
BackendExporter.__init__(self,repo,databases)
self.solidworks = databases["solidworks"]
def write_output(self,out_path,version,stable=False):
self.clear_output_dir(out_path)
ver_root = join(out_path,version)
makedirs(ver_root)
for designtable in self.solidworks.designtables:
#build class lookup, we need to search for classes by ids
blt_classes = {}
for coll in self.repo.collections:
if not coll.id == designtable.collection:
continue
for cl in coll.classes_by_ids():
if cl.id in blt_classes:
raise NonUniqueClassIdError(cl.id)
blt_classes[cl.id] = cl
#create directories and copy model files
coll_path = join(ver_root,designtable.collection)
if not exists(coll_path):
makedirs(coll_path)
#check for case
if designtable.filename not in listdir(
join(self.solidworks.backend_root,designtable.collection)
):
raise FileNotFoundError(designtable.filename)
model_path = join(coll_path,designtable.filename)
if not exists(model_path):
copyfile(designtable.path,model_path)
#create designtable
workbook = xlwt.Workbook("utf8")
worksheet = workbook.add_sheet("Sheet1")
#write column headers
col = 1
row = 0
for pname in designtable.params:
worksheet.write(row,col,pname)
col += 1
for mname in designtable.metadata:
worksheet.write(row,col,mname)
col += 1
#write configurations
row = 1
for dtcl in designtable.classes:
cl = blt_classes[dtcl.classid]
for free in cl.parameters.common:
params = cl.parameters.collect(dict(zip(cl.parameters.free,free)))
                    if dtcl.naming is None:
                        name = cl.naming.get_name(params)
                    else:
                        name = dtcl.naming.get_name(params)
col = 0
worksheet.write(row,col,name)
col += 1
for pname in designtable.params.values():
                        worksheet.write(row,col,params[pname])
col += 1
for pname in designtable.metadata.values():
                        worksheet.write(row,col,params[pname])
col += 1
row += 1
workbook.save(join(coll_path,designtable.outname))
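# A minimal sketch (hypothetical parameter names and values, independent of the
# exporter above): the same xlwt row/column bookkeeping used in write_output,
# writing one header row and one configuration row.
def _example_designtable_sheet(path="designtable_example.xls"):
    workbook = xlwt.Workbook("utf8")
    worksheet = workbook.add_sheet("Sheet1")
    # row 0: column 0 (configuration name) stays blank, parameter names follow
    for col, pname in enumerate(["d1", "k", "l"], start=1):
        worksheet.write(0, col, pname)
    # row 1: configuration name in column 0, parameter values afterwards
    worksheet.write(1, 0, "M3x20")
    for col, value in enumerate([3, 0.5, 20], start=1):
        worksheet.write(1, col, value)
    workbook.save(path)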
| berndhahnebach/BOLTS | backends/solidworks.py | Python | gpl-3.0 | 3,874 |
# coding:utf-8
import json
import socket
import datetime
import redis
from logger.log import crawler
from config.conf import (
get_redis_args,
get_share_host_count,
get_running_mode,
get_cookie_expire_time
)
mode = get_running_mode()
redis_args = get_redis_args()
share_host_count = get_share_host_count()
cookie_expire_time = get_cookie_expire_time()
# todo: consider cookie storage and retrieval under concurrent access
class Cookies(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('cookies'))
rd_con_broker = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('broker'))
@classmethod
def store_cookies(cls, name, cookies):
pickled_cookies = json.dumps(
{'cookies': cookies, 'loginTime': datetime.datetime.now().timestamp()})
cls.rd_con.hset('account', name, pickled_cookies)
cls.push_in_queue(name)
@classmethod
def push_in_queue(cls, name):
        # With many nodes and fast task execution there may still be duplicate names in the queue; if the account is already queued, do not store it again
for i in range(cls.rd_con.llen('account_queue')):
tn = cls.rd_con.lindex('account_queue', i)
if tn:
if tn == name:
return
cls.rd_con.rpush('account_queue', name)
@classmethod
def fetch_cookies(cls):
        # todo: notify the user by email after blocking
if mode == 'normal':
return cls.fetch_cookies_of_normal()
else:
return cls.fetch_cookies_of_quick()
@classmethod
def fetch_cookies_of_normal(cls):
        # Poll the available accounts
for i in range(cls.rd_con.llen('account_queue')):
            name = cls.rd_con.blpop('account_queue')[1].decode('utf-8')
            # Check whether the name still exists: during crawling the cookie may get
            # banned, so the account popped from account_queue may no longer be present
            # in 'account'
            j_account = cls.rd_con.hget('account', name)
            if j_account:
                j_account = j_account.decode('utf-8')
if cls.check_cookies_timeout(j_account):
cls.delete_cookies(name)
continue
cls.rd_con.rpush('account_queue', name)
account = json.loads(j_account)
return name, account['cookies']
return None
@classmethod
def fetch_cookies_of_quick(cls):
        # Each cookie records how many hosts use it; once that count exceeds the limit the cookie is pushed to the back of the queue, otherwise it is fetched and recorded
        # todo: identifying hosts by hostname is slightly flawed, e.g. hosts with different IPs may share the same hostname
hostname = socket.gethostname()
        # If redis already holds a cookie name for this host, use it directly
my_cookies_name = cls.rd_con.hget('host', hostname)
if my_cookies_name:
my_cookies = cls.rd_con.hget('account', my_cookies_name)
            if not cls.check_cookies_timeout(my_cookies):  # if not in use or the cookies expired, a new one is fetched below
my_cookies = json.loads(my_cookies.decode('utf-8'))
return my_cookies_name, my_cookies['cookies']
else:
cls.delete_cookies(my_cookies_name)
while True:
try:
name = cls.rd_con.blpop('account_queue')[1].decode('utf-8')
except AttributeError:
return None
else:
j_account = cls.rd_con.hget('account', name)
if cls.check_cookies_timeout(j_account):
cls.delete_cookies(name)
continue
j_account = j_account.decode('utf-8')
                # account-to-host mapping (one-to-many)
hosts = cls.rd_con.hget('cookies_host', name)
if not hosts:
hosts = dict()
else:
hosts = hosts.decode('utf-8')
hosts = json.loads(hosts)
hosts[hostname] = 1
cls.rd_con.hset('cookies_host', name, json.dumps(hosts))
                # host-to-account mapping (one-to-one)
account = json.loads(j_account)
cls.rd_con.hset('host', hostname, name)
                # push it back to the head of the queue so it can be reused next time
if len(hosts) < share_host_count:
cls.rd_con.lpush('account_queue', name)
return name, account['cookies']
@classmethod
def delete_cookies(cls, name):
cls.rd_con.hdel('account', name)
if mode == 'quick':
cls.rd_con.hdel('cookies_host', name)
return True
@classmethod
def check_login_task(cls):
if cls.rd_con_broker.llen('login_queue') > 0:
cls.rd_con_broker.delete('login_queue')
@classmethod
def check_cookies_timeout(cls, cookies):
if cookies is None:
return True
if isinstance(cookies, bytes):
cookies = cookies.decode('utf-8')
cookies = json.loads(cookies)
login_time = datetime.datetime.fromtimestamp(cookies['loginTime'])
if datetime.datetime.now() - login_time > datetime.timedelta(hours=cookie_expire_time):
            crawler.warning('An account has expired')
return True
return False
class Urls(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('urls'))
@classmethod
def store_crawl_url(cls, url, result):
cls.rd_con.set(url, result)
class IdNames(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('id_name'))
@classmethod
def store_id_name(cls, user_name, user_id):
cls.rd_con.set(user_name, user_id)
@classmethod
def fetch_uid_by_name(cls, user_name):
user_id = cls.rd_con.get(user_name)
if user_id:
return user_id.decode('utf-8')
return ''
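# A minimal usage sketch (hypothetical account name and cookie dict; requires the
# redis instance configured in config.conf to be reachable): store a freshly
# logged-in cookie and fetch one back according to the configured running mode.
def _example_cookie_roundtrip():
    Cookies.store_cookies('weibo_account_1', {'SUB': 'example-cookie-value'})
    fetched = Cookies.fetch_cookies()
    if fetched is None:
        return None
    name, cookies = fetched
    return name, cookies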
| Danceiny/HackGirlfriend | Spider/WeiboSpider/db/redis_db.py | Python | apache-2.0 | 6,338 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
AuroraDNS DNS Driver
"""
import base64
import json
import hmac
import datetime
from hashlib import sha256
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
from libcloud.common.types import InvalidCredsError, ProviderError
from libcloud.common.types import LibcloudError
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import RecordType, ZoneDoesNotExistError
from libcloud.dns.types import ZoneAlreadyExistsError, RecordDoesNotExistError
API_HOST = "api.auroradns.eu"
# Default TTL required by libcloud, but doesn't do anything in AuroraDNS
DEFAULT_ZONE_TTL = 3600
DEFAULT_ZONE_TYPE = "master"
VALID_RECORD_PARAMS_EXTRA = ["ttl", "prio", "health_check_id", "disabled"]
class AuroraDNSHealthCheckType(object):
"""
Healthcheck type.
"""
HTTP = "HTTP"
HTTPS = "HTTPS"
TCP = "TCP"
class HealthCheckError(LibcloudError):
error_type = "HealthCheckError"
def __init__(self, value, driver, health_check_id):
self.health_check_id = health_check_id
super(HealthCheckError, self).__init__(value=value, driver=driver)
def __str__(self):
return self.__repr__()
def __repr__(self):
return "<%s in %s, health_check_id=%s, value=%s>" % (
self.error_type,
repr(self.driver),
self.health_check_id,
self.value,
)
class HealthCheckDoesNotExistError(HealthCheckError):
error_type = "HealthCheckDoesNotExistError"
class AuroraDNSHealthCheck(object):
"""
AuroraDNS Healthcheck resource.
"""
def __init__(
self,
id,
type,
hostname,
ipaddress,
port,
interval,
path,
threshold,
health,
enabled,
zone,
driver,
extra=None,
):
"""
:param id: Healthcheck id
:type id: ``str``
:param hostname: Hostname or FQDN of the target
:type hostname: ``str``
:param ipaddress: IPv4 or IPv6 address of the target
:type ipaddress: ``str``
:param port: The port on the target to monitor
:type port: ``int``
:param interval: The interval of the health check
:type interval: ``int``
:param path: The path to monitor on the target
:type path: ``str``
:param threshold: The threshold of before marking a check as failed
:type threshold: ``int``
:param health: The current health of the health check
:type health: ``bool``
:param enabled: If the health check is currently enabled
:type enabled: ``bool``
:param zone: Zone instance.
:type zone: :class:`Zone`
:param driver: DNSDriver instance.
:type driver: :class:`DNSDriver`
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
"""
self.id = str(id) if id else None
self.type = type
self.hostname = hostname
self.ipaddress = ipaddress
self.port = int(port) if port else None
self.interval = int(interval)
self.path = path
self.threshold = int(threshold)
self.health = bool(health)
self.enabled = bool(enabled)
self.zone = zone
self.driver = driver
self.extra = extra or {}
def update(
self,
type=None,
hostname=None,
ipaddress=None,
port=None,
interval=None,
path=None,
threshold=None,
enabled=None,
extra=None,
):
return self.driver.ex_update_healthcheck(
healthcheck=self,
type=type,
hostname=hostname,
ipaddress=ipaddress,
port=port,
path=path,
interval=interval,
threshold=threshold,
enabled=enabled,
extra=extra,
)
def delete(self):
return self.driver.ex_delete_healthcheck(healthcheck=self)
def __repr__(self):
return (
"<AuroraDNSHealthCheck: zone=%s, id=%s, type=%s, hostname=%s, "
"ipaddress=%s, port=%d, interval=%d, health=%s, provider=%s"
"...>"
% (
self.zone.id,
self.id,
self.type,
self.hostname,
self.ipaddress,
self.port,
self.interval,
self.health,
self.driver.name,
)
)
class AuroraDNSResponse(JsonResponse):
def success(self):
return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]
def parse_error(self):
status = int(self.status)
error = {"driver": self, "value": ""}
if status == httplib.UNAUTHORIZED:
error["value"] = "Authentication failed"
raise InvalidCredsError(**error)
elif status == httplib.FORBIDDEN:
error["value"] = "Authorization failed"
error["http_code"] = status
raise ProviderError(**error)
elif status == httplib.NOT_FOUND:
context = self.connection.context
if context["resource"] == "zone":
error["zone_id"] = context["id"]
raise ZoneDoesNotExistError(**error)
elif context["resource"] == "record":
error["record_id"] = context["id"]
raise RecordDoesNotExistError(**error)
elif context["resource"] == "healthcheck":
error["health_check_id"] = context["id"]
raise HealthCheckDoesNotExistError(**error)
elif status == httplib.CONFLICT:
context = self.connection.context
if context["resource"] == "zone":
error["zone_id"] = context["id"]
raise ZoneAlreadyExistsError(**error)
elif status == httplib.BAD_REQUEST:
context = self.connection.context
body = self.parse_body()
raise ProviderError(value=body["errormsg"], http_code=status, driver=self)
class AuroraDNSConnection(ConnectionUserAndKey):
host = API_HOST
responseCls = AuroraDNSResponse
def calculate_auth_signature(self, secret_key, method, url, timestamp):
b64_hmac = base64.b64encode(
hmac.new(
b(secret_key), b(method) + b(url) + b(timestamp), digestmod=sha256
).digest()
)
return b64_hmac.decode("utf-8")
def gen_auth_header(self, api_key, secret_key, method, url, timestamp):
signature = self.calculate_auth_signature(secret_key, method, url, timestamp)
auth_b64 = base64.b64encode(b("%s:%s" % (api_key, signature)))
return "AuroraDNSv1 %s" % (auth_b64.decode("utf-8"))
def request(self, action, params=None, data="", headers=None, method="GET"):
if not headers:
headers = {}
if not params:
params = {}
if method in ("POST", "PUT"):
headers = {"Content-Type": "application/json; charset=UTF-8"}
t = datetime.datetime.utcnow()
timestamp = t.strftime("%Y%m%dT%H%M%SZ")
headers["X-AuroraDNS-Date"] = timestamp
headers["Authorization"] = self.gen_auth_header(
self.user_id, self.key, method, action, timestamp
)
return super(AuroraDNSConnection, self).request(
action=action, params=params, data=data, method=method, headers=headers
)
class AuroraDNSDriver(DNSDriver):
name = "AuroraDNS"
website = "https://www.pcextreme.nl/en/aurora/dns"
connectionCls = AuroraDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: "A",
RecordType.AAAA: "AAAA",
RecordType.CNAME: "CNAME",
RecordType.MX: "MX",
RecordType.NS: "NS",
RecordType.SOA: "SOA",
RecordType.SRV: "SRV",
RecordType.TXT: "TXT",
RecordType.DS: "DS",
RecordType.PTR: "PTR",
RecordType.SSHFP: "SSHFP",
RecordType.TLSA: "TLSA",
}
HEALTHCHECK_TYPE_MAP = {
AuroraDNSHealthCheckType.HTTP: "HTTP",
AuroraDNSHealthCheckType.HTTPS: "HTTPS",
AuroraDNSHealthCheckType.TCP: "TCP",
}
def iterate_zones(self):
res = self.connection.request("/zones")
for zone in res.parse_body():
yield self.__res_to_zone(zone)
def iterate_records(self, zone):
self.connection.set_context({"resource": "zone", "id": zone.id})
res = self.connection.request("/zones/%s/records" % zone.id)
for record in res.parse_body():
yield self.__res_to_record(zone, record)
def get_zone(self, zone_id):
self.connection.set_context({"resource": "zone", "id": zone_id})
res = self.connection.request("/zones/%s" % zone_id)
zone = res.parse_body()
return self.__res_to_zone(zone)
def get_record(self, zone_id, record_id):
self.connection.set_context({"resource": "record", "id": record_id})
res = self.connection.request("/zones/%s/records/%s" % (zone_id, record_id))
record = res.parse_body()
zone = self.get_zone(zone_id)
return self.__res_to_record(zone, record)
def create_zone(self, domain, type="master", ttl=None, extra=None):
self.connection.set_context({"resource": "zone", "id": domain})
res = self.connection.request(
"/zones", method="POST", data=json.dumps({"name": domain})
)
zone = res.parse_body()
return self.__res_to_zone(zone)
def create_record(self, name, zone, type, data, extra=None):
if name is None:
name = ""
rdata = {"name": name, "type": self.RECORD_TYPE_MAP[type], "content": data}
rdata = self.__merge_extra_data(rdata, extra)
if "ttl" not in rdata:
rdata["ttl"] = DEFAULT_ZONE_TTL
self.connection.set_context({"resource": "zone", "id": zone.id})
res = self.connection.request(
"/zones/%s/records" % zone.id, method="POST", data=json.dumps(rdata)
)
record = res.parse_body()
return self.__res_to_record(zone, record)
def delete_zone(self, zone):
self.connection.set_context({"resource": "zone", "id": zone.id})
self.connection.request("/zones/%s" % zone.id, method="DELETE")
return True
def delete_record(self, record):
self.connection.set_context({"resource": "record", "id": record.id})
self.connection.request(
"/zones/%s/records/%s" % (record.zone.id, record.id), method="DELETE"
)
return True
def list_record_types(self):
types = []
for record_type in self.RECORD_TYPE_MAP.keys():
types.append(record_type)
return types
def update_record(self, record, name, type, data, extra=None):
rdata = {}
if name is not None:
rdata["name"] = name
if type is not None:
rdata["type"] = self.RECORD_TYPE_MAP[type]
if data is not None:
rdata["content"] = data
rdata = self.__merge_extra_data(rdata, extra)
self.connection.set_context({"resource": "record", "id": record.id})
self.connection.request(
"/zones/%s/records/%s" % (record.zone.id, record.id),
method="PUT",
data=json.dumps(rdata),
)
return self.get_record(record.zone.id, record.id)
def ex_list_healthchecks(self, zone):
"""
List all Health Checks in a zone.
:param zone: Zone to list health checks for.
:type zone: :class:`Zone`
:return: ``list`` of :class:`AuroraDNSHealthCheck`
"""
healthchecks = []
self.connection.set_context({"resource": "zone", "id": zone.id})
res = self.connection.request("/zones/%s/health_checks" % zone.id)
for healthcheck in res.parse_body():
healthchecks.append(self.__res_to_healthcheck(zone, healthcheck))
return healthchecks
def ex_get_healthcheck(self, zone, health_check_id):
"""
Get a single Health Check from a zone
:param zone: Zone in which the health check is
:type zone: :class:`Zone`
:param health_check_id: ID of the required health check
:type health_check_id: ``str``
:return: :class:`AuroraDNSHealthCheck`
"""
self.connection.set_context({"resource": "healthcheck", "id": health_check_id})
res = self.connection.request(
"/zones/%s/health_checks/%s" % (zone.id, health_check_id)
)
check = res.parse_body()
return self.__res_to_healthcheck(zone, check)
def ex_create_healthcheck(
self,
zone,
type,
hostname,
port,
path,
interval,
threshold,
ipaddress=None,
enabled=True,
extra=None,
):
"""
Create a new Health Check in a zone
:param zone: Zone in which the health check should be created
:type zone: :class:`Zone`
:param type: The type of health check to be created
:type type: :class:`AuroraDNSHealthCheckType`
:param hostname: The hostname of the target to monitor
:type hostname: ``str``
:param port: The port of the target to monitor. E.g. 80 for HTTP
:type port: ``int``
        :param path: The path of the target to monitor. Only used by HTTP
                     at this moment. Usually this is simply '/'.
:type path: ``str``
:param interval: The interval of checks. 10, 30 or 60 seconds.
:type interval: ``int``
:param threshold: The threshold of failures before the healthcheck is
marked as failed.
:type threshold: ``int``
        :param ipaddress: (optional) The IP Address of the target to monitor.
                          You can pass an empty string if this is not required.
:type ipaddress: ``str``
:param enabled: (optional) If this healthcheck is enabled to run
:type enabled: ``bool``
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:return: :class:`AuroraDNSHealthCheck`
"""
cdata = {
"type": self.HEALTHCHECK_TYPE_MAP[type],
"hostname": hostname,
"ipaddress": ipaddress,
"port": int(port),
"interval": int(interval),
"path": path,
"threshold": int(threshold),
"enabled": enabled,
}
self.connection.set_context({"resource": "zone", "id": zone.id})
res = self.connection.request(
"/zones/%s/health_checks" % zone.id, method="POST", data=json.dumps(cdata)
)
healthcheck = res.parse_body()
return self.__res_to_healthcheck(zone, healthcheck)
def ex_update_healthcheck(
self,
healthcheck,
type=None,
hostname=None,
ipaddress=None,
port=None,
path=None,
interval=None,
threshold=None,
enabled=None,
extra=None,
):
"""
Update an existing Health Check
        :param healthcheck: The healthcheck which has to be updated
        :type healthcheck: :class:`AuroraDNSHealthCheck`
:param type: (optional) The type of health check to be created
:type type: :class:`AuroraDNSHealthCheckType`
:param hostname: (optional) The hostname of the target to monitor
:type hostname: ``str``
        :param ipaddress: (optional) The IP Address of the target to monitor.
                          You can pass an empty string if this is not required.
:type ipaddress: ``str``
:param port: (optional) The port of the target to monitor. E.g. 80
for HTTP
:type port: ``int``
:param path: (optional) The path of the target to monitor.
Only used by HTTP at this moment. Usually just '/'.
:type path: ``str``
:param interval: (optional) The interval of checks.
10, 30 or 60 seconds.
:type interval: ``int``
:param threshold: (optional) The threshold of failures before the
healthcheck is marked as failed.
:type threshold: ``int``
:param enabled: (optional) If this healthcheck is enabled to run
:type enabled: ``bool``
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:return: :class:`AuroraDNSHealthCheck`
"""
cdata = {}
if type is not None:
cdata["type"] = self.HEALTHCHECK_TYPE_MAP[type]
if hostname is not None:
cdata["hostname"] = hostname
if ipaddress is not None:
if len(ipaddress) == 0:
cdata["ipaddress"] = None
else:
cdata["ipaddress"] = ipaddress
if port is not None:
cdata["port"] = int(port)
if path is not None:
cdata["path"] = path
if interval is not None:
cdata["interval"] = int(interval)
if threshold is not None:
cdata["threshold"] = threshold
if enabled is not None:
cdata["enabled"] = bool(enabled)
self.connection.set_context({"resource": "healthcheck", "id": healthcheck.id})
self.connection.request(
"/zones/%s/health_checks/%s" % (healthcheck.zone.id, healthcheck.id),
method="PUT",
data=json.dumps(cdata),
)
return self.ex_get_healthcheck(healthcheck.zone, healthcheck.id)
def ex_delete_healthcheck(self, healthcheck):
"""
Remove an existing Health Check
        :param healthcheck: The healthcheck which has to be removed
        :type healthcheck: :class:`AuroraDNSHealthCheck`
"""
self.connection.set_context({"resource": "healthcheck", "id": healthcheck.id})
self.connection.request(
"/zones/%s/health_checks/%s" % (healthcheck.zone.id, healthcheck.id),
method="DELETE",
)
return True
def __res_to_record(self, zone, record):
if len(record["name"]) == 0:
name = None
else:
name = record["name"]
extra = {}
extra["created"] = record["created"]
extra["modified"] = record["modified"]
extra["disabled"] = record["disabled"]
extra["ttl"] = record["ttl"]
extra["priority"] = record["prio"]
return Record(
id=record["id"],
name=name,
type=record["type"],
data=record["content"],
zone=zone,
driver=self.connection.driver,
ttl=record["ttl"],
extra=extra,
)
def __res_to_zone(self, zone):
return Zone(
id=zone["id"],
domain=zone["name"],
type=DEFAULT_ZONE_TYPE,
ttl=DEFAULT_ZONE_TTL,
driver=self.connection.driver,
extra={
"created": zone["created"],
"servers": zone["servers"],
"account_id": zone["account_id"],
"cluster_id": zone["cluster_id"],
},
)
def __res_to_healthcheck(self, zone, healthcheck):
return AuroraDNSHealthCheck(
id=healthcheck["id"],
type=healthcheck["type"],
hostname=healthcheck["hostname"],
ipaddress=healthcheck["ipaddress"],
health=healthcheck["health"],
threshold=healthcheck["threshold"],
path=healthcheck["path"],
interval=healthcheck["interval"],
port=healthcheck["port"],
enabled=healthcheck["enabled"],
zone=zone,
driver=self.connection.driver,
)
def __merge_extra_data(self, rdata, extra):
if extra is not None:
for param in VALID_RECORD_PARAMS_EXTRA:
if param in extra:
rdata[param] = extra[param]
return rdata
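# A minimal usage sketch (hypothetical credentials and domain): instantiating the
# driver directly and creating a zone plus an A record. Real code would normally
# obtain the driver class via libcloud.dns.providers.get_driver instead.
def _example_auroradns_usage(api_key="my-api-key", secret_key="my-secret"):
    driver = AuroraDNSDriver(api_key, secret_key)
    zone = driver.create_zone("example.org")
    record = driver.create_record(
        name="www",
        zone=zone,
        type=RecordType.A,
        data="192.0.2.10",
        extra={"ttl": 300},
    )
    return zone, record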
| apache/libcloud | libcloud/dns/drivers/auroradns.py | Python | apache-2.0 | 21,120 |
"""Document class, read document, clean document, get terms."""
import uuid
import string
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from collections import Counter
class Document(object):
def __init__(self, path, r_d, r_p):
print path
self.path = path
self._name = path.split('/')[-1]
self._term = None
# init redis instance for postings and dictionary
self.r_d = r_d
self.r_p = r_p
def read(self):
"""Get terms within documents."""
try:
with open(self.path, 'r') as f:
self._term = f.read()
return self
except EnvironmentError:
raise IOError("File not found")
def lower(self):
"""Terms to lower case."""
self._term = self._term.lower()
return self
def del_punc(self):
"""Remove punc."""
self._term = self._term.translate(
None,
string.punctuation
)
return self
def del_space_stop(self):
"""Remove spaces, stopwords."""
cached = stopwords.words("english")
self._term = [word for word in self._term.split() if word not in cached]
return self
@property
def terms(self):
"""Finish process"""
self.read().lower().del_punc().del_space_stop()
return self._term
@property
def name(self):
"""doc name"""
return self._name
def store(self, terms):
"""Store in db."""
terms = Counter(terms).most_common()
for t, tf in terms:
idx = str(uuid.uuid4())
node_dict = self.r_d.hgetall(t)
if node_dict and node_dict['d'] != self._name:
self.r_d.hmset(t,
{'id': idx, 'df': int(node_dict['df']) + 1,
'd': ','.join([self.r_p.hget(node_dict['id'], 'd'), self._name])})
self.r_p.hmset(idx,
{
't': t,
'd': ','.join([self.r_p.hget(node_dict['id'], 'd') , self._name]),
'tf': ','.join([self.r_p.hget(node_dict['id'], 'tf'), str(tf)])})
else:
self.r_d.hmset(t,
{'id': idx, 'df': 1, 'd': self._name})
self.r_p.hmset(idx,
{
't': t,
'd': self._name,
'tf': tf
})
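# A minimal usage sketch (hypothetical file path and redis handles): run the full
# cleaning pipeline on one document and store its term frequencies in the
# dictionary (r_d) and postings (r_p) databases, as store() describes.
def _example_index_document(r_d, r_p, path='corpus/doc1.txt'):
    doc = Document(path, r_d, r_p)
    terms = doc.terms   # read, lowercase, strip punctuation and stopwords
    doc.store(terms)    # one dictionary entry and one posting per distinct term
    return doc.name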
| bwanglzu/inverted-index.py | inverted_index/document.py | Python | mit | 2,490 |
#!/usr/bin/python
#============================ adjust path =====================================
import sys
import os
if __name__ == "__main__":
here = sys.path[0]
sys.path.insert(0, os.path.join(here, '..', '..','libs'))
sys.path.insert(0, os.path.join(here, '..', '..','external_libs'))
#============================ verify installation =============================
from SmartMeshSDK.utils import SmsdkInstallVerifier
(goodToGo,reason) = SmsdkInstallVerifier.verifyComponents(
[
SmsdkInstallVerifier.PYTHON,
SmsdkInstallVerifier.PYSERIAL,
]
)
if not goodToGo:
print "Your installation does not allow this application to run:\n"
print reason
raw_input("Press any button to exit")
sys.exit(1)
#============================ imports =========================================
import requests
import json
from SmartMeshSDK import sdk_version
from SmartMeshSDK.utils import AppUtils, \
FormatUtils
from SmartMeshSDK.IpMgrConnectorSerial import IpMgrConnectorSerial
from SmartMeshSDK.IpMgrConnectorMux import IpMgrSubscribe
from SmartMeshSDK.protocols.oap import OAPDispatcher, \
OAPNotif
#============================ logging =========================================
# local
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('App')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
# global
AppUtils.configureLogging()
#============================ defines =========================================
DEFAULT_SERIALPORT = 'COM7'
SERVER_HOST = 'clouddata.dustcloud.org'
SERVER_PORT = '80'
#============================ helper functions ================================
# called when the manager generates a data notification
def handle_data(notifName, notifParams):
# have the OAP dispatcher parse the packet.
    # It will call handle_oap_data() if the data is valid OAP data.
oapdispatcher.dispatch_pkt(notifName, notifParams)
# called when the OAP dispatcher can successfully parse received data as OAP
def handle_oap_data(mac,notif):
if isinstance(notif,OAPNotif.OAPTempSample):
mac = FormatUtils.formatMacString(mac)
temperature = float(notif.samples[0])/100
try:
r = requests.post(
"http://{0}:{1}/api/v1/oap".format(SERVER_HOST,SERVER_PORT),
data = json.dumps({
'mac': mac,
'temperature': temperature,
}),
headers = {
'Content-type': 'application/json',
}
)
except Exception as err:
print err
else:
print 'sent mac={0} temperature={1:.2f}C'.format(mac,temperature)
#============================ main ============================================
# print banner
print 'PublishToWeb - (c) Dust Networks'
print 'SmartMesh SDK {0}'.format('.'.join([str(b) for b in sdk_version.VERSION]))
# set up the OAP dispatcher (which parses OAP packets)
oapdispatcher = OAPDispatcher.OAPDispatcher()
oapdispatcher.register_notif_handler(handle_oap_data)
# ask user for serial port number
serialport = raw_input('\nSmartMesh IP manager\'s API serial port (leave blank for '+DEFAULT_SERIALPORT+'): ')
if not serialport.strip():
serialport = DEFAULT_SERIALPORT
# connect to manager
connector = IpMgrConnectorSerial.IpMgrConnectorSerial()
try:
connector.connect({
'port': serialport,
})
except Exception as err:
print 'failed to connect to manager at {0}, error ({1})\n{2}'.format(
serialport,
type(err),
err
)
raw_input('Aborting. Press Enter to close.')
sys.exit(1)
else:
print 'Connected to {0}.\n'.format(serialport)
# subscribe to data notifications
subscriber = IpMgrSubscribe.IpMgrSubscribe(connector)
subscriber.start()
subscriber.subscribe(
notifTypes = [
IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA,
],
fun = handle_data,
isRlbl = False,
)
| realms-team/basestation-fw | libs/smartmeshsdk-REL-1.3.0.1/app/PublishToWeb/PublishToWeb.py | Python | bsd-3-clause | 4,295 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name="index"),
url(r'^signin/$', views.signin, name='signin'),
    url(r'^create_user/(?P<user_id>[0-9]+)/(?P<user_class>([a-z])+)$',
        views.create_new_user,
        name='new_user'),
url(r'^timetable/(?P<user_id>[0-9]+)$', \
views.get_timetable, name='timetable'),
url(r'^notes/(?P<user_id>[0-9]+)$', \
views.get_notes, name='notes'),
url(r'^subject_data/(?P<user_id>[0-9]+)$', \
views.get_sub_data, name='subject_data'),
url(r'^events/(?P<user_id>[0-9]+)$', \
views.get_events_dummy, name='events'),
url(r'^track_data/$', \
views.get_track_data, name='events'),
url(r'^calendar/(?P<user_id>[0-9]+)$', \
views.get_cal_data_dummy, name='events'),
url(r'^subject_attendence/(?P<user_id>[0-9]+)$', \
views.get_attendence, name='get_attendence'),
url(r'^create_user/$', \
views.create_new_user, name='new_user'),
url(r'^update_attendence/$', \
views.update_attendence,\
name='update_attendence'),
url(r'^set_track_data/$', \
views.set_track_data, name='set_track_data'),
]
| meain/bridge | docs/report/urls.py | Python | mit | 1,263 |
"""PID Fetchers."""
from collections import namedtuple
from ..providers.docid import DocUUIDProvider
FetchedPID = namedtuple('FetchedPID', ['provider', 'pid_type', 'pid_value'])
def cernopendata_docid_fetcher(record_uuid, data):
"""Fetch a article's identifiers."""
return FetchedPID(
provider=DocUUIDProvider,
pid_type=DocUUIDProvider.pid_type,
pid_value=data['control_number'],
)
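# A minimal sketch (hypothetical record data): the fetcher only needs the record
# metadata dict carrying a 'control_number'; the record UUID is not used here.
def _example_docid_fetch():
    data = {'control_number': '1'}
    return cernopendata_docid_fetcher(record_uuid=None, data=data)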
| tiborsimko/opendata.cern.ch | cernopendata/modules/records/fetchers/docid.py | Python | gpl-2.0 | 423 |
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.admin.utils import quote
from django.contrib.contenttypes.models import ContentType
try:
from django.urls import NoReverseMatch, reverse
except ImportError:
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models import QuerySet, Q
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.six import iteritems, integer_types
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import lazy
from jsonfield import JSONField
from .signals import action_logged
from . import settings as app_conf
import json
class LogActionManager(models.Manager):
def create_log_action(self, **kwargs):
"""
Helper method to create a new log entry.
This method automatically populates some fields when no explicit value is given.
:param instance: The model instance to log a change for.
:type instance: Model
:param kwargs: Field overrides for the :py:class:`LogAction` object.
:return: The new log entry or `None` if there were no changes.
:rtype: LogAction
"""
instance = kwargs.get('instance', None)
if instance is not None:
del kwargs['instance']
request = kwargs.get('request', None)
if request is not None:
del kwargs['request']
# Let's grab the current IP of the user.
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
remote_ip = x_forwarded_for.split(',')[0]
else:
remote_ip = request.META.get('REMOTE_ADDR')
kwargs.setdefault('remote_ip', remote_ip)
if instance is not None:
pk = self._get_pk_value(instance)
kwargs.setdefault(
'content_type',
ContentType.objects.get_for_model(instance)
)
kwargs.setdefault('object_pk', pk)
kwargs.setdefault('object_repr', smart_text(instance))
if isinstance(pk, integer_types):
kwargs.setdefault('object_id', pk)
get_object_extra_info = getattr(
instance,
'get_object_extra_info',
None
)
if callable(get_object_extra_info):
kwargs.setdefault('object_extra_info', get_object_extra_info())
            # Delete log entries with the same pk as a newly created model.
            # This should only be necessary when a pk is used twice.
if kwargs.get('action', None) is app_conf.CREATE:
is_obj_exists = self.filter(
content_type=kwargs.get('content_type'),
object_id=kwargs.get('object_id')
).exists()
if kwargs.get('object_id', None) is not None and is_obj_exists:
self.filter(
content_type=kwargs.get('content_type'),
object_id=kwargs.get('object_id')
).delete()
else:
self.filter(
content_type=kwargs.get('content_type'),
object_pk=kwargs.get('object_pk', '')
).delete()
action_log = self.create(**kwargs)
action_logged.send(sender=LogAction, action=action_log)
return action_log
def get_for_model(self, model):
"""
Get log entries for all objects of a specified type.
:param model: The model to get log entries for.
:type model: class
:return: QuerySet of log entries for the given model.
:rtype: QuerySet
"""
# Return empty queryset if the given object is not valid.
if not issubclass(model, models.Model):
return self.none()
ct = ContentType.objects.get_for_model(model)
return self.filter(content_type=ct)
def get_for_objects(self, queryset):
"""
Get log entries for the objects in the specified queryset.
:param queryset: The queryset to get the log entries for.
:type queryset: QuerySet
:return: The LogAction objects for the objects in the given queryset.
:rtype: QuerySet
"""
if not isinstance(queryset, QuerySet) or queryset.count() == 0:
return self.none()
content_type = ContentType.objects.get_for_model(queryset.model)
primary_keys = queryset.values_list(queryset.model._meta.pk.name, flat=True)
if isinstance(primary_keys[0], integer_types):
return self.filter(content_type=content_type).filter(Q(object_id__in=primary_keys)).distinct()
else:
return self.filter(content_type=content_type).filter(Q(object_pk__in=primary_keys)).distinct()
def _get_pk_value(self, instance):
"""
Get the primary key field value for a model instance.
:param instance: The model instance to get the primary key for.
:type instance: Model
:return: The primary key value of the given model instance.
"""
pk_field = instance._meta.pk.name
pk = getattr(instance, pk_field, None)
        # Check to make sure that we got a pk, not a model object.
if isinstance(pk, models.Model):
pk = self._get_pk_value(pk)
return pk
def get_action_choices():
return app_conf.LOG_ACTION_CHOICES
@python_2_unicode_compatible
class LogAction(models.Model):
content_type = models.ForeignKey(
'contenttypes.ContentType', related_name='+',
verbose_name=_("content type"),
blank=True, null=True, on_delete=models.SET_NULL
)
object_id = models.BigIntegerField(
verbose_name=_("object id"),
blank=True, null=True, db_index=True
)
object_pk = models.CharField(
verbose_name=_("object pk"), max_length=255,
blank=True, null=True, db_index=True
)
object_repr = models.TextField(
verbose_name=_("object representation"),
blank=True, null=True
)
object_extra_info = JSONField(
verbose_name=_("object information"),
blank=True, null=True
)
session_key = models.CharField(_('session key'), max_length=40, blank=True, null=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, verbose_name=_("user"),
blank=True, null=True,
on_delete=models.SET_NULL, related_name='actionlogs'
)
action = models.PositiveSmallIntegerField(verbose_name=_("action"), blank=True, null=True)
action_info = JSONField(
verbose_name=_("action information"),
blank=True, null=True
)
changes = models.TextField(blank=True, verbose_name=_("change message"))
remote_ip = models.GenericIPAddressField(
verbose_name=_("remote IP"), blank=True, null=True
)
created_at = models.DateTimeField(
verbose_name=_("created at"), auto_now_add=True, db_index=True
)
objects = LogActionManager()
class Meta:
ordering = ['-created_at']
verbose_name = _("log action")
verbose_name_plural = _("log actions")
def __str__(self):
if self.object_repr:
return _("Logged {repr:s}").format(repr=self.object_repr)
elif self.action:
return _("Logged action, type: {action}, id: {id}").format(
action=self.get_action_display(),
id=self.id
)
else:
return _("Logged action, id: {id}").format(id=self.id)
def __init__(self, *args, **kwargs):
super(LogAction, self).__init__(*args, **kwargs)
try:
self._meta.get_field('action').choices = \
lazy(get_action_choices, list)()
except:
# for Django < 1.11
self._meta.get_field_by_name('action')[0]._choices = \
lazy(get_action_choices, list)()
def get_action_display(self):
for action in app_conf.LOG_ACTION_CHOICES:
if action[0] == self.action:
return action[1]
return _('Not provided')
def get_edited_object(self):
"""Returns the edited object represented by this log entry"""
return self.content_type.get_object_for_this_type(pk=self.object_id)
def get_admin_url(self):
"""
Returns the admin URL to edit the object represented by this log entry.
"""
if self.content_type and self.object_id:
url_name = 'admin:%s_%s_change' % (
self.content_type.app_label,
self.content_type.model
)
try:
return reverse(url_name, args=(quote(self.object_id),))
except NoReverseMatch:
pass
return None
@property
def changes_dict(self):
"""
:return: The changes recorded in this log entry as a dictionary object.
"""
try:
return json.loads(self.changes)
except ValueError:
return {}
@property
def changes_str(self, colon=': ', arrow=smart_text(' \u2192 '), separator='; '):
"""
Return the changes recorded in this log entry as a string.
        Because this is exposed as a property, the ``colon``, ``arrow`` and
        ``separator`` parameters always keep their default values; if different
        formatting is needed, use :py:attr:`LogAction.changes_dict` and format
        the string yourself.
:param colon: The string to place between the field name and the values.
:param arrow: The string to place between each old and new value.
:param separator: The string to place between each field.
:return: A readable string of the changes in this log entry.
"""
substrings = []
for field, values in iteritems(self.changes_dict):
substring = smart_text('{field_name:s}{colon:s}{old:s}{arrow:s}{new:s}').format(
field_name=field,
colon=colon,
old=values[0],
arrow=arrow,
new=values[1],
)
substrings.append(substring)
return separator.join(substrings)
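# Usage sketch with made-up field values: handing an instance and a request to
# the manager lets it fill in content_type, object_pk, object_repr and
# remote_ip automatically, as create_log_action() documents above. The JSON
# passed as `changes` is what changes_dict and changes_str read back later.
def _example_create_log_action(instance, request):
    return LogAction.objects.create_log_action(
        instance=instance,
        request=request,
        action=app_conf.CREATE,
        changes=json.dumps({'name': ['', 'Example name']}),
    )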
|
shtalinberg/django-actions-logger
|
actionslog/models.py
|
Python
|
mit
| 10,371
|
# Copyright (C) 2005, Todd Whiteman
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#===============================================================================
# I M P O R T S
#===============================================================================
import wx
backgroundColour = wx.Colour(230, 230, 255)
################################################################################
# pySIM skin helper classes #
################################################################################
class wxskinFrame(wx.Frame):
def __init__(self, parent, ID=-1, title="Frame", pos=wx.DefaultPosition, size=wx.DefaultSize):
wx.Frame.__init__(self, parent, ID, title, pos, size)
self.SetBackgroundColour(backgroundColour)
icon = wx.Icon('pySIM.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(icon)
class wxskinPanel(wx.Panel):
def __init__(self, parent, ID=-1, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.TAB_TRAVERSAL, name="panel"):
wx.Panel.__init__(self, parent, ID, pos, size, style, name)
self.SetBackgroundColour(backgroundColour)
class wxskinDialog(wx.Dialog):
def __init__(self, parent, id=-1, title="Dialog", pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_DIALOG_STYLE):
wx.Dialog.__init__(self, parent, id, title)
self.SetBackgroundColour(backgroundColour)
class wxskinStaticText(wx.StaticText):
def __init__(self, parent, id, text):
wx.StaticText.__init__(self, parent, id, text)
self.SetBackgroundColour(backgroundColour)
class wxskinListCtrl(wx.ListCtrl):
def __init__(self, parent, ID=-1, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.LC_ICON):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
self.SetBackgroundColour(backgroundColour)
class wxskinProgressDialog(wx.ProgressDialog):
def __init__(self, title, message, maximum=100, parent=None, style=wx.PD_AUTO_HIDE|wx.PD_APP_MODAL):
wx.ProgressDialog.__init__(self, title, message, maximum, parent, style)
self.SetBackgroundColour(backgroundColour)
class wxskinMessageDialog(wx.MessageDialog):
def __init__(self, parent, messageString, titleString="pySIM", style=wx.OK | wx.ICON_INFORMATION, pos=wx.DefaultPosition):
wx.MessageDialog.__init__(self, parent, messageString, titleString, style, pos)
self.SetBackgroundColour(backgroundColour)
class wxskinTextEntryDialog(wx.TextEntryDialog):
def __init__(self, parent, messageString, titleString="pySIM", defaultValue='', style=wx.OK|wx.CANCEL|wx.CENTRE, pos=wx.DefaultPosition):
wx.TextEntryDialog.__init__(self, parent, messageString, titleString, defaultValue, style, pos)
self.SetBackgroundColour(backgroundColour)
################################################################################
# pySIM dialog helper classes #
################################################################################
class pySIMmessage(wxskinMessageDialog):
def __init__(self, parent, messageString, titleString="pySIM", style=wx.OK | wx.ICON_INFORMATION):
wxskinMessageDialog.__init__(self, parent, messageString, titleString, style)
self.ShowModal()
self.Destroy()
class pySIMenterText(wxskinTextEntryDialog):
    def __init__(self, parent, messageString, titleString="pySIM", defaultValue=''):
        wxskinTextEntryDialog.__init__(self, parent, messageString, titleString, defaultValue)
        ret = self.ShowModal()
        val = self.GetValue()
        self.Destroy()
        # __init__ must return None, so the modal result and the entered value
        # are exposed as an attribute instead of being returned.
        self.result = (ret, val)
################################################################################
# pySIM other helper classes #
################################################################################
class pySIMvalidator(wx.PyValidator):
def __init__(self, charmap=None, minlength=None, maxlength=None):
wx.PyValidator.__init__(self)
self.charmap = charmap
self.minlength = minlength
self.maxlength = maxlength
wx.EVT_CHAR(self, self.OnChar)
def Clone(self):
return pySIMvalidator(self.charmap, self.minlength, self.maxlength)
def Validate(self, win):
tc = self.GetWindow()
val = tc.GetValue()
if self.charmap:
for x in val:
if x not in self.charmap:
return False
if self.minlength:
if len(val) < self.minlength:
return False
if self.maxlength:
if len(val) > self.maxlength:
return False
return True
def TransferToWindow(self):
return True # Prevent wxDialog from complaining.
def TransferFromWindow(self):
return True # Prevent wxDialog from complaining.
def OnChar(self, event):
key = event.KeyCode
if key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255:
event.Skip()
return
if not self.charmap or chr(key) in self.charmap:
val = self.GetWindow().GetValue()
if not self.maxlength or len(val) < self.maxlength:
event.Skip()
return
if not wx.Validator_IsSilent():
wx.Bell()
        # Returning without calling event.Skip eats the event before it
        # gets to the text control
return
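# Usage sketch: attaching the validator above to a text control. The numeric
# charmap and the length limits are arbitrary values chosen for illustration.
def exampleValidatedTextCtrl(parent):
    return wx.TextCtrl(parent, -1, "",
                       validator=pySIMvalidator(charmap="0123456789",
                                                minlength=4, maxlength=8))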
|
toddw-as/pySIM
|
pySIMskin.py
|
Python
|
gpl-3.0
| 6,174
|
import random

while True:
    phase = int(input("Enter the number of faces on the die: "))
    if phase in [4, 6, 8, 12, 20]:
        박세훈 = 2
        while 박세훈 >= 0:
            if 박세훈 == 2:
                dice1 = int(input("Enter the number of dice the player will roll: "))
                a = []
                while dice1 > 0:
                    a.append(random.randint(1, int(phase)))
                    dice1 = dice1 - 1
                print("List of the player's rolls: " + str(a) + "\n" + "Player's total: " + str(sum(a)))
                박세훈 = 박세훈 - 1
            elif 박세훈 == 1:
                dice2 = int(input("Enter the number of dice the computer will roll: "))
                b = []
                while dice2 > 0:
                    b.append(random.randint(1, int(phase)))
                    dice2 = dice2 - 1
                print("List of the computer's rolls: " + str(b) + "\n" + "Computer's total: " + str(sum(b)))
                박세훈 = 박세훈 - 1
            elif 박세훈 == 0:
                if sum(a) > sum(b):
                    print("The player wins!!!")
                elif sum(b) > sum(a):
                    print("The computer wins!!!")
                else:
                    print("It's a tie!!!")
                박세훈 = 박세훈 - 1
    else:
        print("That is not a regular polyhedron?")
|
imn00133/pythonSeminar17
|
exercise/dicegame/Sehun/08_21_dicegame1-4.py
|
Python
|
mit
| 1,144
|
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Process Android library resources to generate R.java and crunched images."""
import optparse
import os
import subprocess
BUILD_ANDROID_DIR = os.path.dirname(__file__)
def ParseArgs():
"""Parses command line options.
Returns:
An options object as from optparse.OptionsParser.parse_args()
"""
parser = optparse.OptionParser()
parser.add_option('--android-sdk', help='path to the Android SDK folder')
parser.add_option('--android-sdk-tools',
help='path to the Android SDK platform tools folder')
parser.add_option('--R-package', help='Java package for generated R.java')
parser.add_option('--R-dir', help='directory to hold generated R.java')
parser.add_option('--res-dir', help='directory containing resources')
parser.add_option('--out-res-dir',
help='directory to hold crunched resources')
# This is part of a temporary fix for crbug.com/177552.
# TODO(newt): remove this once crbug.com/177552 is fixed in ninja.
parser.add_option('--ignore', help='this argument is ignored')
(options, args) = parser.parse_args()
if args:
parser.error('No positional arguments should be given.')
# Check that required options have been provided.
required_options = ('android_sdk', 'android_sdk_tools', 'R_package',
'R_dir', 'res_dir', 'out_res_dir')
for option_name in required_options:
if getattr(options, option_name) is None:
parser.error('--%s is required' % option_name.replace('_', '-'))
return options
def main():
options = ParseArgs()
android_jar = os.path.join(options.android_sdk, 'android.jar')
aapt = os.path.join(options.android_sdk_tools, 'aapt')
dummy_manifest = os.path.join(BUILD_ANDROID_DIR, 'AndroidManifest.xml')
# Generate R.java. This R.java contains non-final constants and is used only
# while compiling the library jar (e.g. chromium_content.jar). When building
# an apk, a new R.java file with the correct resource -> ID mappings will be
# generated by merging the resources from all libraries and the main apk
# project.
package_command = [aapt,
'package',
'-m',
'--non-constant-id',
'--custom-package', options.R_package,
'-M', dummy_manifest,
'-S', options.res_dir,
'--auto-add-overlay',
'-I', android_jar,
'--output-text-symbols', options.R_dir,
'-J', options.R_dir]
# If strings.xml was generated from a grd file, it will be in out_res_dir.
if os.path.isdir(options.out_res_dir):
package_command += ['-S', options.out_res_dir]
subprocess.check_call(package_command)
# Crunch image resources. This shrinks png files and is necessary for 9-patch
# images to display correctly.
subprocess.check_call([aapt,
'crunch',
'-S', options.res_dir,
'-C', options.out_res_dir])
if __name__ == '__main__':
main()
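# Example invocation (every path and the package name below are placeholders):
#
#   process_resources.py --android-sdk /opt/android-sdk \
#       --android-sdk-tools /opt/android-sdk/platform-tools \
#       --R-package org.chromium.example \
#       --R-dir out/gen/example/R \
#       --res-dir java/res \
#       --out-res-dir out/res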
|
matsumoto-r/synciga
|
src/build/android/process_resources.py
|
Python
|
bsd-3-clause
| 3,264
|
#!/usr/bin/python
# (c) 2012, Elliott Foster <elliott@fourkitchens.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
# (c) 2014, Epic Games, Inc.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mongodb_user
short_description: Adds or removes a user from a MongoDB database
description:
- Adds or removes a user from a MongoDB database.
version_added: "1.1"
options:
login_user:
description:
- The MongoDB username used to authenticate with.
type: str
login_password:
description:
- The login user's password used to authenticate with.
type: str
login_host:
description:
- The host running the database.
default: localhost
type: str
login_port:
description:
- The MongoDB port to connect to.
default: '27017'
type: str
login_database:
version_added: "2.0"
description:
- The database where login credentials are stored.
type: str
replica_set:
version_added: "1.6"
description:
- Replica set to connect to (automatically connects to primary for writes).
type: str
database:
description:
- The name of the database to add/remove the user from.
required: true
type: str
aliases: [db]
name:
description:
- The name of the user to add or remove.
required: true
aliases: [user]
type: str
password:
description:
- The password to use for the user.
type: str
aliases: [pass]
ssl:
version_added: "1.8"
description:
- Whether to use an SSL connection when connecting to the database.
type: bool
ssl_cert_reqs:
version_added: "2.2"
description:
- Specifies whether a certificate is required from the other side of the connection,
and whether it will be validated if provided.
default: CERT_REQUIRED
choices: [CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED]
type: str
roles:
version_added: "1.3"
type: list
elements: raw
description:
- >
The database user roles valid values could either be one or more of the following strings:
'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase',
'dbAdminAnyDatabase'
- "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'."
- "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required."
state:
description:
- The database user state.
default: present
choices: [absent, present]
type: str
update_password:
default: always
choices: [always, on_create]
version_added: "2.1"
description:
- C(always) will update passwords if they differ.
- C(on_create) will only set the password for newly created users.
type: str
notes:
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
requirements: [ "pymongo" ]
author:
- "Elliott Foster (@elliotttf)"
- "Julien Thebault (@Lujeni)"
'''
EXAMPLES = '''
- name: Create 'burgers' database user with name 'bob' and password '12345'.
mongodb_user:
database: burgers
name: bob
password: 12345
state: present
- name: Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly)
mongodb_user:
database: burgers
name: bob
password: 12345
state: present
ssl: True
- name: Delete 'burgers' database user with name 'bob'.
mongodb_user:
database: burgers
name: bob
state: absent
- name: Define more users with various specific roles (if not defined, no roles are assigned, and the user will be added via pre mongo 2.2 style)
mongodb_user:
database: burgers
name: ben
password: 12345
roles: read
state: present
- name: Define roles
mongodb_user:
database: burgers
name: jim
password: 12345
roles: readWrite,dbAdmin,userAdmin
state: present
- name: Define roles
mongodb_user:
database: burgers
name: joe
password: 12345
roles: readWriteAnyDatabase
state: present
- name: Add a user to database in a replica set, the primary server is automatically discovered and written to
mongodb_user:
database: burgers
name: bob
replica_set: belcher
password: 12345
roles: readWriteAnyDatabase
state: present
# add a user 'oplog_reader' with read only access to the 'local' database on the replica_set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL).
# please notice the credentials must be added to the 'admin' database because the 'local' database is not synchronized and can't receive user credentials
# To login with such user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin"
# This syntax requires mongodb 2.6+ and pymongo 2.5+
- name: Roles as a dictionary
mongodb_user:
login_user: root
login_password: root_password
database: admin
user: oplog_reader
password: oplog_reader_password
state: present
replica_set: belcher
roles:
- db: local
role: read
'''
RETURN = '''
user:
description: The name of the user to add or remove.
returned: success
type: str
'''
import os
import ssl as ssl_lib
import traceback
from ansible.module_utils.compat.version import LooseVersion
from operator import itemgetter
try:
from pymongo.errors import ConnectionFailure
from pymongo.errors import OperationFailure
from pymongo import version as PyMongoVersion
from pymongo import MongoClient
except ImportError:
try: # for older PyMongo 2.2
from pymongo import Connection as MongoClient
except ImportError:
pymongo_found = False
else:
pymongo_found = True
else:
pymongo_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_native
# =========================================
# MongoDB module specific support methods.
#
def check_compatibility(module, client):
"""Check the compatibility between the driver and the database.
See: https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility
Args:
module: Ansible module.
client (cursor): Mongodb cursor on admin database.
"""
loose_srv_version = LooseVersion(client.server_info()['version'])
loose_driver_version = LooseVersion(PyMongoVersion)
if loose_srv_version >= LooseVersion('3.2') and loose_driver_version < LooseVersion('3.2'):
module.fail_json(msg=' (Note: you must use pymongo 3.2+ with MongoDB >= 3.2)')
elif loose_srv_version >= LooseVersion('3.0') and loose_driver_version <= LooseVersion('2.8'):
module.fail_json(msg=' (Note: you must use pymongo 2.8+ with MongoDB 3.0)')
elif loose_srv_version >= LooseVersion('2.6') and loose_driver_version <= LooseVersion('2.7'):
module.fail_json(msg=' (Note: you must use pymongo 2.7+ with MongoDB 2.6)')
elif LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)')
def user_find(client, user, db_name):
"""Check if the user exists.
Args:
client (cursor): Mongodb cursor on admin database.
user (str): User to check.
db_name (str): User's database.
Returns:
dict: when user exists, False otherwise.
"""
for mongo_user in client["admin"].system.users.find():
if mongo_user['user'] == user:
# NOTE: there is no 'db' field in mongo 2.4.
if 'db' not in mongo_user:
return mongo_user
if mongo_user["db"] == db_name:
return mongo_user
return False
def user_add(module, client, db_name, user, password, roles):
# pymongo's user_add is a _create_or_update_user so we won't know if it was changed or updated
# without reproducing a lot of the logic in database.py of pymongo
db = client[db_name]
if roles is None:
db.add_user(user, password, False)
else:
db.add_user(user, password, None, roles=roles)
def user_remove(module, client, db_name, user):
exists = user_find(client, user, db_name)
if exists:
if module.check_mode:
module.exit_json(changed=True, user=user)
db = client[db_name]
db.remove_user(user)
else:
module.exit_json(changed=False, user=user)
def load_mongocnf():
config = configparser.RawConfigParser()
mongocnf = os.path.expanduser('~/.mongodb.cnf')
try:
config.readfp(open(mongocnf))
creds = dict(
user=config.get('client', 'user'),
password=config.get('client', 'pass')
)
except (configparser.NoOptionError, IOError):
return False
return creds
def check_if_roles_changed(uinfo, roles, db_name):
# We must be aware of users which can read the oplog on a replicaset
# Such users must have access to the local DB, but since this DB does not store users credentials
# and is not synchronized among replica sets, the user must be stored on the admin db
# Therefore their structure is the following :
# {
# "_id" : "admin.oplog_reader",
# "user" : "oplog_reader",
# "db" : "admin", # <-- admin DB
# "roles" : [
# {
# "role" : "read",
# "db" : "local" # <-- local DB
# }
# ]
# }
def make_sure_roles_are_a_list_of_dict(roles, db_name):
output = list()
for role in roles:
if isinstance(role, (binary_type, text_type)):
new_role = {"role": role, "db": db_name}
output.append(new_role)
else:
output.append(role)
return output
roles_as_list_of_dict = make_sure_roles_are_a_list_of_dict(roles, db_name)
uinfo_roles = uinfo.get('roles', [])
if sorted(roles_as_list_of_dict, key=itemgetter('db')) == sorted(uinfo_roles, key=itemgetter('db')):
return False
return True
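# Worked illustration of the comparison above (values are hypothetical): with
# db_name='burgers', a plain string role list ['read'] is normalised to
# [{'role': 'read', 'db': 'burgers'}] before being compared against the
# 'roles' entries stored on the user document, so string roles and their dict
# form are treated as equivalent.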
# =========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default='localhost'),
login_port=dict(default='27017'),
login_database=dict(default=None),
replica_set=dict(default=None),
database=dict(required=True, aliases=['db']),
name=dict(required=True, aliases=['user']),
password=dict(aliases=['pass'], no_log=True),
ssl=dict(default=False, type='bool'),
roles=dict(default=None, type='list', elements='raw'),
state=dict(default='present', choices=['absent', 'present']),
update_password=dict(default="always", choices=["always", "on_create"]),
ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']),
),
supports_check_mode=True
)
if not pymongo_found:
module.fail_json(msg=missing_required_lib('pymongo'))
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
login_database = module.params['login_database']
replica_set = module.params['replica_set']
db_name = module.params['database']
user = module.params['name']
password = module.params['password']
ssl = module.params['ssl']
roles = module.params['roles'] or []
state = module.params['state']
update_password = module.params['update_password']
try:
connection_params = {
"host": login_host,
"port": int(login_port),
}
if replica_set:
connection_params["replicaset"] = replica_set
if ssl:
connection_params["ssl"] = ssl
connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs'])
client = MongoClient(**connection_params)
# NOTE: this check must be done ASAP.
        # We don't need to be authenticated (this ability was lost in PyMongo 3.6)
if LooseVersion(PyMongoVersion) <= LooseVersion('3.5'):
check_compatibility(module, client)
if login_user is None and login_password is None:
mongocnf_creds = load_mongocnf()
if mongocnf_creds is not False:
login_user = mongocnf_creds['user']
login_password = mongocnf_creds['password']
elif login_password is None or login_user is None:
module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
if login_user is not None and login_password is not None:
client.admin.authenticate(login_user, login_password, source=login_database)
elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
if db_name != "admin":
module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
# else: this has to be the first admin user added
except Exception as e:
module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
if state == 'present':
if password is None and update_password == 'always':
module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
try:
if update_password != 'always':
uinfo = user_find(client, user, db_name)
if uinfo:
password = None
if not check_if_roles_changed(uinfo, roles, db_name):
module.exit_json(changed=False, user=user)
if module.check_mode:
module.exit_json(changed=True, user=user)
user_add(module, client, db_name, user, password, roles)
except Exception as e:
module.fail_json(msg='Unable to add or update user: %s' % to_native(e), exception=traceback.format_exc())
finally:
try:
client.close()
except Exception:
pass
# Here we can check password change if mongo provide a query for that : https://jira.mongodb.org/browse/SERVER-22848
# newuinfo = user_find(client, user, db_name)
# if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
# module.exit_json(changed=False, user=user)
elif state == 'absent':
try:
user_remove(module, client, db_name, user)
except Exception as e:
module.fail_json(msg='Unable to remove user: %s' % to_native(e), exception=traceback.format_exc())
finally:
try:
client.close()
except Exception:
pass
module.exit_json(changed=True, user=user)
if __name__ == '__main__':
main()
|
maxamillion/ansible
|
test/support/integration/plugins/modules/mongodb_user.py
|
Python
|
gpl-3.0
| 16,253
|
import core.job
import core.implant
import uuid
class EnableRDesktopJob(core.job.Job):
def done(self):
self.display()
def display(self):
pass
#self.shell.print_plain(str(self.errno))
class EnableRDesktopImplant(core.implant.Implant):
NAME = "Enable Remote Desktop"
DESCRIPTION = "Enables RDP on the target system."
AUTHORS = ["RiskSense, Inc."]
def load(self):
self.options.register("ENABLE", "true", "toggle to enable or disable", enum=["true", "false"])
self.options.register("MODE", "", "the value for this script", hidden=True)
def run(self):
mode = "0" if self.options.get("ENABLE") == "true" else "1"
self.options.set("MODE", mode)
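        # ENABLE='true' maps to MODE='0' and ENABLE='false' maps to MODE='1';
        # MODE is the value consumed by the enable_rdesktop workload loaded below.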
workloads = {}
#workloads["vbs"] = self.load_script("data/implant/manage/enable_rdesktop.vbs", self.options)
workloads["js"] = self.loader.load_script("data/implant/manage/enable_rdesktop.js", self.options)
self.dispatch(workloads, EnableRDesktopJob)
|
TheNaterz/koadic
|
modules/implant/manage/enable_rdesktop.py
|
Python
|
apache-2.0
| 1,013
|
import os.path
link_download2="http://www.nyaa.se/?page=download&tid=540649"
rss_anime_koi="http://www.nyaa.se/?page=rss&user=176350"
#using feedparser
import feedparser
import urllib2
#unziped the gziped torrent
import gzip,urllib2,StringIO
#open rss of dengkun
d = feedparser.parse(rss_anime_koi)
#save path to directory
save_path="C:/output/"
#for i in range(0,3):
#getFile=d.entries[i].link
#create file with name str(i) and .torrent
#fileName=os.path.join(save_path,str(i)+".torrent")
#write file to above path and save_path
#file1=open(fileName,"w")
#write content
#file1.write(getFile)
#u = urllib2.urlopen(getFile)
#fileName.close()
#localFile = open(str(i)+'.txt', 'w')
#localFile.write(u.read())
#localFile.close()
#torrent file error bencoding
"""
for i in range(0,3):
getFile=d.entries[i].link
with open(str(i)+'.torrent', "w") as f:
f.write(urllib2.urlopen(getFile).read())
f.close()
"""
"""
getFile=d.entries[1].link
getFile=getFile.replace("#38;","")
with open(str(1)+'.torrent',"w") as f:
f.write(urllib2.urlopen(getFile).read())
f.close()
"""
numOfFiles=10
i=0
while i<numOfFiles:
i+=1
try:
url=d.entries[i].link
url=url.replace("#38;","")
req = urllib2.Request(url)
opener = urllib2.build_opener()
response = opener.open(req)
data = response.read()
        #use .get() so a missing content-encoding header means "not zipped"
        #instead of raising KeyError
        if response.info().get('content-encoding') == 'gzip':
            gzipper = gzip.GzipFile(fileobj=StringIO.StringIO(data))
            plain = gzipper.read()
            data = plain
        #output.write(data)
        with open(str(i)+'.torrent', "wb") as output:
            output.write(data)
            output.close()
except:
import webbrowser
new = 2 # open in a new tab, if possible
webbrowser.open(url,new=new)
print "completed download file "+str(i)
"""
from io import BytesIO
torrentURL=d.entries[1].link
correctURL=torrentURL.replace("#38;","")
torrent = urllib2.urlopen(correctURL, timeout=30)
buffer = BytesIO(torrent.read())
gz = gzip.GzipFile(fileobj=buffer)
output = open('1.torrent', 'wb')
output.write(gz.read())
"""
#not a zip file because link is not correct
#remove #38; in link tag to get correct link
#another approach
#just open url on browser
#browser will download
#http://pythonconquerstheuniverse.wordpress.com/2010/10/16/how-to-open-a-web-browser-from-python/
"""
torrentURL=d.entries[0].link
correctURL=torrentURL.replace("#38;","")
import webbrowser
new = 2 # open in a new tab, if possible
webbrowser.open(correctURL,new=new)
"""
#count length of items
#http://stackoverflow.com/questions/6483851/is-there-an-elegant-way-to-count-tag-elements-in-a-xml-file-using-lxml-in-python
#http://stackoverflow.com/questions/13355984/get-errors-when-import-lxml-etree-to-python
"""
from lxml import etree
doc = lxml.etree.parse(rss_dengkun)
count = doc.xpath('count(//item)')
print count
"""
#rss
#channel
#title
#link
#atom:link rel type
#description
#item
#title
#category
#link
#guid
#description
#pubDate
#/item
#/channel
#/rss
#with minidom
"""
import xml.dom.minidom
from xml.dom.minidom import Node
dom = xml.dom.minidom.parse("docmap.xml")
def getChildrenByTitle(node):
for child in node.childNodes:
if child.localName=='Title':
yield child
Topic=dom.getElementsByTagName('Topic')
for node in Topic:
alist=getChildrenByTitle(node)
for a in alist:
# Title= a.firstChild.data
Title= a.childNodes[0].nodeValue
print Title
"""
#time
#time.strptime(picturetime, "%I:%M:%S %p")
"""
if os.path.lexists(dest):
os.remove(dest)
os.symlink(src,dest)
"""
#download
#https://code.google.com/p/metalink-library/
#https://github.com/danfolkes/Magnet2Torrent
#http://dan.folkes.me/2012/04/19/converting-a-magnet-link-into-a-torrent/
#credits:
#http://askubuntu.com/questions/303478/torrents-downloaded-using-python-urllib2-fail-to-open-in-bittorrent-client
|
weric/NyaaSeParser
|
get_copy.py
|
Python
|
apache-2.0
| 3,870
|
#!/bin/python3
import sys
import itertools
def is_beautiful(s):
"""
Are the splits in `s` beautiful?
"""
leading_zeros = [d[0] == '0' for d in s]
increasing = [int(s[i - 1]) + 1 == int(s[i]) for i in range(1, len(s))]
res = not any(leading_zeros) and all(increasing)
if res: # match, look for index
return (res, s[0])
else:
return (res, None)
def separate_string(s, index):
"""
:param s:
:param index: indices at which to split the string
:return:
>>> separate_string('abcd', [1, 2])
['ab', 'c', 'd']
>>> separate_string('abcd', [0])
['a', 'bcd']
"""
# string from start to first index, rest of indices, then from last index til the end
# 'abcdefgh', [0, 3] -> Split after 0, and after 3
# 'a', 'bcd', ...
# print(s[:index[0] + 1])
# s_list = [s[:index[0]]]
# s_list = [s[:index[0] + 1]]
s_list = [(s[:index[0] + 1])] + \
[s[index[i] + 1: index[i + 1] + 1] for i in range(len(index) - 1)] + \
[s[index[-1] + 1:]]
# s_list.extend([s[index[i] + 1: index[i + 1] + 1] for i in range(len(index) - 1)])
# s_list.append(s[index[-1] + 1:])
return s_list
def get_indices(s):
"""
Gets all combinations of indices after which to split the string `s`
    e.g. "abc" -> [[0, 1], [0], [1]] means split "abc" after indices 0 and 1,
    after index 0 only, or after index 1 only
    :param s: string to split
    :return: list of list of indices at which to split `s`
    >>> get_indices('abc')
    [[0, 1], [0], [1]]
    """
bin_strings = itertools.product([True, False], repeat=len(s) - 1)
position_list = []
for bin_str in bin_strings:
position_list.append([index for index, bool in enumerate(bin_str) if bool])
return [p for p in position_list if len(p) > 0]
q = int(input().strip())
for a0 in range(q):
s = input().strip()
indices = get_indices(s)
li = [separate_string(s, i) for i in indices]
res = False
is_beautiful_res = [is_beautiful(l) for l in li]
if len(is_beautiful_res) > 0:
is_beautiful_bool, first_digit_list = zip(*is_beautiful_res)
try:
match_index = is_beautiful_bool.index(True)
print('YES %s' % first_digit_list[match_index])
except ValueError:
print('NO')
else:
print('NO')
# print('YES' if any([r[0] for r in is_beautiful_res]) else 'NO')
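# Worked example: for the query string "1234", get_indices() produces the
# split positions [0, 1, 2] among others, separate_string() turns them into
# ['1', '2', '3', '4'], is_beautiful() accepts that split (no leading zeros,
# strictly consecutive values), and the program prints "YES 1".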
|
JuanCTorres/interview-prep-solutions
|
hackerrank/src/algorithms/strings/separate_the_numbers.py
|
Python
|
mit
| 2,395
|
"""
Manage ECM Accounts.
"""
import collections
import shellish
from . import base
class Formatter(object):
terse_table_fields = (
(lambda x: x['name'], 'Name'),
(lambda x: x['id'], 'ID'),
(lambda x: len(x['groups']), 'Groups'),
(lambda x: x['customer']['customer_name'], 'Customer'),
(lambda x: x['customer']['contact_name'], 'Contact')
)
verbose_table_fields = (
(lambda x: x['name'], 'Name'),
(lambda x: x['id'], 'ID'),
(lambda x: len(x['groups']), 'Groups'),
(lambda x: x['routers_count'], 'Routers'),
(lambda x: x['user_profiles_count'], 'Users'),
(lambda x: x['subaccounts_count'], 'Subaccounts'),
(lambda x: x['customer']['customer_name'], 'Customer'),
(lambda x: x['customer']['contact_name'], 'Contact')
)
expands = [
'groups',
'customer',
]
def setup_args(self, parser):
self.add_argument('-v', '--verbose', action='store_true')
self.inject_table_factory()
super().setup_args(parser)
def prerun(self, args):
self.verbose = args.verbose
if args.verbose:
self.formatter = self.verbose_formatter
self.table_fields = self.verbose_table_fields
else:
self.formatter = self.terse_formatter
self.table_fields = self.terse_table_fields
self.table = self.make_table(headers=[x[1] for x in self.table_fields],
accessors=[self.safe_get(x[0], '')
for x in self.table_fields])
super().prerun(args)
def safe_get(self, func, default=None):
def fn(x):
try:
return func(x)
except:
return default
return fn
def bundle(self, account):
if self.verbose:
counts = ['routers', 'user_profiles', 'subaccounts']
for x in counts:
n = self.api.get(urn=account[x], count='id')[0]['id_count']
account['%s_count' % x] = n
account['groups_count'] = len(account['groups'])
return account
def terse_formatter(self, account):
return '%(name)s (id:%(id)s)' % account
def verbose_formatter(self, account):
return '%(name)s (id:%(id)s, routers:%(routers_count)d ' \
'groups:%(groups_count)d, users:%(user_profiles_count)d, ' \
'subaccounts:%(subaccounts_count)d)' % account
class Tree(Formatter, base.ECMCommand):
""" Show account Tree """
name = 'tree'
def setup_args(self, parser):
self.add_account_argument(nargs='?')
super().setup_args(parser)
def run(self, args):
if args.ident:
root_id = self.api.get_by_id_or_name('accounts', args.ident)['id']
else:
root_id = None
self.show_tree(root_id)
def show_tree(self, root_id):
""" Huge page size for accounts costs nearly nothing, but api calls
are extremely expensive. The fastest and best way to get accounts and
their descendants is to get massive pages from the root level, which
already include descendants; Build our own tree and do account level
filtering client-side. This theory is proven as of ECM 7-18-2015. """
expands = ','.join(self.expands)
accounts_pager = self.api.get_pager('accounts', expand=expands,
page_size=10000)
accounts = dict((x['resource_uri'], x) for x in accounts_pager)
root_ref = root = {"node": shellish.TreeNode('root')}
for uri, x in accounts.items():
parent = accounts.get(x['account'], root)
if 'node' not in parent:
parent['node'] = shellish.TreeNode(parent)
if 'node' not in x:
x['node'] = shellish.TreeNode(x)
parent['node'].children.append(x['node'])
if root_id is not None and x['id'] == root_id:
root_ref = x
if root_ref == root:
root_ref = root['node'].children
else:
root_ref = [root_ref['node']]
formatter = lambda x: self.formatter(self.bundle(x.value))
t = shellish.Tree(formatter=formatter,
sort_key=lambda x: x.value['id'])
for x in t.render(root_ref):
print(x)
class List(Formatter, base.ECMCommand):
""" List accounts. """
name = 'ls'
def setup_args(self, parser):
self.add_account_argument('idents', nargs='*')
super().setup_args(parser)
def run(self, args):
expands = ','.join(self.expands)
if args.idents:
accounts = [self.api.get_by_id_or_name('accounts', x,
expand=expands)
for x in args.idents]
else:
accounts = self.api.get_pager('accounts', expand=expands)
with self.table as t:
t.print(map(self.bundle, accounts))
class Create(base.ECMCommand):
""" Create account """
name = 'create'
def setup_args(self, parser):
self.add_account_argument('-p', '--parent',
metavar="PARENT_ACCOUNT_ID_OR_NAME")
self.add_argument('name', metavar='NAME')
def run(self, args):
new_account = {
"name": args.name
}
if args.parent:
account = self.api.get_by_id_or_name('accounts', args.parent)
if not account:
raise SystemExit("Account not found: %s" % args.parent)
new_account['account'] = account['resource_uri']
self.api.post('accounts', new_account)
class Remove(base.ECMCommand):
""" Remove an account """
name = 'rm'
use_pager = False
def setup_args(self, parser):
self.add_account_argument('idents', nargs='+')
self.add_argument('-f', '--force', action='store_true',
help='Do not prompt for confirmation')
self.add_argument('-r', '--recursive', action='store_true',
help='Remove all subordinate resources too.')
def run(self, args):
for x in args.idents:
account = self.api.get_by_id_or_name('accounts', x)
if args.recursive:
resources = self.get_subordinates(account)
else:
resources = {}
if not args.force:
if resources:
r = resources
self.confirm('Confirm removal of "%s" along with %d '
'subaccounts, %d groups, %d routers and %d '
'users' %
(account['name'], len(r['subaccounts']),
len(r['groups']), len(r['routers']),
len(r['users'])))
else:
self.confirm('Confirm account removal: %s (%s)' % (
account['name'], account['id']))
if resources:
for res in ('users', 'routers', 'groups', 'subaccounts'):
for x in resources[res]:
self.api.delete(urn=x)
self.api.delete('accounts', account['id'])
def get_subordinates(self, account):
""" Recursively look for resources underneath this account. """
resources = collections.defaultdict(list)
for x in self.api.get_pager(urn=account['subaccounts']):
for res, items in self.get_subordinates(x).items():
resources[res].extend(items)
resources['subaccounts'].append(x['resource_uri'])
for x in self.api.get_pager(urn=account['groups']):
resources['groups'].append(x['resource_uri'])
for x in self.api.get_pager(urn=account['routers']):
resources['routers'].append(x['resource_uri'])
for x in self.api.get_pager(urn=account['user_profiles']):
resources['users'].append(x['user'])
return resources
class Move(base.ECMCommand):
""" Move account to new parent account """
name = 'mv'
def setup_args(self, parser):
self.add_account_argument()
self.add_account_argument('new_parent',
metavar='NEW_PARENT_ID_OR_NAME')
def run(self, args):
account = self.api.get_by_id_or_name('accounts', args.ident)
new_parent = self.api.get_by_id_or_name('accounts', args.new_parent)
self.api.put('accounts', account['id'],
{"account": new_parent['resource_uri']})
class Rename(base.ECMCommand):
""" Rename an account """
name = 'rename'
def setup_args(self, parser):
self.add_account_argument()
self.add_argument('new_name', metavar='NEW_NAME')
def run(self, args):
account = self.api.get_by_id_or_name('accounts', args.ident)
self.api.put('accounts', account['id'], {"name": args.new_name})
class Search(Formatter, base.ECMCommand):
""" Search for account(s) """
name = 'search'
def setup_args(self, parser):
expands = ','.join(self.expands)
searcher = self.make_searcher('accounts', ['name'], expand=expands)
self.lookup = searcher.lookup
self.add_search_argument(searcher)
super().setup_args(parser)
def run(self, args):
results = self.lookup(args.search)
if not results:
raise SystemExit("No results for: %s" % ' '.join(args.search))
with self.table as t:
t.print(map(self.bundle, results))
class Accounts(base.ECMCommand):
""" Manage ECM Accounts. """
name = 'accounts'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_subcommand(List, default=True)
self.add_subcommand(Tree)
self.add_subcommand(Create)
self.add_subcommand(Remove)
self.add_subcommand(Move)
self.add_subcommand(Rename)
self.add_subcommand(Search)
command_classes = [Accounts]
|
mayfield/ecmcli
|
ecmcli/commands/accounts.py
|
Python
|
mit
| 10,177
|
from django.template import RequestContext
from django.utils.timezone import now
from django.views.decorators.cache import cache_control
from bulbs.content.views import ContentListView
class RSSView(ContentListView):
"""Really simply, this syndicates Content."""
template_name = "feeds/rss.xml"
paginate_by = 20
feed_title = "RSS Feed"
utm_params = "utm_medium=RSS&utm_campaign=feeds"
def get_template_names(self):
return ["feeds/rss.xml", "feeds/_rss.xml"]
@cache_control(max_age=600)
def get(self, request, *args, **kwargs):
response = super(RSSView, self).get(request, *args, **kwargs)
response["Content-Type"] = "application/rss+xml"
return response
def get_queryset(self):
return super(RSSView, self).get_queryset().full().execute()[:self.paginate_by]
def get_context_data(self, *args, **kwargs):
context = super(RSSView, self).get_context_data(*args, **kwargs)
context["full"] = (self.request.GET.get("full", "false").lower() == "true")
context["images"] = (self.request.GET.get("images", "false").lower() == "true")
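        # e.g. requesting the feed as <feed-url>?full=true&images=true turns
        # both of these flags on (the query-string names come from the two
        # lines above; the URL itself depends on how the view is routed).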
context["build_date"] = now()
context["title"] = self.feed_title
context["feed_url"] = self.request.build_absolute_uri()
context["search_url"] = self.request.build_absolute_uri(
u"/search?%s" % self.request.META["QUERY_STRING"])
# OK, so this is kinda brutal. Stay with me here.
for content in context["page_obj"].object_list:
feed_path = content.get_absolute_url() + "?" + self.utm_params
content.feed_url = self.request.build_absolute_uri(feed_path)
return RequestContext(self.request, context)
|
pombredanne/django-bulbs
|
bulbs/feeds/views.py
|
Python
|
mit
| 1,729
|
#!/usr/bin/env python
# Copyright (C) 2009-2010:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestSnapshot(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_snapshot.cfg')
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("GotSNAP")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("GotSNAP", "SRV")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(5, [[host, 2, 'DOWN'], [svc, 2, 'BAD | value1=0 value2=0']])
self.assertEqual('DOWN', host.state)
self.assertEqual('HARD', host.state_type)
self.assert_any_log_match('HOST SNAPSHOT.*')
self.assert_log_match(2, 'HOST SNAPSHOT.*')
self.assert_any_log_match('SERVICE SNAPSHOT.*')
self.assert_log_match(4, 'SERVICE SNAPSHOT.*')
self.show_and_clear_logs()
broks = self.sched.broks.values()
[b.prepare() for b in broks]
types = set([b.type for b in broks])
print types
self.assertIn('service_snapshot', types)
self.assertIn('host_snapshot', types)
if __name__ == '__main__':
unittest.main()
|
rledisez/shinken
|
test/test_snapshot.py
|
Python
|
agpl-3.0
| 2,317
|
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of visclaw.data.ClawPlotData.
Output: a modified version of plotdata.
"""
from clawpack.visclaw import colormaps
plotdata.clearfigures() # clear any old figures,axes,items data
# Figure for pcolor plot
plotfigure = plotdata.new_plotfigure(name='pcolor1', figno=0)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [0,1]
plotaxes.ylimits = [0,1]
plotaxes.title = 'Solution'
plotaxes.scaled = True
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = 1
plotitem.pcolor_cmap = colormaps.red_yellow_blue
plotitem.pcolor_cmin = -1.5
plotitem.pcolor_cmax = 1.5
plotitem.add_colorbar = True
plotitem.amr_celledges_show = [0]
plotitem.amr_patchedges_show = [0]
# Figure for pcolor plot
plotfigure = plotdata.new_plotfigure(name='pcolor2', figno=10)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [0,1]
plotaxes.ylimits = [0,1]
plotaxes.title = 'Solution'
plotaxes.scaled = True
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = 2
plotitem.pcolor_cmap = colormaps.red_yellow_blue
plotitem.pcolor_cmin = -1.
plotitem.pcolor_cmax = 1.
plotitem.add_colorbar = True
plotitem.amr_celledges_show = [0]
plotitem.amr_patchedges_show = [0]
#-----------------------------------------
# Figures for gauges
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='q', figno=300, \
type='each_gauge')
plotfigure.clf_each_gauge = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = 'auto'
plotaxes.ylimits = 'auto'
plotaxes.title = 'q'
# Plot q as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 0
plotitem.plotstyle = 'b-'
# Parameters used only when creating html and/or latex hardcopy
# e.g., via visclaw.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.html_movie = 'JSAnimation' # new style, or "4.x" for old style
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
return plotdata
# To plot gauge locations on pcolor or contour plot, use this as
# an afteraxis function:
def addgauges(current_data):
from clawpack.visclaw import gaugetools
gaugetools.plot_gauge_locations(current_data.plotdata, \
gaugenos='all', format_string='ko', add_labels=True)
|
torrilhon/MyClawpack
|
examples/stokes_2d_drivencavity/setplot.py
|
Python
|
gpl-3.0
| 3,642
|
from typing import cast
from hedgehog.utils import protobuf
from ..proto import hedgehog_pb2
__all__ = ['Message', 'SimpleMessage', 'ContainerMessage', 'RequestMsg', 'ReplyMsg']
class Message(protobuf.Message):
is_async = False
@classmethod
def msg_name(cls):
module, name = cls.__module__, cls.__name__
module = module[module.rindex('.') + 1:]
return f'{module}.{name}'
def __repr__(self):
field_pairs = ((field, getattr(self, field)) for field in self.meta.fields)
field_reprs = ', '.join(f'{field}={value!r}' for field, value in field_pairs)
return f'{self.msg_name()}({field_reprs})'
class SimpleMessage(Message, protobuf.SimpleMessageMixin):
pass
class ContainerMessage(protobuf.ContainerMessage):
def parse(self, data: bytes) -> Message:
return cast(Message, super(ContainerMessage, self).parse(data))
RequestMsg = ContainerMessage(hedgehog_pb2.HedgehogMessage)
ReplyMsg = ContainerMessage(hedgehog_pb2.HedgehogMessage)
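# msg_name() keeps only the last component of the defining module: a Message
# subclass Foo living in a (hypothetical) submodule `ack` reports the name
# 'ack.Foo', and __repr__ lists that subclass's meta fields as keyword
# arguments.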
|
PRIArobotics/HedgehogProtocol
|
hedgehog/protocol/messages/__init__.py
|
Python
|
agpl-3.0
| 1,018
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
CONF = cfg.CONF
class PluginBase(metaclass=abc.ABCMeta):
"""Base class for all VIF plugins."""
# Override to provide a tuple of oslo_config.Opt instances for
# the plugin config parameters
CONFIG_OPTS = ()
def __init__(self, config):
"""
Initialize the plugin object with the provided config
:param config: ``oslo_config.ConfigOpts.GroupAttr`` instance:
"""
self.config = config
@abc.abstractmethod
def describe(self):
"""
Return an object that describes the plugin's supported vif types and
the earliest/latest known VIF object versions.
:returns: A ``os_vif.objects.host_info.HostPluginInfo`` instance
"""
@abc.abstractmethod
def plug(self, vif, instance_info):
"""
Given a model of a VIF, perform operations to plug the VIF properly.
:param vif: ``os_vif.objects.vif.VIFBase`` object.
:param instance_info: ``os_vif.objects.instance_info.InstanceInfo``
object.
:raises ``processutils.ProcessExecutionError``. Plugins implementing
this method should let `processutils.ProcessExecutionError`
bubble up.
"""
@abc.abstractmethod
def unplug(self, vif, instance_info):
"""
Given a model of a VIF, perform operations to unplug the VIF properly.
:param vif: ``os_vif.objects.vif.VIFBase`` object.
:param instance_info: ``os_vif.objects.instance_info.InstanceInfo``
object.
:raises ``processutils.ProcessExecutionError``. Plugins implementing
this method should let ``processutils.ProcessExecutionError``
bubble up.
"""
@classmethod
def load(cls, plugin_name):
"""
Load a plugin, registering its configuration options
:param plugin_name: the name of the plugin extension
:returns: an initialized instance of the class
"""
cfg_group_name = "os_vif_" + plugin_name
cfg_opts = getattr(cls, "CONFIG_OPTS")
cfg_vals = None
if cfg_opts and len(cfg_opts) > 0:
cfg_group = cfg.OptGroup(
cfg_group_name,
"os-vif plugin %s options" % plugin_name)
CONF.register_opts(cfg_opts, group=cfg_group)
cfg_vals = getattr(CONF, cfg_group_name)
return cls(cfg_vals)
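# A minimal illustrative subclass: the class name, option and no-op behaviour
# below are hypothetical and only sketch the shape a real plugin takes.
class _ExampleNoopPlugin(PluginBase):
    CONFIG_OPTS = (
        cfg.BoolOpt('noop_enabled', default=True,
                    help='Hypothetical option used only by this sketch.'),
    )

    def describe(self):
        # A real plugin returns an os_vif HostPluginInfo object here.
        raise NotImplementedError

    def plug(self, vif, instance_info):
        pass  # a real plugin would create the host-side device for the VIF

    def unplug(self, vif, instance_info):
        pass  # a real plugin would tear the host-side device down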
|
openstack/os-vif
|
os_vif/plugin.py
|
Python
|
apache-2.0
| 3,032
|
import mock, pytest, requests
from pytest_mock import mocker
from holiday_bot import nlp, slackbot_settings
def test_attempts_request(mocker):
mocker.patch('requests.get')
nlp.query(3, 'Text to parse...')
requests.get.assert_called_once_with('https://api.api.ai/api/query', headers={'Authorization': 'Bearer ' + slackbot_settings.API_AI_KEY}, params={'v': 20150910, 'query': 'Text to parse...', 'lang': 'en', 'sessionId': 3})
|
gytdau/holiday-thingy
|
tests/test_nlp.py
|
Python
|
mit
| 440
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import six
# import types so that we can reference ListType in sphinx param declarations.
# We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list
import types # noqa
import warnings
import dogpile.cache
import munch
import requests.models
import requestsexceptions
import keystoneauth1.exceptions
import keystoneauth1.session
from openstack import _log
from openstack.cloud import exc
from openstack.cloud import _floating_ip
from openstack.cloud import _object_store
from openstack.cloud import meta
from openstack.cloud import _utils
import openstack.config
from openstack.config import cloud_region as cloud_region_mod
from openstack import proxy
DEFAULT_SERVER_AGE = 5
DEFAULT_PORT_AGE = 5
DEFAULT_FLOAT_AGE = 5
_CONFIG_DOC_URL = _floating_ip._CONFIG_DOC_URL
DEFAULT_OBJECT_SEGMENT_SIZE = _object_store.DEFAULT_OBJECT_SEGMENT_SIZE
# This halves the current default for Swift
DEFAULT_MAX_FILE_SIZE = _object_store.DEFAULT_MAX_FILE_SIZE
OBJECT_CONTAINER_ACLS = _object_store.OBJECT_CONTAINER_ACLS
class _OpenStackCloudMixin(object):
"""Represent a connection to an OpenStack Cloud.
OpenStackCloud is the entry point for all cloud operations, regardless
of which OpenStack service those operations may ultimately come from.
The operations on an OpenStackCloud are resource oriented rather than
REST API operation oriented. For instance, one will request a Floating IP
and that Floating IP will be actualized either via neutron or via nova
depending on how this particular cloud has decided to arrange itself.
:param bool strict: Only return documented attributes for each resource
as per the Data Model contract. (Default False)
"""
_OBJECT_MD5_KEY = 'x-object-meta-x-sdk-md5'
_OBJECT_SHA256_KEY = 'x-object-meta-x-sdk-sha256'
_OBJECT_AUTOCREATE_KEY = 'x-object-meta-x-sdk-autocreated'
_OBJECT_AUTOCREATE_CONTAINER = 'images'
# NOTE(shade) shade keys were x-object-meta-x-shade-md5 - we need to check
# those in freshness checks so that a shade->sdk transition
# doesn't result in a re-upload
_SHADE_OBJECT_MD5_KEY = 'x-object-meta-x-shade-md5'
_SHADE_OBJECT_SHA256_KEY = 'x-object-meta-x-shade-sha256'
_SHADE_OBJECT_AUTOCREATE_KEY = 'x-object-meta-x-shade-autocreated'
def __init__(self):
super(_OpenStackCloudMixin, self).__init__()
self.log = _log.setup_logging('openstack')
self.name = self.config.name
self.auth = self.config.get_auth_args()
self.default_interface = self.config.get_interface()
self.force_ipv4 = self.config.force_ipv4
(self.verify, self.cert) = self.config.get_requests_verify_args()
# Turn off urllib3 warnings about insecure certs if we have
# explicitly configured requests to tell it we do not want
# cert verification
if not self.verify:
self.log.debug(
"Turning off Insecure SSL warnings since verify=False")
category = requestsexceptions.InsecureRequestWarning
if category:
# InsecureRequestWarning references a Warning class or is None
warnings.filterwarnings('ignore', category=category)
self._disable_warnings = {}
cache_expiration_time = int(self.config.get_cache_expiration_time())
cache_class = self.config.get_cache_class()
cache_arguments = self.config.get_cache_arguments()
self._resource_caches = {}
if cache_class != 'dogpile.cache.null':
self.cache_enabled = True
self._cache = self._make_cache(
cache_class, cache_expiration_time, cache_arguments)
expirations = self.config.get_cache_expirations()
for expire_key in expirations.keys():
# Only build caches for things we have list operations for
if getattr(
self, 'list_{0}'.format(expire_key), None):
self._resource_caches[expire_key] = self._make_cache(
cache_class, expirations[expire_key], cache_arguments)
self._SERVER_AGE = DEFAULT_SERVER_AGE
self._PORT_AGE = DEFAULT_PORT_AGE
self._FLOAT_AGE = DEFAULT_FLOAT_AGE
else:
self.cache_enabled = False
def _fake_invalidate(unused):
pass
class _FakeCache(object):
def invalidate(self):
pass
# Don't cache list_servers if we're not caching things.
# Replace this with a more specific cache configuration
# soon.
self._SERVER_AGE = 0
self._PORT_AGE = 0
self._FLOAT_AGE = 0
self._cache = _FakeCache()
# Undecorate cache decorated methods. Otherwise the call stacks
# wind up being stupidly long and hard to debug
for method in _utils._decorated_methods:
meth_obj = getattr(self, method, None)
if not meth_obj:
continue
if (hasattr(meth_obj, 'invalidate')
and hasattr(meth_obj, 'func')):
new_func = functools.partial(meth_obj.func, self)
new_func.invalidate = _fake_invalidate
setattr(self, method, new_func)
# If server expiration time is set explicitly, use that. Otherwise
# fall back to whatever it was before
self._SERVER_AGE = self.config.get_cache_resource_expiration(
'server', self._SERVER_AGE)
self._PORT_AGE = self.config.get_cache_resource_expiration(
'port', self._PORT_AGE)
self._FLOAT_AGE = self.config.get_cache_resource_expiration(
'floating_ip', self._FLOAT_AGE)
self._container_cache = dict()
self._file_hash_cache = dict()
# self.__pool_executor = None
self._raw_clients = {}
self._local_ipv6 = (
_utils.localhost_supports_ipv6() if not self.force_ipv4 else False)
def connect_as(self, **kwargs):
"""Make a new OpenStackCloud object with new auth context.
Take the existing settings from the current cloud and construct a new
OpenStackCloud object with some of the auth settings overridden. This
is useful for getting an object to perform tasks with as another user,
or in the context of a different project.
.. code-block:: python
conn = openstack.connect(cloud='example')
# Work normally
servers = conn.list_servers()
conn2 = conn.connect_as(username='different-user', password='')
# Work as different-user
servers = conn2.list_servers()
:param kwargs: keyword arguments can contain anything that would
normally go in an auth dict. They will override the same
settings from the parent cloud as appropriate. Entries
            that should not be overridden can be omitted.
"""
if self.config._openstack_config:
config = self.config._openstack_config
else:
# TODO(mordred) Replace this with from_session
config = openstack.config.OpenStackConfig(
app_name=self.config._app_name,
app_version=self.config._app_version,
load_yaml_config=False)
params = copy.deepcopy(self.config.config)
        # Remove profile from current cloud so that overriding works
params.pop('profile', None)
# Utility function to help with the stripping below.
def pop_keys(params, auth, name_key, id_key):
if name_key in auth or id_key in auth:
params['auth'].pop(name_key, None)
params['auth'].pop(id_key, None)
# If there are user, project or domain settings in the incoming auth
# dict, strip out both id and name so that a user can say:
# cloud.connect_as(project_name='foo')
# and have that work with clouds that have a project_id set in their
# config.
for prefix in ('user', 'project'):
if prefix == 'user':
name_key = 'username'
else:
name_key = 'project_name'
id_key = '{prefix}_id'.format(prefix=prefix)
pop_keys(params, kwargs, name_key, id_key)
id_key = '{prefix}_domain_id'.format(prefix=prefix)
name_key = '{prefix}_domain_name'.format(prefix=prefix)
pop_keys(params, kwargs, name_key, id_key)
for key, value in kwargs.items():
params['auth'][key] = value
cloud_region = config.get_one(**params)
# Attach the discovery cache from the old session so we won't
# double discover.
cloud_region._discovery_cache = self.session._discovery_cache
# Override the cloud name so that logging/location work right
cloud_region._name = self.name
cloud_region.config['profile'] = self.name
        # Use self.__class__ so that we return whatever this is, like if it's
# a subclass in the case of shade wrapping sdk.
return self.__class__(config=cloud_region)
def connect_as_project(self, project):
"""Make a new OpenStackCloud object with a new project.
Take the existing settings from the current cloud and construct a new
OpenStackCloud object with the project settings overridden. This
is useful for getting an object to perform tasks with as another user,
or in the context of a different project.
.. code-block:: python
cloud = openstack.connect(cloud='example')
# Work normally
servers = cloud.list_servers()
cloud2 = cloud.connect_as_project('different-project')
# Work in different-project
servers = cloud2.list_servers()
:param project: Either a project name or a project dict as returned by
`list_projects`.
"""
auth = {}
if isinstance(project, dict):
auth['project_id'] = project.get('id')
auth['project_name'] = project.get('name')
if project.get('domain_id'):
auth['project_domain_id'] = project['domain_id']
else:
auth['project_name'] = project
return self.connect_as(**auth)
def global_request(self, global_request_id):
"""Make a new Connection object with a global request id set.
Take the existing settings from the current Connection and construct a
new Connection object with the global_request_id overridden.
.. code-block:: python
from oslo_context import context
cloud = openstack.connect(cloud='example')
# Work normally
servers = cloud.list_servers()
cloud2 = cloud.global_request(context.generate_request_id())
# cloud2 sends all requests with global_request_id set
servers = cloud2.list_servers()
Additionally, this can be used as a context manager:
.. code-block:: python
from oslo_context import context
c = openstack.connect(cloud='example')
# Work normally
servers = c.list_servers()
with c.global_request(context.generate_request_id()) as c2:
# c2 sends all requests with global_request_id set
servers = c2.list_servers()
:param global_request_id: The `global_request_id` to send.
"""
params = copy.deepcopy(self.config.config)
cloud_region = cloud_region_mod.from_session(
session=self.session,
app_name=self.config._app_name,
app_version=self.config._app_version,
discovery_cache=self.session._discovery_cache,
**params)
# Override the cloud name so that logging/location work right
cloud_region._name = self.name
cloud_region.config['profile'] = self.name
# Use self.__class__ so that we return whatever this is, like if it's
# a subclass in the case of shade wrapping sdk.
new_conn = self.__class__(config=cloud_region)
new_conn.set_global_request_id(global_request_id)
return new_conn
def _make_cache(self, cache_class, expiration_time, arguments):
return dogpile.cache.make_region(
function_key_generator=self._make_cache_key
).configure(
cache_class,
expiration_time=expiration_time,
arguments=arguments)
def _make_cache_key(self, namespace, fn):
fname = fn.__name__
if namespace is None:
name_key = self.name
else:
name_key = '%s:%s' % (self.name, namespace)
def generate_key(*args, **kwargs):
arg_key = ','.join(args)
kw_keys = sorted(kwargs.keys())
kwargs_key = ','.join(
['%s:%s' % (k, kwargs[k]) for k in kw_keys if k != 'cache'])
ans = "_".join(
[str(name_key), fname, arg_key, kwargs_key])
return ans
return generate_key
def _get_cache(self, resource_name):
if resource_name and resource_name in self._resource_caches:
return self._resource_caches[resource_name]
else:
return self._cache
def _get_major_version_id(self, version):
if isinstance(version, int):
return version
elif isinstance(version, six.string_types + (tuple,)):
return int(version[0])
return version
def _get_versioned_client(
self, service_type, min_version=None, max_version=None):
config_version = self.config.get_api_version(service_type)
config_major = self._get_major_version_id(config_version)
max_major = self._get_major_version_id(max_version)
min_major = self._get_major_version_id(min_version)
# TODO(shade) This should be replaced with use of Connection. However,
# we need to find a sane way to deal with this additional
# logic - or we need to give up on it. If we give up on it,
# we need to make sure we can still support it in the shade
# compat layer.
# NOTE(mordred) This logic for versions is slightly different
# than the ksa Adapter constructor logic. openstack.cloud knows the
# versions it knows, and uses them when it detects them. However, if
# a user requests a version, and it's not found, and a different one
# openstack.cloud does know about is found, that's a warning in
# openstack.cloud.
if config_version:
if min_major and config_major < min_major:
raise exc.OpenStackCloudException(
"Version {config_version} requested for {service_type}"
" but shade understands a minimum of {min_version}".format(
config_version=config_version,
service_type=service_type,
min_version=min_version))
elif max_major and config_major > max_major:
raise exc.OpenStackCloudException(
"Version {config_version} requested for {service_type}"
" but openstack.cloud understands a maximum of"
" {max_version}".format(
config_version=config_version,
service_type=service_type,
max_version=max_version))
request_min_version = config_version
request_max_version = '{version}.latest'.format(
version=config_major)
adapter = proxy._ShadeAdapter(
session=self.session,
service_type=self.config.get_service_type(service_type),
service_name=self.config.get_service_name(service_type),
interface=self.config.get_interface(service_type),
endpoint_override=self.config.get_endpoint(service_type),
region_name=self.config.get_region_name(service_type),
statsd_prefix=self.config.get_statsd_prefix(),
statsd_client=self.config.get_statsd_client(),
prometheus_counter=self.config.get_prometheus_counter(),
prometheus_histogram=self.config.get_prometheus_histogram(),
min_version=request_min_version,
max_version=request_max_version)
if adapter.get_endpoint():
return adapter
adapter = proxy._ShadeAdapter(
session=self.session,
service_type=self.config.get_service_type(service_type),
service_name=self.config.get_service_name(service_type),
interface=self.config.get_interface(service_type),
endpoint_override=self.config.get_endpoint(service_type),
region_name=self.config.get_region_name(service_type),
min_version=min_version,
max_version=max_version)
# data.api_version can be None if no version was detected, such
# as with neutron
api_version = adapter.get_api_major_version(
endpoint_override=self.config.get_endpoint(service_type))
api_major = self._get_major_version_id(api_version)
# If we detect a different version that was configured, warn the user.
# shade still knows what to do - but if the user gave us an explicit
# version and we couldn't find it, they may want to investigate.
if api_version and config_version and (api_major != config_major):
warning_msg = (
'{service_type} is configured for {config_version}'
' but only {api_version} is available. shade is happy'
' with this version, but if you were trying to force an'
' override, that did not happen. You may want to check'
' your cloud, or remove the version specification from'
' your config.'.format(
service_type=service_type,
config_version=config_version,
api_version='.'.join([str(f) for f in api_version])))
self.log.debug(warning_msg)
warnings.warn(warning_msg)
return adapter
# TODO(shade) This should be replaced with using openstack Connection
# object.
def _get_raw_client(
self, service_type, api_version=None, endpoint_override=None):
return proxy._ShadeAdapter(
session=self.session,
service_type=self.config.get_service_type(service_type),
service_name=self.config.get_service_name(service_type),
interface=self.config.get_interface(service_type),
endpoint_override=self.config.get_endpoint(
service_type) or endpoint_override,
region_name=self.config.get_region_name(service_type))
def _is_client_version(self, client, version):
client_name = '_{client}_client'.format(
client=client.replace('-', '_'))
client = getattr(self, client_name)
return client._version_matches(version)
@property
def _application_catalog_client(self):
if 'application-catalog' not in self._raw_clients:
self._raw_clients['application-catalog'] = self._get_raw_client(
'application-catalog')
return self._raw_clients['application-catalog']
@property
def _database_client(self):
if 'database' not in self._raw_clients:
self._raw_clients['database'] = self._get_raw_client('database')
return self._raw_clients['database']
@property
def _raw_image_client(self):
if 'raw-image' not in self._raw_clients:
image_client = self._get_raw_client('image')
self._raw_clients['raw-image'] = image_client
return self._raw_clients['raw-image']
def pprint(self, resource):
"""Wrapper around pprint that groks munch objects"""
# import late since this is a utility function
import pprint
new_resource = _utils._dictify_resource(resource)
pprint.pprint(new_resource)
def pformat(self, resource):
"""Wrapper around pformat that groks munch objects"""
# import late since this is a utility function
import pprint
new_resource = _utils._dictify_resource(resource)
return pprint.pformat(new_resource)
@property
def _keystone_catalog(self):
return self.session.auth.get_access(self.session).service_catalog
@property
def service_catalog(self):
return self._keystone_catalog.catalog
def endpoint_for(self, service_type, interface=None, region_name=None):
"""Return the endpoint for a given service.
Respects config values for Connection, including
``*_endpoint_override``. For direct values from the catalog
regardless of overrides, see
:meth:`~openstack.config.cloud_region.CloudRegion.get_endpoint_from_catalog`
:param service_type: Service Type of the endpoint to search for.
:param interface:
Interface of the endpoint to search for. Optional, defaults to
the configured value for interface for this Connection.
:param region_name:
Region Name of the endpoint to search for. Optional, defaults to
the configured value for region_name for this Connection.
:returns: The endpoint of the service, or None if not found.
"""
endpoint_override = self.config.get_endpoint(service_type)
if endpoint_override:
return endpoint_override
return self.config.get_endpoint_from_catalog(
service_type=service_type,
interface=interface,
region_name=region_name)
@property
def auth_token(self):
# Keystone's session will reuse a token if it is still valid.
# We don't need to track validity here, just get_token() each time.
return self.session.get_token()
@property
def current_user_id(self):
"""Get the id of the currently logged-in user from the token."""
return self.session.auth.get_access(self.session).user_id
@property
def current_project_id(self):
"""Get the current project ID.
Returns the project_id of the current token scope. None means that
the token is domain scoped or unscoped.
:raises keystoneauth1.exceptions.auth.AuthorizationFailure:
if a new token fetch fails.
:raises keystoneauth1.exceptions.auth_plugins.MissingAuthPlugin:
if a plugin is not available.
"""
return self.session.get_project_id()
@property
def current_project(self):
"""Return a ``munch.Munch`` describing the current project"""
return self._get_project_info()
def _get_project_info(self, project_id=None):
project_info = munch.Munch(
id=project_id,
name=None,
domain_id=None,
domain_name=None,
)
if not project_id or project_id == self.current_project_id:
# If we don't have a project_id parameter, it means a user is
# directly asking what the current state is.
# Alternately, if we have one, that means we're calling this
# from within a normalize function, which means the object has
# a project_id associated with it. If the project_id matches
# the project_id of our current token, that means we can supplement
# the info with human readable info about names if we have them.
# If they don't match, that means we're an admin who has pulled
# an object from a different project, so adding info from the
# current token would be wrong.
auth_args = self.config.config.get('auth', {})
project_info['id'] = self.current_project_id
project_info['name'] = auth_args.get('project_name')
project_info['domain_id'] = auth_args.get('project_domain_id')
project_info['domain_name'] = auth_args.get('project_domain_name')
return project_info
@property
def current_location(self):
"""Return a ``munch.Munch`` explaining the current cloud location."""
return self._get_current_location()
def _get_current_location(self, project_id=None, zone=None):
return munch.Munch(
cloud=self.name,
# TODO(efried): This is wrong, but it only seems to be used in a
# repr; can we get rid of it?
region_name=self.config.get_region_name(),
zone=zone,
project=self._get_project_info(project_id),
)
def _get_identity_location(self):
'''Identity resources do not exist inside of projects.'''
return munch.Munch(
cloud=self.name,
region_name=None,
zone=None,
project=munch.Munch(
id=None,
name=None,
domain_id=None,
domain_name=None))
def _get_project_id_param_dict(self, name_or_id):
if name_or_id:
project = self.get_project(name_or_id)
if not project:
return {}
if self._is_client_version('identity', 3):
return {'default_project_id': project['id']}
else:
return {'tenant_id': project['id']}
else:
return {}
def _get_domain_id_param_dict(self, domain_id):
"""Get a useable domain."""
# Keystone v3 requires domains for user and project creation. v2 does
# not. However, keystone v2 does not allow user creation by non-admin
        # users, so we can raise an error to the user that does not need
        # to mention API versions.
if self._is_client_version('identity', 3):
if not domain_id:
raise exc.OpenStackCloudException(
"User or project creation requires an explicit"
" domain_id argument.")
else:
return {'domain_id': domain_id}
else:
return {}
def _get_identity_params(self, domain_id=None, project=None):
"""Get the domain and project/tenant parameters if needed.
keystone v2 and v3 are divergent enough that we need to pass or not
pass project or tenant_id or domain or nothing in a sane manner.
"""
ret = {}
ret.update(self._get_domain_id_param_dict(domain_id))
ret.update(self._get_project_id_param_dict(project))
return ret
def range_search(self, data, filters):
"""Perform integer range searches across a list of dictionaries.
Given a list of dictionaries, search across the list using the given
dictionary keys and a range of integer values for each key. Only
dictionaries that match ALL search filters across the entire original
data set will be returned.
It is not a requirement that each dictionary contain the key used
for searching. Those without the key will be considered non-matching.
        The range values must be strings, each either a set of digits
        representing an integer to match, or a range operator followed by
        a set of digits representing an integer to match. If a range
operator is not given, exact value matching will be used. Valid
operators are one of: <,>,<=,>=
:param data: List of dictionaries to be searched.
:param filters: Dict describing the one or more range searches to
perform. If more than one search is given, the result will be the
members of the original data set that match ALL searches. An
example of filtering by multiple ranges::
{"vcpus": "<=5", "ram": "<=2048", "disk": "1"}
:returns: A list subset of the original data set.
:raises: OpenStackCloudException on invalid range expressions.
"""
filtered = []
for key, range_value in filters.items():
# We always want to operate on the full data set so that
# calculations for minimum and maximum are correct.
results = _utils.range_filter(data, key, range_value)
if not filtered:
# First set of results
filtered = results
else:
# The combination of all searches should be the intersection of
# all result sets from each search. So adjust the current set
# of filtered data by computing its intersection with the
# latest result set.
filtered = [r for r in results for f in filtered if r == f]
return filtered
def _get_and_munchify(self, key, data):
"""Wrapper around meta.get_and_munchify.
Some of the methods expect a `meta` attribute to be passed in as
        part of the method signature. In those methods the meta param
        shadows the meta module, which makes the call to
        meta.get_and_munchify fail.
"""
if isinstance(data, requests.models.Response):
data = proxy._json_response(data)
return meta.get_and_munchify(key, data)
def get_name(self):
return self.name
def get_session_endpoint(self, service_key):
try:
return self.config.get_session_endpoint(service_key)
except keystoneauth1.exceptions.catalog.EndpointNotFound as e:
self.log.debug(
"Endpoint not found in %s cloud: %s", self.name, str(e))
endpoint = None
except exc.OpenStackCloudException:
raise
except Exception as e:
raise exc.OpenStackCloudException(
"Error getting {service} endpoint on {cloud}:{region}:"
" {error}".format(
service=service_key,
cloud=self.name,
region=self.config.get_region_name(service_key),
error=str(e)))
return endpoint
def has_service(self, service_key):
if not self.config.has_service(service_key):
# TODO(mordred) add a stamp here so that we only report this once
if not (service_key in self._disable_warnings
and self._disable_warnings[service_key]):
self.log.debug(
"Disabling %(service_key)s entry in catalog"
" per config", {'service_key': service_key})
self._disable_warnings[service_key] = True
return False
try:
endpoint = self.get_session_endpoint(service_key)
except exc.OpenStackCloudException:
return False
if endpoint:
return True
else:
return False
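# --- Illustrative usage sketch (not part of the SDK source): how a few of the
# mixin methods above are typically reached through an openstack Connection.
# The cloud name 'example' and the flavor filters are assumptions.
#
#     import openstack
#     conn = openstack.connect(cloud='example')
#
#     # endpoint_for() honours any *_endpoint_override before falling back
#     # to the service catalog.
#     compute_url = conn.endpoint_for('compute')
#
#     # range_search() keeps only the dicts matching ALL integer-range filters.
#     small_flavors = conn.range_search(
#         conn.list_flavors(), {"vcpus": "<=2", "ram": "<=2048"})
#
#     # connect_as_project() returns a new connection scoped to another project.
#     other = conn.connect_as_project('different-project')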
|
dtroyer/python-openstacksdk
|
openstack/cloud/openstackcloud.py
|
Python
|
apache-2.0
| 31,888
|
from diktya.random_search import fmin
import numpy as np
def quadratic_function(x):
return (x - 2) ** 2
def space_function():
return np.random.uniform(-2, 4)
def test_fmin():
results = fmin(quadratic_function, space_function, n=50, verbose=1)
val, space = results[0]
print(val, space)
assert val <= 0.1
|
BioroboticsLab/diktya
|
tests/test_random_search.py
|
Python
|
apache-2.0
| 333
|
"""
Interface between pybedtools and the R package VennDiagram.
Rather than depend on the user to have rpy2 installed, this simply writes an
R script that can be edited and tweaked by the user before being run in R.
"""
import os
import string
import pybedtools
from pybedtools import helpers
import subprocess
from collections import OrderedDict
# really just fill in x and filename...leave the rest up to the user.
#
# Note that the closing parenthesis is missing -- that's so the user can add
# kwargs from the calling function
template = string.Template("""
library(VennDiagram)
venn.diagram(
x=$x,
filename=$filename,
category.names = $names
""")
def _list_to_R_syntax(x):
"""
Convert items in `x` to a string, and replace tabs with pipes in Interval
string representations. Put everything into an R vector and return as one
big string.
"""
items = []
for i in x:
if isinstance(i, pybedtools.Interval):
i = str(i).replace('\t', '|')
items.append('"%s"' % i)
return 'c(%s)' % ','.join(items)
def _dict_to_R_named_list(d):
"""
Calls _list_to_R_syntax for each item. Returns one big string.
"""
items = []
for key, val in d.items():
items.append('"%s" = %s' % (key, _list_to_R_syntax(val)))
return 'list(%s)' % ', '.join(items)
def truncator(feature):
"""
Convert a feature of any format into a BED3 format.
"""
return pybedtools.create_interval_from_list(
[feature.chrom, str(feature.start), str(feature.stop)])
def cleaned_intersect(items):
"""
Perform interval intersections such that the end products have identical \
features for overlapping intervals.
The VennDiagram package does *set* intersection, not *interval*
intersection. So the goal here is to represent intersecting intervals as
intersecting sets of strings.
Doing a simple BEDTools intersectBed call doesn't do the trick (even with
the -u argument). As a concrete example, what would the string be for an
intersection of the feature "chr1:1-100" in file `x` and "chr1:50-200" in
file `y`?
The method used here is to substitute the intervals in `y` that overlap `x`
with the corresponding elements in `x`. This means that in the resulting
sets, the overlapping features are identical. To follow up with the
example, both `x` and `y` would have an item "chr1:50-200" in their sets,
simply indicating *that* one interval overlapped.
Venn diagrams are not well suited for nested overlaps or multi-overlaps.
To illustrate, try drawing the 2-way Venn diagram of the following two
files. Specifically, what number goes in the middle -- the number of
features in `x` that intersect `y` (1) or the number of features in `y`
that intersect `x` (2)?::
x:
chr1 1 100
chr1 500 6000
y:
chr1 50 100
chr1 80 200
chr9 777 888
In this case, this function will return the following sets::
x:
chr1:1-100
chr1:500-6000
y:
chr1:1-100
chr9:777-888
This means that while `x` does not change in length, `y` can. For example,
if there are 2 features in `x` that overlap one feature in `y`, then `y`
will gain those two features in place of its single original feature.
This strategy is extended for multiple intersections -- see the source for
details.
"""
if len(items) == 2:
x = items[0].each(truncator).saveas()
y = items[1].each(truncator).saveas()
# Combine the unique-to-y intervals with the shared-with-x intervals.
# Since x is first in x+y, resulting features are from x.
new_y = (y - x).cat(x + y)
return x, new_y
if len(items) == 3:
x = items[0].each(truncator).saveas()
y = items[1].each(truncator).saveas()
z = items[2].each(truncator).saveas()
# Same as above. Don't care about z yet; this means that y will not
# change because of z.
new_y = (y - x).cat(x + y)
# Combine:
# unique-to-z
# shared-with-any-x
# shared-with-unique-to-y
new_z = (z - y - x).cat(x + z).cat((y - x) + z)
return x, new_y, new_z
if len(items) == 4:
x = items[0].each(truncator).saveas()
y = items[1].each(truncator).saveas()
z = items[2].each(truncator).saveas()
q = items[3].each(truncator).saveas()
# Same as 2-way
new_y = (y - x).cat(x + y)
# Same as 3-way
new_z = (z - y - x).cat(x + z).cat((y - x) + z)
# Combine:
# unique-to-q
# shared-with-any-x
# shared-with-unique-to-y
# shared-with-unique-to-z
new_q = (q - z - y - x)\
.cat(x + q)\
.cat((y - x) + q)\
.cat((z - y - x) + q)
return x, new_y, new_z, new_q
def venn_maker(beds, names=None, figure_filename=None, script_filename=None,
additional_args=None, run=False):
"""
Given a list of interval files, write an R script to create a Venn \
diagram of overlaps (and optionally run it).
The R script calls the venn.diagram function of the R package VennDiagram
for extremely flexible Venn and Euler diagram creation. Uses
`cleaned_intersect()` to create string representations of shared intervals.
`beds` is a list of up to 4 filenames or BedTools.
`names` is a list of names to use for the Venn diagram, in the same order
as `beds`. Default is "abcd"[:len(beds)].
`figure_filename` is the TIFF file to save the figure as.
`script_filename` is the optional filename to write the R script to
`additional_args` is list that will be inserted into the R script,
verbatim. For example, to use scaled Euler diagrams with different colors,
use::
additional_args = ['euler.d=TRUE',
'scaled=TRUE',
'cat.col=c("red","blue")']
If `run` is True, then assume R is installed, is on the path, and has
    VennDiagram installed ... and run the script. The resulting figure
    will be saved as `figure_filename`.
"""
if figure_filename is None:
figure_filename = 'NULL'
else:
figure_filename = '"%s"' % figure_filename
if names is None:
names = "abcd"[:len(beds)]
_beds = []
for bed in beds:
if not isinstance(bed, pybedtools.BedTool):
bed = pybedtools.BedTool(bed)
_beds.append(bed)
cleaned = cleaned_intersect(_beds)
results = OrderedDict(zip(names, cleaned))
s = template.substitute(
x=_dict_to_R_named_list(results),
filename=figure_filename,
names=_list_to_R_syntax(names))
if additional_args:
s += ',' + ', '.join(additional_args)
s += ")"
if not script_filename:
fn = pybedtools.BedTool._tmp()
else:
fn = script_filename
fout = open(fn, 'w')
fout.write(s)
fout.close()
out = fn + '.Rout'
if run:
if not pybedtools.settings._R_installed:
helpers._check_for_R()
cmds = [os.path.join(pybedtools.settings._R_path, 'R'), 'CMD', 'BATCH',
fn, out]
p = subprocess.Popen(
cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if stdout or stderr:
print "stdout:", stdout
print "stderr:", stderr
if not script_filename:
return s
return None
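# --- Illustrative usage sketch (not part of pybedtools): a hypothetical call
# to venn_maker() above. The BED file names, category names and output paths
# are assumptions for demonstration.
#
#     venn_maker(
#         beds=['peaks_rep1.bed', 'peaks_rep2.bed'],
#         names=['rep1', 'rep2'],
#         figure_filename='overlap.tiff',
#         script_filename='overlap.R',
#         additional_args=['euler.d=TRUE', 'scaled=TRUE'],
#         run=False)   # only write the R script; run it in R manually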
|
jos4uke/getSeqFlankBlatHit
|
lib/python2.7/site-packages/pybedtools/contrib/venn_maker.py
|
Python
|
gpl-2.0
| 7,663
|
#
# Created by: Pearu Peterson, March 2002
#
""" Test functions for scipy.linalg.matfuncs module
"""
from __future__ import division, print_function, absolute_import
import math
import numpy as np
from numpy import array, eye, exp, random
from numpy.linalg import matrix_power
from numpy.testing import (
assert_allclose, assert_, assert_array_almost_equal, assert_equal,
assert_array_almost_equal_nulp)
from scipy._lib._numpy_compat import suppress_warnings
from scipy.sparse import csc_matrix, SparseEfficiencyWarning
from scipy.sparse.construct import eye as speye
from scipy.sparse.linalg.matfuncs import (expm, _expm,
ProductOperator, MatrixPowerOperator,
_onenorm_matrix_power_nnm)
from scipy.linalg import logm
from scipy.special import factorial, binom
import scipy.sparse
import scipy.sparse.linalg
def _burkardt_13_power(n, p):
"""
A helper function for testing matrix functions.
Parameters
----------
n : integer greater than 1
Order of the square matrix to be returned.
p : non-negative integer
Power of the matrix.
Returns
-------
out : ndarray representing a square matrix
A Forsythe matrix of order n, raised to the power p.
"""
# Input validation.
if n != int(n) or n < 2:
raise ValueError('n must be an integer greater than 1')
n = int(n)
if p != int(p) or p < 0:
raise ValueError('p must be a non-negative integer')
p = int(p)
# Construct the matrix explicitly.
a, b = divmod(p, n)
large = np.power(10.0, -n*a)
small = large * np.power(10.0, -n)
return np.diag([large]*(n-b), b) + np.diag([small]*b, b-n)
def test_onenorm_matrix_power_nnm():
np.random.seed(1234)
for n in range(1, 5):
for p in range(5):
M = np.random.random((n, n))
Mp = np.linalg.matrix_power(M, p)
observed = _onenorm_matrix_power_nnm(M, p)
expected = np.linalg.norm(Mp, 1)
assert_allclose(observed, expected)
class TestExpM(object):
def test_zero_ndarray(self):
a = array([[0.,0],[0,0]])
assert_array_almost_equal(expm(a),[[1,0],[0,1]])
def test_zero_sparse(self):
a = csc_matrix([[0.,0],[0,0]])
assert_array_almost_equal(expm(a).toarray(),[[1,0],[0,1]])
def test_zero_matrix(self):
a = np.matrix([[0.,0],[0,0]])
assert_array_almost_equal(expm(a),[[1,0],[0,1]])
def test_misc_types(self):
A = expm(np.array([[1]]))
assert_allclose(expm(((1,),)), A)
assert_allclose(expm([[1]]), A)
assert_allclose(expm(np.matrix([[1]])), A)
assert_allclose(expm(np.array([[1]])), A)
assert_allclose(expm(csc_matrix([[1]])).A, A)
B = expm(np.array([[1j]]))
assert_allclose(expm(((1j,),)), B)
assert_allclose(expm([[1j]]), B)
assert_allclose(expm(np.matrix([[1j]])), B)
assert_allclose(expm(csc_matrix([[1j]])).A, B)
def test_bidiagonal_sparse(self):
A = csc_matrix([
[1, 3, 0],
[0, 1, 5],
[0, 0, 2]], dtype=float)
e1 = math.exp(1)
e2 = math.exp(2)
expected = np.array([
[e1, 3*e1, 15*(e2 - 2*e1)],
[0, e1, 5*(e2 - e1)],
[0, 0, e2]], dtype=float)
observed = expm(A).toarray()
assert_array_almost_equal(observed, expected)
def test_padecases_dtype_float(self):
for dtype in [np.float32, np.float64]:
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
A = scale * eye(3, dtype=dtype)
observed = expm(A)
expected = exp(scale) * eye(3, dtype=dtype)
assert_array_almost_equal_nulp(observed, expected, nulp=100)
def test_padecases_dtype_complex(self):
for dtype in [np.complex64, np.complex128]:
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
A = scale * eye(3, dtype=dtype)
observed = expm(A)
expected = exp(scale) * eye(3, dtype=dtype)
assert_array_almost_equal_nulp(observed, expected, nulp=100)
def test_padecases_dtype_sparse_float(self):
# float32 and complex64 lead to errors in spsolve/UMFpack
dtype = np.float64
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
a = scale * speye(3, 3, dtype=dtype, format='csc')
e = exp(scale) * eye(3, dtype=dtype)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a csc_matrix is expensive.")
exact_onenorm = _expm(a, use_exact_onenorm=True).toarray()
inexact_onenorm = _expm(a, use_exact_onenorm=False).toarray()
assert_array_almost_equal_nulp(exact_onenorm, e, nulp=100)
assert_array_almost_equal_nulp(inexact_onenorm, e, nulp=100)
def test_padecases_dtype_sparse_complex(self):
# float32 and complex64 lead to errors in spsolve/UMFpack
dtype = np.complex128
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
a = scale * speye(3, 3, dtype=dtype, format='csc')
e = exp(scale) * eye(3, dtype=dtype)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"Changing the sparsity structure of a csc_matrix is expensive.")
assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100)
def test_logm_consistency(self):
random.seed(1234)
for dtype in [np.float64, np.complex128]:
for n in range(1, 10):
for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]:
# make logm(A) be of a given scale
A = (eye(n) + random.rand(n, n) * scale).astype(dtype)
if np.iscomplexobj(A):
A = A + 1j * random.rand(n, n) * scale
assert_array_almost_equal(expm(logm(A)), A)
def test_integer_matrix(self):
Q = np.array([
[-3, 1, 1, 1],
[1, -3, 1, 1],
[1, 1, -3, 1],
[1, 1, 1, -3]])
assert_allclose(expm(Q), expm(1.0 * Q))
def test_integer_matrix_2(self):
# Check for integer overflows
Q = np.array([[-500, 500, 0, 0],
[0, -550, 360, 190],
[0, 630, -630, 0],
[0, 0, 0, 0]], dtype=np.int16)
assert_allclose(expm(Q), expm(1.0 * Q))
Q = csc_matrix(Q)
assert_allclose(expm(Q).A, expm(1.0 * Q).A)
def test_triangularity_perturbation(self):
# Experiment (1) of
# Awad H. Al-Mohy and Nicholas J. Higham (2012)
# Improved Inverse Scaling and Squaring Algorithms
# for the Matrix Logarithm.
A = np.array([
[3.2346e-1, 3e4, 3e4, 3e4],
[0, 3.0089e-1, 3e4, 3e4],
[0, 0, 3.221e-1, 3e4],
[0, 0, 0, 3.0744e-1]],
dtype=float)
A_logm = np.array([
[-1.12867982029050462e+00, 9.61418377142025565e+04,
-4.52485573953179264e+09, 2.92496941103871812e+14],
[0.00000000000000000e+00, -1.20101052953082288e+00,
9.63469687211303099e+04, -4.68104828911105442e+09],
[0.00000000000000000e+00, 0.00000000000000000e+00,
-1.13289322264498393e+00, 9.53249183094775653e+04],
[0.00000000000000000e+00, 0.00000000000000000e+00,
0.00000000000000000e+00, -1.17947533272554850e+00]],
dtype=float)
assert_allclose(expm(A_logm), A, rtol=1e-4)
# Perturb the upper triangular matrix by tiny amounts,
# so that it becomes technically not upper triangular.
random.seed(1234)
tiny = 1e-17
A_logm_perturbed = A_logm.copy()
A_logm_perturbed[1, 0] = tiny
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Ill-conditioned.*")
A_expm_logm_perturbed = expm(A_logm_perturbed)
rtol = 1e-4
atol = 100 * tiny
assert_(not np.allclose(A_expm_logm_perturbed, A, rtol=rtol, atol=atol))
def test_burkardt_1(self):
# This matrix is diagonal.
# The calculation of the matrix exponential is simple.
#
# This is the first of a series of matrix exponential tests
# collected by John Burkardt from the following sources.
#
# Alan Laub,
# Review of "Linear System Theory" by Joao Hespanha,
# SIAM Review,
# Volume 52, Number 4, December 2010, pages 779--781.
#
# Cleve Moler and Charles Van Loan,
# Nineteen Dubious Ways to Compute the Exponential of a Matrix,
# Twenty-Five Years Later,
# SIAM Review,
# Volume 45, Number 1, March 2003, pages 3--49.
#
# Cleve Moler,
# Cleve's Corner: A Balancing Act for the Matrix Exponential,
# 23 July 2012.
#
# Robert Ward,
# Numerical computation of the matrix exponential
# with accuracy estimate,
# SIAM Journal on Numerical Analysis,
# Volume 14, Number 4, September 1977, pages 600--610.
exp1 = np.exp(1)
exp2 = np.exp(2)
A = np.array([
[1, 0],
[0, 2],
], dtype=float)
desired = np.array([
[exp1, 0],
[0, exp2],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_2(self):
# This matrix is symmetric.
# The calculation of the matrix exponential is straightforward.
A = np.array([
[1, 3],
[3, 2],
], dtype=float)
desired = np.array([
[39.322809708033859, 46.166301438885753],
[46.166301438885768, 54.711576854329110],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_3(self):
# This example is due to Laub.
# This matrix is ill-suited for the Taylor series approach.
# As powers of A are computed, the entries blow up too quickly.
exp1 = np.exp(1)
exp39 = np.exp(39)
A = np.array([
[0, 1],
[-39, -40],
], dtype=float)
desired = np.array([
[
39/(38*exp1) - 1/(38*exp39),
-np.expm1(-38) / (38*exp1)],
[
39*np.expm1(-38) / (38*exp1),
-1/(38*exp1) + 39/(38*exp39)],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_4(self):
# This example is due to Moler and Van Loan.
# The example will cause problems for the series summation approach,
# as well as for diagonal Pade approximations.
A = np.array([
[-49, 24],
[-64, 31],
], dtype=float)
U = np.array([[3, 1], [4, 2]], dtype=float)
V = np.array([[1, -1/2], [-2, 3/2]], dtype=float)
w = np.array([-17, -1], dtype=float)
desired = np.dot(U * np.exp(w), V)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_5(self):
# This example is due to Moler and Van Loan.
# This matrix is strictly upper triangular
# All powers of A are zero beyond some (low) limit.
# This example will cause problems for Pade approximations.
A = np.array([
[0, 6, 0, 0],
[0, 0, 6, 0],
[0, 0, 0, 6],
[0, 0, 0, 0],
], dtype=float)
desired = np.array([
[1, 6, 18, 36],
[0, 1, 6, 18],
[0, 0, 1, 6],
[0, 0, 0, 1],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_6(self):
# This example is due to Moler and Van Loan.
# This matrix does not have a complete set of eigenvectors.
# That means the eigenvector approach will fail.
exp1 = np.exp(1)
A = np.array([
[1, 1],
[0, 1],
], dtype=float)
desired = np.array([
[exp1, exp1],
[0, exp1],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_7(self):
# This example is due to Moler and Van Loan.
# This matrix is very close to example 5.
# Mathematically, it has a complete set of eigenvectors.
# Numerically, however, the calculation will be suspect.
exp1 = np.exp(1)
eps = np.spacing(1)
A = np.array([
[1 + eps, 1],
[0, 1 - eps],
], dtype=float)
desired = np.array([
[exp1, exp1],
[0, exp1],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_8(self):
# This matrix was an example in Wikipedia.
exp4 = np.exp(4)
exp16 = np.exp(16)
A = np.array([
[21, 17, 6],
[-5, -1, -6],
[4, 4, 16],
], dtype=float)
desired = np.array([
[13*exp16 - exp4, 13*exp16 - 5*exp4, 2*exp16 - 2*exp4],
[-9*exp16 + exp4, -9*exp16 + 5*exp4, -2*exp16 + 2*exp4],
[16*exp16, 16*exp16, 4*exp16],
], dtype=float) * 0.25
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_9(self):
# This matrix is due to the NAG Library.
# It is an example for function F01ECF.
A = np.array([
[1, 2, 2, 2],
[3, 1, 1, 2],
[3, 2, 1, 2],
[3, 3, 3, 1],
], dtype=float)
desired = np.array([
[740.7038, 610.8500, 542.2743, 549.1753],
[731.2510, 603.5524, 535.0884, 542.2743],
[823.7630, 679.4257, 603.5524, 610.8500],
[998.4355, 823.7630, 731.2510, 740.7038],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_10(self):
# This is Ward's example #1.
# It is defective and nonderogatory.
A = np.array([
[4, 2, 0],
[1, 4, 1],
[1, 1, 4],
], dtype=float)
assert_allclose(sorted(scipy.linalg.eigvals(A)), (3, 3, 6))
desired = np.array([
[147.8666224463699, 183.7651386463682, 71.79703239999647],
[127.7810855231823, 183.7651386463682, 91.88256932318415],
[127.7810855231824, 163.6796017231806, 111.9681062463718],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_11(self):
# This is Ward's example #2.
# It is a symmetric matrix.
A = np.array([
[29.87942128909879, 0.7815750847907159, -2.289519314033932],
[0.7815750847907159, 25.72656945571064, 8.680737820540137],
[-2.289519314033932, 8.680737820540137, 34.39400925519054],
], dtype=float)
assert_allclose(scipy.linalg.eigvalsh(A), (20, 30, 40))
desired = np.array([
[
5.496313853692378E+15,
-1.823188097200898E+16,
-3.047577080858001E+16],
[
-1.823188097200899E+16,
6.060522870222108E+16,
1.012918429302482E+17],
[
-3.047577080858001E+16,
1.012918429302482E+17,
1.692944112408493E+17],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_12(self):
# This is Ward's example #3.
# Ward's algorithm has difficulty estimating the accuracy
# of its results.
A = np.array([
[-131, 19, 18],
[-390, 56, 54],
[-387, 57, 52],
], dtype=float)
assert_allclose(sorted(scipy.linalg.eigvals(A)), (-20, -2, -1))
desired = np.array([
[-1.509644158793135, 0.3678794391096522, 0.1353352811751005],
[-5.632570799891469, 1.471517758499875, 0.4060058435250609],
[-4.934938326088363, 1.103638317328798, 0.5413411267617766],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_13(self):
# This is Ward's example #4.
# This is a version of the Forsythe matrix.
# The eigenvector problem is badly conditioned.
        # Ward's algorithm has difficulty estimating the accuracy
# of its results for this problem.
#
# Check the construction of one instance of this family of matrices.
A4_actual = _burkardt_13_power(4, 1)
A4_desired = [[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[1e-4, 0, 0, 0]]
assert_allclose(A4_actual, A4_desired)
# Check the expm for a few instances.
for n in (2, 3, 4, 10):
# Approximate expm using Taylor series.
# This works well for this matrix family
# because each matrix in the summation,
# even before dividing by the factorial,
# is entrywise positive with max entry 10**(-floor(p/n)*n).
k = max(1, int(np.ceil(16/n)))
desired = np.zeros((n, n), dtype=float)
for p in range(n*k):
Ap = _burkardt_13_power(n, p)
assert_equal(np.min(Ap), 0)
assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n))
desired += Ap / factorial(p)
actual = expm(_burkardt_13_power(n, 1))
assert_allclose(actual, desired)
def test_burkardt_14(self):
# This is Moler's example.
# This badly scaled matrix caused problems for MATLAB's expm().
A = np.array([
[0, 1e-8, 0],
[-(2e10 + 4e8/6.), -3, 2e10],
[200./3., 0, -200./3.],
], dtype=float)
desired = np.array([
[0.446849468283175, 1.54044157383952e-09, 0.462811453558774],
[-5743067.77947947, -0.0152830038686819, -4526542.71278401],
[0.447722977849494, 1.54270484519591e-09, 0.463480648837651],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_pascal(self):
# Test pascal triangle.
# Nilpotent exponential, used to trigger a failure (gh-8029)
for scale in [1.0, 1e-3, 1e-6]:
for n in range(120):
A = np.diag(np.arange(1, n + 1), -1) * scale
B = expm(A)
sc = scale**np.arange(n, -1, -1)
if np.any(sc < 1e-300):
continue
got = B
expected = binom(np.arange(n + 1)[:,None],
np.arange(n + 1)[None,:]) * sc[None,:] / sc[:,None]
err = abs(expected - got).max()
atol = 1e-13 * abs(expected).max()
assert_allclose(got, expected, atol=atol)
class TestOperators(object):
def test_product_operator(self):
random.seed(1234)
n = 5
k = 2
nsamples = 10
for i in range(nsamples):
A = np.random.randn(n, n)
B = np.random.randn(n, n)
C = np.random.randn(n, n)
D = np.random.randn(n, k)
op = ProductOperator(A, B, C)
assert_allclose(op.matmat(D), A.dot(B).dot(C).dot(D))
assert_allclose(op.T.matmat(D), (A.dot(B).dot(C)).T.dot(D))
def test_matrix_power_operator(self):
random.seed(1234)
n = 5
k = 2
p = 3
nsamples = 10
for i in range(nsamples):
A = np.random.randn(n, n)
B = np.random.randn(n, k)
op = MatrixPowerOperator(A, p)
assert_allclose(op.matmat(B), matrix_power(A, p).dot(B))
assert_allclose(op.T.matmat(B), matrix_power(A, p).T.dot(B))
|
Eric89GXL/scipy
|
scipy/sparse/linalg/tests/test_matfuncs.py
|
Python
|
bsd-3-clause
| 20,275
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2016 Intel Corp
#
# Authors: Prudhvi Rao Shedimbi <prudhvi.rao.shedimbi@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
neutron_client = cfg.OptGroup(name='neutron_client',
title='Configuration Options for Neutron')
NEUTRON_CLIENT_OPTS = [
cfg.StrOpt('api_version',
default='2.0',
help='Version of Neutron API to use in neutronclient.'),
cfg.StrOpt('endpoint_type',
default='publicURL',
help='Type of endpoint to use in neutronclient. '
'Supported values: internalURL, publicURL, adminURL. '
'The default is publicURL.'),
cfg.StrOpt('region_name',
help='Region in Identity service catalog to use for '
'communication with the OpenStack service.')]
def register_opts(conf):
conf.register_group(neutron_client)
conf.register_opts(NEUTRON_CLIENT_OPTS, group=neutron_client)
def list_opts():
return [(neutron_client, NEUTRON_CLIENT_OPTS)]
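# --- Illustrative usage sketch (not part of watcher): registering and reading
# the options above on a standalone ConfigOpts instance. The override value is
# an assumption for demonstration.
#
#     conf = cfg.ConfigOpts()
#     register_opts(conf)
#     conf.set_override('region_name', 'RegionOne', group='neutron_client')
#     assert conf.neutron_client.api_version == '2.0'
#     assert conf.neutron_client.region_name == 'RegionOne'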
|
openstack/watcher
|
watcher/conf/neutron_client.py
|
Python
|
apache-2.0
| 1,608
|
from shopify_service import ShopifyService
from shop_service import ShopService
from config_service import ConfigService
from plan_service import PlanService
|
BootstrapHeroes/django-shopify
|
django_shopify/shopify_app/services/__init__.py
|
Python
|
gpl-3.0
| 157
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# generate-plugin.py - pluma plugin skeleton generator
# This file is part of pluma
#
# Copyright (C) 2006 - Steve Frécinaux
#
# pluma is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pluma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pluma; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
import re
import os
import sys
import getopt
from datetime import date
import preprocessor
# Default values of command line options
options = {
'language' : 'c',
'description' : 'Type here a short description of your plugin',
'author' : os.getenv('USERNAME'),
'email' : os.getenv('LOGNAME') + '@email.com',
'standalone' : False,
'with-side-pane' : False,
'with-bottom-pane' : False,
'with-menu' : False,
'with-config-dlg' : False
}
USAGE = """Usage:
%s [OPTIONS...] pluginname
""" % os.path.basename(sys.argv[0])
HELP = USAGE + """
generate skeleton source tree for a new pluma plugin.
Options:
--author Set the author name
--email Set the author email
--description Set the description you want for your new plugin
--standalone Is this plugin intended to be distributed as a
standalone package ? (N/A)
--language / -l Set the language (C) [default: %(language)s]
--with-$feature Enable $feature
--without-$feature Disable $feature
    --help / -h         Show this message and exit
Features:
config-dlg Plugin configuration dialog
menu Plugin menu entries
side-pane Side pane item (N/A)
bottom-pane Bottom pane item (N/A)
""" % options
TEMPLATE_DIR = os.path.join(os.path.dirname(sys.argv[0]), "plugin_template")
# Parsing command line options
try:
opts, args = getopt.getopt(sys.argv[1:],
'l:h',
['language=',
'description=',
'author=',
'email=',
'standalone',
'with-menu' , 'without-menu',
'with-side-pane' , 'without-side-pane',
'with-bottom-pane' , 'without-bottom-pane',
'with-config-dlg' , 'without-config-dlg',
'help'])
except getopt.error, exc:
print >>sys.stderr, '%s: %s' % (sys.argv[0], str(exc))
print >>sys.stderr, USAGE
sys.exit(1)
for opt, arg in opts:
if opt in ('-h', '--help'):
print >>sys.stderr, HELP
sys.exit(0)
elif opt in ('--description', '--author', '--email'):
options[opt[2:]] = arg
elif opt in ('-l', '--language'):
options['language'] = arg.lower()
elif opt == '--standalone':
options['standalone'] = True
elif opt[0:7] == '--with-':
options['with-' + opt[7:]] = True
elif opt[0:10] == '--without-':
options['with-' + opt[10:]] = False
# What's the new plugin name ?
if len(args) < 1:
print >>sys.stderr, USAGE
sys.exit(1)
plugin_name = args[0]
plugin_id = re.sub('[^a-z0-9_]', '', plugin_name.lower().replace(' ', '_'))
plugin_module = plugin_id.replace('_', '-')
directives = {
'PLUGIN_NAME' : plugin_name,
'PLUGIN_MODULE' : plugin_module,
'PLUGIN_ID' : plugin_id,
'AUTHOR_FULLNAME' : options['author'],
'AUTHOR_EMAIL' : options['email'],
'DATE_YEAR' : date.today().year,
'DESCRIPTION' : options['description'],
}
# Files to be generated by the preprocessor, in the form "template : outfile"
output_files = {
'Makefile.am': '%s/Makefile.am' % plugin_module,
'pluma-plugin.desktop.in': '%s/%s.pluma-plugin.desktop.in' % (plugin_module, plugin_module)
}
if options['language'] == 'c':
output_files['pluma-plugin.c'] = '%s/%s-plugin.c' % (plugin_module, plugin_module)
output_files['pluma-plugin.h'] = '%s/%s-plugin.h' % (plugin_module, plugin_module)
else:
print >>sys.stderr, 'Value of --language should be C'
print >>sys.stderr, USAGE
sys.exit(1)
if options['standalone']:
output_files['configure.ac'] = 'configure.ac'
if options['with-side-pane']:
directives['WITH_SIDE_PANE'] = True
if options['with-bottom-pane']:
directives['WITH_BOTTOM_PANE'] = True
if options['with-menu']:
directives['WITH_MENU'] = True
if options['with-config-dlg']:
directives['WITH_CONFIGURE_DIALOG'] = True
# Generate the plugin base
for infile, outfile in output_files.iteritems():
print 'Processing %s\n' \
' into %s...' % (infile, outfile)
infile = os.path.join(TEMPLATE_DIR, infile)
outfile = os.path.join(os.getcwd(), outfile)
if not os.path.isfile(infile):
print >>sys.stderr, 'Input file does not exist : %s.' % os.path.basename(infile)
continue
# Make sure the destination directory exists
if not os.path.isdir(os.path.split(outfile)[0]):
os.makedirs(os.path.split(outfile)[0])
# Variables relative to the generated file
directives['DIRNAME'], directives['FILENAME'] = os.path.split(outfile)
# Generate the file
preprocessor.process(infile, outfile, directives.copy())
print 'Done.'
# ex:ts=4:et:
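# --- Illustrative invocation (not part of the original script); the author,
# plugin name and feature flags below are assumptions for demonstration:
#
#     ./generate-plugin.py --author="Jane Doe" --email=jane@example.org \
#         --with-menu --with-config-dlg "My Plugin"
#
# This would generate a my-plugin/ directory containing Makefile.am,
# my-plugin.pluma-plugin.desktop.in, my-plugin-plugin.c and my-plugin-plugin.h.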
|
sc0w/pluma
|
tools/generate-plugin.py
|
Python
|
gpl-2.0
| 5,946
|
# Generated by Django 2.2.4 on 2019-10-29 04:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tagging', '0007_auto_20191029_0434'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'ordering': ['order', 'tag'], 'verbose_name_plural': 'categories'},
),
migrations.AddField(
model_name='category',
name='hashtag',
field=models.BooleanField(default=False),
),
]
|
jeromecc/doctoctocbot
|
src/tagging/migrations/0008_auto_20191029_0518.py
|
Python
|
mpl-2.0
| 557
|
import logging
# https://github.com/jedie/pathlib_revised/
from pathlib_revised import Path2
# https://github.com/jedie/PyHardLinkBackup
from pyhardlinkbackup.backup_app.models import BackupEntry
from pyhardlinkbackup.phlb.config import phlb_config
from pyhardlinkbackup.phlb.path_helper import rename2temp
log = logging.getLogger(__name__)
def deduplicate(backup_entry, hash_hexdigest):
abs_dst_root = Path2(phlb_config.backup_path)
try:
backup_entry.relative_to(abs_dst_root)
except ValueError as err:
raise ValueError(f"Backup entry not in backup root path: {err}")
assert backup_entry.is_file(), f"Is not a file: {backup_entry.path}"
old_backups = BackupEntry.objects.filter(content_info__hash_hexdigest=hash_hexdigest)
log.debug("There are %i old backup entries for the hash", old_backups.count())
old_backups = old_backups.exclude(no_link_source=True)
log.debug("%i old backup entries with 'no_link_source=False'", old_backups.count())
for old_backup in old_backups:
log.debug("+++ old: '%s'", old_backup)
abs_old_backup_path = old_backup.get_backup_path()
if not abs_old_backup_path.is_file():
            # e.g.: User has deleted an old backup
old_backup.no_link_source = True # Don't try this source in future
old_backup.save()
continue
if abs_old_backup_path == backup_entry.path:
log.warning(f"Skip own file: {abs_old_backup_path}")
continue
# TODO: compare hash / current content before replace with a link
temp_filepath = rename2temp(
src=backup_entry,
# Actually we would like to use the current filepath:
# dst=path_helper.abs_dst_filepath.parent,
            # But this can result in an error on Windows, because
            # the complete path length is limited to 259 characters!
# see:
# https://msdn.microsoft.com/en-us/library/aa365247.aspx#maxpath
# on long path, we will fall into FileNotFoundError:
# https://github.com/jedie/PyHardLinkBackup/issues/13#issuecomment-176241894
# So we use the destination root directory:
dst=abs_dst_root,
prefix=f"{backup_entry.name}_",
suffix=".tmp",
tmp_max=10,
)
log.debug("%s was renamed to %s", backup_entry, temp_filepath)
try:
abs_old_backup_path.link(backup_entry) # call os.link()
except OSError as err:
temp_filepath.rename(backup_entry)
log.error(f"Can't link '{abs_old_backup_path}' to '{backup_entry}': {err}")
log.debug("Mark %r with 'no link source'.", old_backup)
old_backup.no_link_source = True
old_backup.save()
else:
temp_filepath.unlink() # FIXME
log.debug('Replaced with a hardlink to: %s', abs_old_backup_path)
return old_backup
|
jedie/PyHardlinkBackup
|
pyhardlinkbackup/phlb/deduplicate.py
|
Python
|
gpl-3.0
| 2,979
|
from django.contrib import admin
from blog.forms import EntryAdminForm
from blog.models import Entry
class EntryAdmin(admin.ModelAdmin):
list_display = ('title', 'created_at', 'created_by', 'status', 'is_micro')
list_display_links = ('created_at', 'title')
list_filter = ('status', 'created_at')
form = EntryAdminForm
def add_view(self, request):
self.exclude = ('created_by',)
return super(EntryAdmin, self).add_view(request)
def save_model(self, request, obj, form, change):
if not change:
obj.created_by = request.user
obj.save()
def publish_entry(self, request, queryset):
queryset.update(status=Entry.PUBLISHED_STATUS)
publish_entry.short_description = "Publish selected entries"
def draft_entry(self, request, queryset):
queryset.update(status=Entry.DRAFT_STATUS)
draft_entry.short_description = "Draft selected entries"
def unpublish_entry(self, request, queryset):
queryset.update(status=Entry.HIDDEN_STATUS)
unpublish_entry.short_description = "Unpublish selected entries"
actions = [publish_entry, draft_entry, unpublish_entry]
admin.site.register(Entry, EntryAdmin)
|
SushiTee/teerace
|
teerace/blog/admin.py
|
Python
|
bsd-3-clause
| 1,115
|
""" URL definitions for the verify_student app. """
from django.conf import settings
from django.conf.urls import patterns, url
from lms.djangoapps.verify_student import views
urlpatterns = patterns(
'',
# The user is starting the verification / payment process,
# most likely after enrolling in a course and selecting
# a "verified" track.
url(
r'^start-flow/{course}/$'.format(course=settings.COURSE_ID_PATTERN),
# Pylint seems to dislike the as_view() method because as_view() is
# decorated with `classonlymethod` instead of `classmethod`.
views.PayAndVerifyView.as_view(),
name="verify_student_start_flow",
kwargs={
'message': views.PayAndVerifyView.FIRST_TIME_VERIFY_MSG
}
),
# This is for A/B testing.
url(
r'^begin-flow/{course}/$'.format(course=settings.COURSE_ID_PATTERN),
views.PayAndVerifyView.as_view(),
name="verify_student_begin_flow",
kwargs={
'message': views.PayAndVerifyView.FIRST_TIME_VERIFY_MSG
}
),
# The user is enrolled in a non-paid mode and wants to upgrade.
# This is the same as the "start verification" flow,
# except with slight messaging changes.
url(
r'^upgrade/{course}/$'.format(course=settings.COURSE_ID_PATTERN),
views.PayAndVerifyView.as_view(),
name="verify_student_upgrade_and_verify",
kwargs={
'message': views.PayAndVerifyView.UPGRADE_MSG
}
),
# The user has paid and still needs to verify.
# Since the user has "just paid", we display *all* steps
# including payment. The user resumes the flow
# from the verification step.
# Note that if the user has already verified, this will redirect
# to the dashboard.
url(
r'^verify-now/{course}/$'.format(course=settings.COURSE_ID_PATTERN),
views.PayAndVerifyView.as_view(),
name="verify_student_verify_now",
kwargs={
'always_show_payment': True,
'current_step': views.PayAndVerifyView.FACE_PHOTO_STEP,
'message': views.PayAndVerifyView.VERIFY_NOW_MSG
}
),
# The user is returning to the flow after paying.
# This usually occurs after a redirect from the shopping cart
# once the order has been fulfilled.
url(
r'^payment-confirmation/{course}/$'.format(course=settings.COURSE_ID_PATTERN),
views.PayAndVerifyView.as_view(),
name="verify_student_payment_confirmation",
kwargs={
'always_show_payment': True,
'current_step': views.PayAndVerifyView.PAYMENT_CONFIRMATION_STEP,
'message': views.PayAndVerifyView.PAYMENT_CONFIRMATION_MSG
}
),
url(
r'^create_order',
views.create_order,
name="verify_student_create_order"
),
url(
r'^results_callback$',
views.results_callback,
name="verify_student_results_callback",
),
url(
r'^submit-photos/$',
views.SubmitPhotosView.as_view(),
name="verify_student_submit_photos"
),
# End-point for reverification
# Reverification occurs when a user's initial verification attempt
# is denied or expires. The user is allowed to retry by submitting
# new photos. This is different than *in-course* reverification,
# in which a student submits only face photos, which are matched
# against the ID photo from the user's initial verification attempt.
url(
r'^reverify$',
views.ReverifyView.as_view(),
name="verify_student_reverify"
),
)
# Fake response page for incourse reverification ( software secure )
if settings.FEATURES.get('ENABLE_SOFTWARE_SECURE_FAKE'):
from lms.djangoapps.verify_student.tests.fake_software_secure import SoftwareSecureFakeView
urlpatterns += patterns(
'verify_student.tests.fake_software_secure',
url(r'^software-secure-fake-response', SoftwareSecureFakeView.as_view()),
)
|
miptliot/edx-platform
|
lms/djangoapps/verify_student/urls.py
|
Python
|
agpl-3.0
| 4,037
|
import discord
import csv
from discord.ext import commands
from tabulate import tabulate
from models.character import Character
from models.server_settings import ServerSettings
from utils import *
class General(commands.Cog):
"""General commands."""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def info(self, ctx):
"""List important gear score info. Lowest, highest_gs, average, newest(not yet), oldest,
total users, total officers, total members"""
try:
info = []
members = Character.primary_chars(server=ctx.message.guild.id)
if members:
officers = members(rank='Officer')
average_gs = members.average('gear_score')
lowest_gs = members.order_by('+gear_score').first()
highest_gs = members.first()
count = members.count()
officer_count = officers.count()
member_count = count - officer_count
info.append([' Gear Score '])
info.append(['-------------------------'])
info.append(['Average', round(average_gs,2)])
info.append(['Lowest', lowest_gs.gear_score, lowest_gs.fam_name.title(), lowest_gs.char_name.title()])
info.append(['Highest', highest_gs.gear_score, highest_gs.fam_name.title(), highest_gs.char_name.title()])
info.append([''])
info.append(['-------------------------'])
info.append(['Counts '])
info.append(['-------------------------'])
info.append(['Total Officers', officer_count])
info.append(['Total Members', member_count])
info.append(['Guild Total', count])
data = tabulate(info)
else:
data = "No members yet to display info. Try adding some members :D"
await ctx.send(codify(data))
except Exception as e:
print(e)
await ctx.send("Could not retrieve info")
@commands.command(pass_context=True)
async def export(self, ctx):
"""Exports current guild data"""
members = Character.primary_chars(server=ctx.message.guild.id)
rows = get_row(members, False)
rows.insert(0, HEADERS)
try:
with open('./members.csv', 'w') as myfile:
wr = csv.writer(myfile)
wr.writerows(rows)
if is_officer_mode(ctx.message.guild.id):
if is_user_officer(ctx.message.author.roles):
await ctx.message.author.send('members.csv', file=discord.File('members.csv'))
await ctx.send(codify('File sent. Please check your private messages'))
return
else:
await ctx.send(codify(OFFICER_MODE_MESSAGE))
return
await ctx.send('Export', file=discord.File('./members.csv'))
except Exception as e:
print_error(e)
await ctx.send(codify('Could not export data'))
@commands.command(pass_context=True)
async def officer_mode(self, ctx, status: str):
"""Turns officer mode on or off: Officer mode limits list and lookup to officers only"""
if status.lower() not in ['on', 'off']:
await ctx.send(codify('Command only accepts on or off'))
return
server = ctx.message.guild.id
roles = [r.name for r in ctx.message.author.roles]
officer_mode = True if status.lower() == 'on' else False
if ADMIN_USER not in roles:
await ctx.send(codify('Only officers can perform this action'))
return
try:
server_setting = ServerSettings.objects(server=server).first()
if not server_setting:
setting = ServerSettings(server=server, officer_mode=officer_mode)
setting.save()
else:
server_setting.update(officer_mode=officer_mode)
server_setting.save()
logActivity('Server settings updated on {}'.format(ctx.message.guild.name), ctx.message.author.name)
            await ctx.send(codify('Officer Mode successfully changed to {}'.format(status)))
except Exception as e:
print_error(e)
await ctx.send("Could not change officer mode to {}".format(status))
def setup(bot):
bot.add_cog(General(bot))
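# A minimal sketch (not part of the original cog) of how this extension would
# typically be loaded by a discord.py 1.x bot; the prefix, extension path and
# token below are placeholders:
#
# bot = commands.Bot(command_prefix='!')
# bot.load_extension('cogs.general')
# bot.run('YOUR_DISCORD_BOT_TOKEN')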
|
pachev/gsbot
|
cogs/general.py
|
Python
|
mit
| 4,517
|
import time
time.sleep(0.25)
contents = clipboard.get_selection()
if len(contents) > 20:
title = contents[0:17] + "..."
else:
title = contents
folder = engine.get_folder("My Phrases")
engine.create_phrase(folder, title, contents)
|
andresgomezvidal/autokey_scripts
|
data/Scripts/Sample_Scripts/Phrase from selection.py
|
Python
|
mit
| 237
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Dariusz Suchojad <dsuch at zato.io>
Licensed under the BSD 3-clause license, see LICENSE.txt for terms and conditions.
"""
#
# * Django-like Redis pagination - a drop-in replacement except for the __init__ method.
#
# * Originally part of Zato - ESB, SOA and cloud integrations in Python https://zato.io
#
# Django
from django.core.paginator import Paginator
# ##############################################################################
class _ListObjectList(object):
""" List-backed list of results to paginate.
"""
def __init__(self, conn, key, *ignored):
self.conn = conn
self.key = key
def __getslice__(self, start, stop):
return self.conn.lrange(self.key, start, stop-1)
def count(self):
return self.conn.llen(self.key)
class _ZSetObjectList(object):
""" Sorted set-backed list of results to paginate.
"""
def __init__(self, conn, key, score_min, score_max):
self.conn = conn
self.key = key
self.score_min = score_min
self.score_max = score_max
self._use_zrangebyscore = score_min != '-inf' or score_max != '+inf'
self._zrangebyscore_results = None
def _get_zrangebyscore(self):
if not self._zrangebyscore_results:
self._zrangebyscore_results = self.conn.zrangebyscore(self.key, self.score_min, self.score_max)
return self._zrangebyscore_results
def __getslice__(self, start, stop):
if self._use_zrangebyscore:
return self._get_zrangebyscore()[start:stop]
else:
return self.conn.zrange(self.key, start, stop-1)
def count(self):
if self._use_zrangebyscore:
return len(self._get_zrangebyscore())
else:
return self.conn.zcard(self.key)
# ##############################################################################
_source_type_object_list = {
'list': _ListObjectList,
'zset': _ZSetObjectList,
}
class RedisPaginator(Paginator):
""" A subclass of Django's paginator that can paginate results kept in Redis.
Data in Redis can be
1) a list,
2) sorted set or
3) a range of a sorted set's members with a score between min and max.
For 1) and 2) data won't be fetched prior to pagination
For 3) however the whole subset as specified by score_min and score_max will be fetched
locally the first time it's needed and any changes in Redis won't be reflected
in the paginator until a new one is created. This is needed because ZRANGEBYSCORE
doesn't provide means to learn how many results there are without first fetching
them so even though the command has a 'LIMIT offset count' parameter, it cannot
be used here.
conn - a connection handle to Redis (subclass of such as redis.StrictRedis)
key - Redis key where data is stored
per_page - how many results per page to return
orphans - as in Django
allow_empty_first_page - as in Django
    score_min - (ignored if key is a list) 'min' parameter to ZRANGEBYSCORE, defaults to '-inf'
    score_max - (ignored if key is a list) 'max' parameter to ZRANGEBYSCORE, defaults to '+inf'
source_type - must be either 'list' or 'zset' to indicate what datatype is kept under given key
"""
def __init__(self, conn, key, per_page, orphans=0, allow_empty_first_page=True, score_min='-inf', score_max='+inf', source_type=None):
object_list_class = _source_type_object_list[source_type]
object_list = object_list_class(conn, key, score_min, score_max)
super(RedisPaginator, self).__init__(object_list, per_page, orphans, allow_empty_first_page)
# ##############################################################################
class ListPaginator(RedisPaginator):
""" A paginator for Redis list. See parent class's docstring for details.
"""
def __init__(self, *args, **kwargs):
kwargs['source_type'] = 'list'
super(ListPaginator, self).__init__(*args, **kwargs)
class ZSetPaginator(RedisPaginator):
""" A paginator for Redis sorted sets. See parent class's docstring for details.
"""
def __init__(self, *args, **kwargs):
kwargs['source_type'] = 'zset'
super(ZSetPaginator, self).__init__(*args, **kwargs)
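# A minimal usage sketch (not part of the original module), assuming a local
# Redis server and a list already stored under the key 'my:list'; the paginator
# then behaves like Django's, exposing .count, .num_pages and .page():
if __name__ == '__main__':
    import redis
    conn = redis.StrictRedis()
    paginator = ListPaginator(conn, 'my:list', per_page=10)
    print('%s items across %s pages' % (paginator.count, paginator.num_pages))
    print(paginator.page(1).object_list)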
|
zatosource/zato-redis-paginator
|
src/zato/redis_paginator/__init__.py
|
Python
|
bsd-3-clause
| 4,457
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a skeleton file that can serve as a starting point for a Python
console script. To run this script uncomment the following line in the
entry_points section in setup.cfg:
console_scripts =
fibonacci = openchat.skeleton:run
Then run `python setup.py install` which will install the command `fibonacci`
inside your current environment.
Besides console scripts, the header (i.e. until _logger...) of this file can
also be used as template for Python modules.
Note: This skeleton file can be safely removed if not needed!
"""
from __future__ import division, print_function, absolute_import
import argparse
import sys
import logging
from openchat import __version__
__author__ = "Hobson Lane"
__copyright__ = "Hobson Lane"
__license__ = "mit"
_logger = logging.getLogger(__name__)
def fib(n):
"""Fibonacci example function
Args:
n (int): integer
Returns:
int: n-th Fibonacci number
"""
assert n > 0
a, b = 1, 1
for i in range(n-1):
a, b = b, a+b
return a
def parse_args(args):
"""Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser(
description="Just a Fibonnaci demonstration")
parser.add_argument(
'--version',
action='version',
version='openchat {ver}'.format(ver=__version__))
parser.add_argument(
dest="n",
help="n-th Fibonacci number",
type=int,
metavar="INT")
parser.add_argument(
'-v',
'--verbose',
dest="loglevel",
help="set loglevel to INFO",
action='store_const',
const=logging.INFO)
parser.add_argument(
'-vv',
'--very-verbose',
dest="loglevel",
help="set loglevel to DEBUG",
action='store_const',
const=logging.DEBUG)
return parser.parse_args(args)
def setup_logging(loglevel):
"""Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
logging.basicConfig(level=loglevel, stream=sys.stdout,
format=logformat, datefmt="%Y-%m-%d %H:%M:%S")
def main(args):
"""Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args = parse_args(args)
setup_logging(args.loglevel)
_logger.debug("Starting crazy calculations...")
print("The {}-th Fibonacci number is {}".format(args.n, fib(args.n)))
_logger.info("Script ends here")
def run():
"""Entry point for console_scripts
"""
main(sys.argv[1:])
if __name__ == "__main__":
run()
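# A minimal usage sketch (not part of the original skeleton): running the module
# directly computes the n-th Fibonacci number, e.g.
#   python skeleton.py 10 -v
# prints "The 10-th Fibonacci number is 55".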
|
totalgood/twote
|
openchat/skeleton.py
|
Python
|
mit
| 2,865
|
from . import db
class Service(db.Model):
__tablename__ = 'service'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False, unique=True)
def __init__(self, name):
self.name = name
def __repr__(self):
return '<Service %r>' % self.name
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return self.name
|
gaeun/open-event-orga-server
|
app/models/service.py
|
Python
|
gpl-3.0
| 430
|
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: nil; -*-
### BEGIN LICENSE
# Copyright (C) 2010-2012 Kevin Mehall <km@kevinmehall.net>
#This program is free software: you can redistribute it and/or modify it
#under the terms of the GNU General Public License version 3, as published
#by the Free Software Foundation.
#
#This program is distributed in the hope that it will be useful, but
#WITHOUT ANY WARRANTY; without even the implied warranties of
#MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
#PURPOSE. See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along
#with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from pithos.plugin import PithosPlugin
import sys
import logging
APP_ID = 'Pithos'
class MediaKeyPlugin(PithosPlugin):
preference = 'enable_mediakeys'
def bind_dbus(self):
try:
import dbus
except ImportError:
return False
try:
bus = dbus.Bus(dbus.Bus.TYPE_SESSION)
mk = bus.get_object("org.gnome.SettingsDaemon","/org/gnome/SettingsDaemon/MediaKeys")
mk.GrabMediaPlayerKeys(APP_ID, 0, dbus_interface='org.gnome.SettingsDaemon.MediaKeys')
mk.connect_to_signal("MediaPlayerKeyPressed", self.mediakey_pressed)
logging.info("Bound media keys with DBUS")
self.method = 'dbus'
return True
except dbus.DBusException:
return False
def mediakey_pressed(self, app, action):
if app == APP_ID:
if action == 'Play':
self.window.playpause_notify()
elif action == 'Next':
self.window.next_song()
elif action == 'Stop':
self.window.user_pause()
elif action == 'Previous':
self.window.bring_to_top()
def bind_keybinder(self):
try:
import gi
gi.require_version('Keybinder', '3.0')
# Gdk needed for Keybinder
from gi.repository import Keybinder, Gdk
Keybinder.init()
except:
return False
Keybinder.bind('XF86AudioPlay', self.window.playpause, None)
Keybinder.bind('XF86AudioStop', self.window.user_pause, None)
Keybinder.bind('XF86AudioNext', self.window.next_song, None)
Keybinder.bind('XF86AudioPrev', self.window.bring_to_top, None)
logging.info("Bound media keys with keybinder")
self.method = 'keybinder'
return True
def kbevent(self, event):
if event.KeyID == 179 or event.Key == 'Media_Play_Pause':
self.window.playpause_notify()
if event.KeyID == 176 or event.Key == 'Media_Next_Track':
self.window.next_song()
return True
def bind_win32(self):
try:
import pyHook
except ImportError:
logging.warning('Please install PyHook: http://www.lfd.uci.edu/~gohlke/pythonlibs/#pyhook')
return False
self.hookman = pyHook.HookManager()
self.hookman.KeyDown = self.kbevent
self.hookman.HookKeyboard()
return True
def on_enable(self):
if sys.platform == 'win32':
self.bind_win32() or logging.error("Could not bind media keys")
else:
self.bind_dbus() or self.bind_keybinder() or logging.error("Could not bind media keys")
def on_disable(self):
logging.error("Not implemented: Can't disable media keys")
|
kyleshrader/pithos
|
pithos/plugins/mediakeys.py
|
Python
|
gpl-3.0
| 3,626
|
import numpy as np
data = np.loadtxt('ecoli.csv', delimiter=',')
t_experiment = data[:,0]
N_experiment = data[:,1]
def error(p):
r = p[0]
T = 1200 # cell can divide after T sec
t_max = 5*T # 5 generations in experiment
t = np.linspace(0, t_max, len(t_experiment))
dt = (t[1] - t[0])
N = np.zeros(t.size)
N[0] = 100
for n in range(0, len(t)-1, 1):
N[n+1] = N[n] + r*dt*N[n]
e = np.sqrt(np.sum((N - N_experiment)**2))/N[0] # error measure
e = abs(N[-1] - N_experiment[-1])/N[0]
print 'r=', r, 'e=',e
return e
from scipy.optimize import minimize
p = minimize(error, [0.0006], tol=1E-5)
print p
|
CompPhysics/MachineLearning
|
doc/src/How2ReadData/src/ecoli_inverse.py
|
Python
|
cc0-1.0
| 659
|
from collections import defaultdict, namedtuple
AclPermission = namedtuple('AclPermission', 'app, action')
# Null rule. Only useful in tests really as no access group should have this.
NONE = AclPermission('None', 'None')
# A special wildcard permission to use when checking if someone has access to
# any admin, or if an admin is accessible by someone with any Admin:<something>
# permission.
ANY_ADMIN = AclPermission('Admin', '%')
# Another special permission, that only few (2-3) admins have. This grants
# access to anything.
SUPERPOWERS = AclPermission('*', '*')
# Can modify editorial content on the site.
ADMIN_CURATION = AclPermission('Admin', 'Curation')
# Can edit the properties of any add-on (pseudo-admin).
ADDONS_EDIT = AclPermission('Addons', 'Edit')
# Can view deleted add-ons in the API.
ADDONS_VIEW_DELETED = AclPermission('Addons', 'ViewDeleted')
# Can view only the reviewer tools.
REVIEWER_TOOLS_VIEW = AclPermission('ReviewerTools', 'View')
# Can view only the reviewer tools for unlisted add-ons.
REVIEWER_TOOLS_UNLISTED_VIEW = AclPermission('ReviewerTools', 'ViewUnlisted')
# These users gain access to the accounts API to super-create users.
ACCOUNTS_SUPER_CREATE = AclPermission('Accounts', 'SuperCreate')
# Can review a listed add-on.
ADDONS_REVIEW = AclPermission('Addons', 'Review')
# Can review an unlisted add-on.
ADDONS_REVIEW_UNLISTED = AclPermission('Addons', 'ReviewUnlisted')
# Can submit a content review for a listed add-on.
ADDONS_CONTENT_REVIEW = AclPermission('Addons', 'ContentReview')
# Can edit the message of the day in the reviewer tools.
ADDON_REVIEWER_MOTD_EDIT = AclPermission('AddonReviewerMOTD', 'Edit')
# Can review a static theme.
STATIC_THEMES_REVIEW = AclPermission('Addons', 'ThemeReview')
# Can review recommend(ed|able) add-ons
ADDONS_RECOMMENDED_REVIEW = AclPermission('Addons', 'RecommendedReview')
# Can edit all collections.
COLLECTIONS_EDIT = AclPermission('Collections', 'Edit')
# Can contribute to community managed collection: COLLECTION_FEATURED_THEMES_ID
COLLECTIONS_CONTRIBUTE = AclPermission('Collections', 'Contribute')
# Can view statistics for all addons, regardless of privacy settings.
STATS_VIEW = AclPermission('Stats', 'View')
# Can submit experiments.
EXPERIMENTS_SUBMIT = AclPermission('Experiments', 'submit')
# Can localize all locales.
LOCALIZER = AclPermission('Localizer', '%')
# Can edit user accounts.
USERS_EDIT = AclPermission('Users', 'Edit')
# Can moderate add-on ratings submitted by users.
RATINGS_MODERATE = AclPermission('Ratings', 'Moderate')
# Can access advanced reviewer features meant for admins, such as disabling an
# add-on or clearing needs admin review flags.
REVIEWS_ADMIN = AclPermission('Reviews', 'Admin')
# Can access advanced admin features, like deletion.
ADMIN_ADVANCED = AclPermission('Admin', 'Advanced')
# Can add/edit/delete DiscoveryItems.
DISCOVERY_EDIT = AclPermission('Discovery', 'Edit')
# Can list/access abuse reports
ABUSEREPORTS_EDIT = AclPermission('AbuseReports', 'Edit')
# Can submit language packs. #11788 and #11793
LANGPACK_SUBMIT = AclPermission('LanguagePack', 'Submit')
# Can submit add-ons signed with Mozilla internal certificate, or add-ons with
# a guid ending with reserved suffixes like @mozilla.com
SYSTEM_ADDON_SUBMIT = AclPermission('SystemAddon', 'Submit')
# Can automatically bypass trademark checks
TRADEMARK_BYPASS = AclPermission('Trademark', 'Bypass')
# Can create AppVersion instances
APPVERSIONS_CREATE = AclPermission('AppVersions', 'Create')
# Can access the scanners results admin.
ADMIN_SCANNERS_RESULTS_VIEW = AclPermission('Admin', 'ScannersResultsView')
# Can use "actions" on the scanners results.
ADMIN_SCANNERS_RESULTS_EDIT = AclPermission('Admin', 'ScannersResultsEdit')
# Can access the scanners rules admin.
ADMIN_SCANNERS_RULES_VIEW = AclPermission('Admin', 'ScannersRulesView')
# Can edit the scanners rules.
ADMIN_SCANNERS_RULES_EDIT = AclPermission('Admin', 'ScannersRulesEdit')
# Can edit things in the scanners query admin (code search).
ADMIN_SCANNERS_QUERY_EDIT = AclPermission('Admin', 'ScannersQueryEdit')
# Can view things the scanners query admin (code search).
ADMIN_SCANNERS_QUERY_VIEW = AclPermission('Admin', 'ScannersQueryView')
# Can access and edit the git extraction admin.
ADMIN_GIT_EXTRACTION_EDIT = AclPermission('Admin', 'GitExtractionEdit')
# Can create/edit a Block in the blocklist - the change may require signoff
BLOCKLIST_CREATE = AclPermission('Blocklist', 'Create')
# Can signoff a Block creation/edit submission
BLOCKLIST_SIGNOFF = AclPermission('Blocklist', 'Signoff')
# All permissions, for easy introspection
PERMISSIONS_LIST = [x for x in vars().values() if isinstance(x, AclPermission)]
# Mapping between django-style object permissions and our own. By default,
# require superuser admins (which also have all other permissions anyway) to do
# something, and then add some custom ones.
DJANGO_PERMISSIONS_MAPPING = defaultdict(lambda: SUPERPOWERS)
DJANGO_PERMISSIONS_MAPPING.update(
{
'abuse.change_abusereport': ABUSEREPORTS_EDIT,
'abuse.delete_abusereport': ADMIN_ADVANCED,
# Note that ActivityLog's ModelAdmin actually forbids deletion entirely.
# This is just here to allow deletion of users, because django checks
# foreign keys even though users are only soft-deleted and related objects
# will be kept.
'activity.delete_activitylog': ADMIN_ADVANCED,
'addons.change_addon': ADDONS_EDIT,
'addons.add_addonuser': ADMIN_ADVANCED,
'addons.change_addonuser': ADMIN_ADVANCED,
'addons.delete_addonuser': ADMIN_ADVANCED,
# Users with Admin:Curation can do anything to ReplacementAddon.
# In addition, the modeladmin will also check for Addons:Edit and give them
# read-only access to the changelist (obj=None passed to the
# has_change_permission() method)
'addons.change_replacementaddon': ADMIN_CURATION,
'addons.add_replacementaddon': ADMIN_CURATION,
'addons.delete_replacementaddon': ADMIN_CURATION,
'bandwagon.change_collection': COLLECTIONS_EDIT,
'bandwagon.delete_collection': ADMIN_ADVANCED,
'blocklist.add_block': BLOCKLIST_CREATE,
'blocklist.change_block': BLOCKLIST_CREATE,
'blocklist.delete_block': BLOCKLIST_CREATE,
'blocklist.view_block': REVIEWS_ADMIN,
'blocklist.add_blocklistsubmission': BLOCKLIST_CREATE,
'blocklist.change_blocklistsubmission': BLOCKLIST_CREATE,
'blocklist.signoff_blocklistsubmission': BLOCKLIST_SIGNOFF,
'blocklist.view_blocklistsubmission': REVIEWS_ADMIN,
'discovery.add_discoveryitem': DISCOVERY_EDIT,
'discovery.change_discoveryitem': DISCOVERY_EDIT,
'discovery.delete_discoveryitem': DISCOVERY_EDIT,
'discovery.add_homepageshelves': DISCOVERY_EDIT,
'discovery.change_homepageshelves': DISCOVERY_EDIT,
'discovery.delete_homepageshelves': DISCOVERY_EDIT,
'discovery.add_primaryheroimageupload': DISCOVERY_EDIT,
'discovery.change_primaryheroimageupload': DISCOVERY_EDIT,
'discovery.delete_primaryheroimageupload': DISCOVERY_EDIT,
'discovery.add_secondaryheroshelf': DISCOVERY_EDIT,
'discovery.change_secondaryheroshelf': DISCOVERY_EDIT,
'discovery.delete_secondaryheroshelf': DISCOVERY_EDIT,
'discovery.add_shelfmodule': DISCOVERY_EDIT,
'discovery.change_shelfmodule': DISCOVERY_EDIT,
'discovery.delete_shelfmodule': DISCOVERY_EDIT,
'discovery.add_promotedaddon': DISCOVERY_EDIT,
'discovery.change_promotedaddon': DISCOVERY_EDIT,
'discovery.delete_promotedaddon': DISCOVERY_EDIT,
'files.change_file': ADMIN_ADVANCED,
'git.change_gitextractionentry': ADMIN_GIT_EXTRACTION_EDIT,
'git.delete_gitextractionentry': ADMIN_GIT_EXTRACTION_EDIT,
'git.view_gitextractionentry': ADMIN_GIT_EXTRACTION_EDIT,
'hero.add_primaryhero': DISCOVERY_EDIT,
'hero.change_primaryhero': DISCOVERY_EDIT,
'hero.delete_primaryhero': DISCOVERY_EDIT,
'hero.add_secondaryheromodule': DISCOVERY_EDIT,
'hero.change_secondaryheromodule': DISCOVERY_EDIT,
'hero.delete_secondaryheromodule': DISCOVERY_EDIT,
'promoted.view_promotedapproval': DISCOVERY_EDIT,
'promoted.delete_promotedapproval': DISCOVERY_EDIT,
'reviewers.delete_reviewerscore': ADMIN_ADVANCED,
'scanners.add_scannerrule': ADMIN_SCANNERS_RULES_EDIT,
'scanners.change_scannerrule': ADMIN_SCANNERS_RULES_EDIT,
'scanners.delete_scannerrule': ADMIN_SCANNERS_RULES_EDIT,
'scanners.view_scannerrule': ADMIN_SCANNERS_RULES_VIEW,
'scanners.view_scannerresult': ADMIN_SCANNERS_RESULTS_VIEW,
'scanners.add_scannerqueryrule': ADMIN_SCANNERS_QUERY_EDIT,
'scanners.change_scannerqueryrule': ADMIN_SCANNERS_QUERY_EDIT,
'scanners.delete_scannerqueryrule': ADMIN_SCANNERS_QUERY_EDIT,
'scanners.change_scannerqueryresult': ADMIN_SCANNERS_QUERY_EDIT,
'scanners.view_scannerqueryrule': ADMIN_SCANNERS_QUERY_VIEW,
'scanners.view_scannerqueryresult': ADMIN_SCANNERS_QUERY_VIEW,
'users.change_userprofile': USERS_EDIT,
'users.delete_userprofile': ADMIN_ADVANCED,
        'users.add_disposableemaildomainrestriction': ADMIN_ADVANCED,
'users.add_emailuserrestriction': ADMIN_ADVANCED,
'users.add_ipnetworkuserrestriction': ADMIN_ADVANCED,
        'users.change_disposableemaildomainrestriction': ADMIN_ADVANCED,
'users.change_emailuserrestriction': ADMIN_ADVANCED,
'users.change_ipnetworkuserrestriction': ADMIN_ADVANCED,
        'users.delete_disposableemaildomainrestriction': ADMIN_ADVANCED,
'users.delete_emailuserrestriction': ADMIN_ADVANCED,
'users.delete_ipnetworkuserrestriction': ADMIN_ADVANCED,
'users.view_userrestrictionhistory': ADMIN_ADVANCED,
'ratings.change_rating': RATINGS_MODERATE,
'ratings.delete_rating': ADMIN_ADVANCED,
'versions.change_version': ADMIN_ADVANCED,
}
)
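# A minimal sketch (not part of the original module) of how the mapping above
# behaves: known django-style permissions resolve to their AclPermission, while
# any unknown key falls back to SUPERPOWERS via the defaultdict factory.
if __name__ == '__main__':
    assert DJANGO_PERMISSIONS_MAPPING['addons.change_addon'] is ADDONS_EDIT
    assert DJANGO_PERMISSIONS_MAPPING['made.up_permission'] is SUPERPOWERS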
|
bqbn/addons-server
|
src/olympia/constants/permissions.py
|
Python
|
bsd-3-clause
| 10,108
|
# -*- coding: utf-8 -*-
"""
.. _tut-artifact-overview:
Overview of artifact detection
==============================
This tutorial covers the basics of artifact detection, and introduces the
artifact detection tools available in MNE-Python.
.. contents:: Page contents
:local:
:depth: 2
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`:
"""
import os
import numpy as np
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(0, 60).load_data() # just use a fraction of data for speed here
###############################################################################
# What are artifacts?
# ^^^^^^^^^^^^^^^^^^^
#
# Artifacts are parts of the recorded signal that arise from sources other than
# the source of interest (i.e., neuronal activity in the brain). As such,
# artifacts are a form of interference or noise relative to the signal of
# interest. There are many possible causes of such interference, for example:
#
# - Environmental artifacts
# - Persistent oscillations centered around the `AC power line frequency`_
# (typically 50 or 60 Hz)
# - Brief signal jumps due to building vibration (such as a door slamming)
# - Electromagnetic field noise from nearby elevators, cell phones, the
# geomagnetic field, etc.
#
# - Instrumentation artifacts
# - Electromagnetic interference from stimulus presentation (such as EEG
# sensors picking up the field generated by unshielded headphones)
# - Continuous oscillations at specific frequencies used by head position
# indicator (HPI) coils
# - Random high-amplitude fluctuations (or alternatively, constant zero
# signal) in a single channel due to sensor malfunction (e.g., in surface
# electrodes, poor scalp contact)
#
# - Biological artifacts
# - Periodic `QRS`_-like signal patterns (especially in magnetometer
# channels) due to electrical activity of the heart
# - Short step-like deflections (especially in frontal EEG channels) due to
# eye movements
# - Large transient deflections (especially in frontal EEG channels) due to
# blinking
# - Brief bursts of high frequency fluctuations across several channels due
# to the muscular activity during swallowing
#
# There are also some cases where signals from within the brain can be
# considered artifactual. For example, if a researcher is primarily interested
# in the sensory response to a stimulus, but the experimental paradigm involves
# a behavioral response (such as button press), the neural activity associated
# with the planning and executing the button press could be considered an
# artifact relative to signal of interest (i.e., the evoked sensory response).
#
# .. note::
# Artifacts of the same genesis may appear different in recordings made by
# different EEG or MEG systems, due to differences in sensor design (e.g.,
# passive vs. active EEG electrodes; axial vs. planar gradiometers, etc).
#
#
# What to do about artifacts
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# There are 3 basic options when faced with artifacts in your recordings:
#
# 1. *Ignore* the artifact and carry on with analysis
# 2. *Exclude* the corrupted portion of the data and analyze the remaining data
# 3. *Repair* the artifact by suppressing artifactual part of the recording
# while (hopefully) leaving the signal of interest intact
#
# There are many different approaches to repairing artifacts, and MNE-Python
# includes a variety of tools for artifact repair, including digital filtering,
# independent components analysis (ICA), Maxwell filtering / signal-space
# separation (SSS), and signal-space projection (SSP). Separate tutorials
# demonstrate each of these techniques for artifact repair. Many of the
# artifact repair techniques work on both continuous (raw) data and on data
# that has already been epoched (though not necessarily equally well); some can
# be applied to `memory-mapped`_ data while others require the data to be
# copied into RAM. Of course, before you can choose any of these strategies you
# must first *detect* the artifacts, which is the topic of the next section.
#
#
# Artifact detection
# ^^^^^^^^^^^^^^^^^^
#
# MNE-Python includes a few tools for automated detection of certain artifacts
# (such as heartbeats and blinks), but of course you can always visually
# inspect your data to identify and annotate artifacts as well.
#
# We saw in :ref:`the introductory tutorial <tut-overview>` that the example
# data includes :term:`SSP projectors <projector>`, so before we look at
# artifacts let's set aside the projectors in a separate variable and then
# remove them from the :class:`~mne.io.Raw` object using the
# :meth:`~mne.io.Raw.del_proj` method, so that we can inspect our data in its
# original, raw state:
ssp_projectors = raw.info['projs']
raw.del_proj()
###############################################################################
# Low-frequency drifts
# ~~~~~~~~~~~~~~~~~~~~
#
# Low-frequency drifts are most readily detected by visual inspection using the
# basic :meth:`~mne.io.Raw.plot` method, though it is helpful to plot a
# relatively long time span and to disable channel-wise DC shift correction.
# Here we plot 60 seconds and show all the magnetometer channels:
mag_channels = mne.pick_types(raw.info, meg='mag')
raw.plot(duration=60, order=mag_channels, n_channels=len(mag_channels),
remove_dc=False)
###############################################################################
# Low-frequency drifts are readily removed by high-pass filtering at a fairly
# low cutoff frequency (the wavelength of the drifts seen above is probably
# around 20 seconds, so in this case a cutoff of 0.1 Hz would probably suppress
# most of the drift).
#
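# As a quick illustrative sketch (the 0.1 Hz cutoff used below is simply the
# value suggested above, not a definitive choice), such a high-pass filter
# could be applied to a copy of the data with :meth:`~mne.io.Raw.filter`:
raw_highpass = raw.copy().filter(l_freq=0.1, h_freq=None)
###############################################################################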
#
# Power line noise
# ~~~~~~~~~~~~~~~~
#
# Power line artifacts are easiest to see on plots of the spectrum, so we'll
# use :meth:`~mne.io.Raw.plot_psd` to illustrate.
fig = raw.plot_psd(tmax=np.inf, fmax=250, average=True)
# add some arrows at 60 Hz and its harmonics:
for ax in fig.axes[:2]:
freqs = ax.lines[-1].get_xdata()
psds = ax.lines[-1].get_ydata()
for freq in (60, 120, 180, 240):
idx = np.searchsorted(freqs, freq)
ax.arrow(x=freqs[idx], y=psds[idx] + 18, dx=0, dy=-12, color='red',
width=0.1, head_width=3, length_includes_head=True)
###############################################################################
# Here we see narrow frequency peaks at 60, 120, 180, and 240 Hz — the power
# line frequency of the USA (where the sample data was recorded) and its 2nd,
# 3rd, and 4th harmonics. Other peaks (around 25 to 30 Hz, and the second
# harmonic of those) are probably related to the heartbeat, which is more
# easily seen in the time domain using a dedicated heartbeat detection function
# as described in the next section.
#
#
# Heartbeat artifacts (ECG)
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# MNE-Python includes a dedicated function
# :func:`~mne.preprocessing.find_ecg_events` in the :mod:`mne.preprocessing`
# submodule, for detecting heartbeat artifacts from either dedicated ECG
# channels or from magnetometers (if no ECG channel is present). Additionally,
# the function :func:`~mne.preprocessing.create_ecg_epochs` will call
# :func:`~mne.preprocessing.find_ecg_events` under the hood, and use the
# resulting events array to extract epochs centered around the detected
# heartbeat artifacts. Here we create those epochs, then show an image plot of
# the detected ECG artifacts along with the average ERF across artifacts. We'll
# show all three channel types, even though EEG channels are less strongly
# affected by heartbeat artifacts:
# sphinx_gallery_thumbnail_number = 3
ecg_epochs = mne.preprocessing.create_ecg_epochs(raw)
ecg_epochs.plot_image(combine='mean')
###############################################################################
# The horizontal streaks in the magnetometer image plot reflect the fact that
# the heartbeat artifacts are superimposed on low-frequency drifts like the one
# we saw in an earlier section; to avoid this you could pass
# ``baseline=(-0.5, -0.2)`` in the call to
# :func:`~mne.preprocessing.create_ecg_epochs`.
# You can also get a quick look at the
# ECG-related field pattern across sensors by averaging the ECG epochs together
# via the :meth:`~mne.Epochs.average` method, and then using the
# :meth:`mne.Evoked.plot_topomap` method:
avg_ecg_epochs = ecg_epochs.average()
###############################################################################
# Here again we can visualize the spatial pattern of the associated field at
# various times relative to the peak of the ECG response:
avg_ecg_epochs.plot_topomap(times=np.linspace(-0.05, 0.05, 11))
###############################################################################
# Or, we can get an ERP/F plot with :meth:`~mne.Evoked.plot` or a combined
# scalp field maps and ERP/F plot with :meth:`~mne.Evoked.plot_joint`. Here
# we've specified the times for scalp field maps manually, but if not provided
# they will be chosen automatically based on peaks in the signal:
avg_ecg_epochs.plot_joint(times=[-0.25, -0.025, 0, 0.025, 0.25])
###############################################################################
# Ocular artifacts (EOG)
# ~~~~~~~~~~~~~~~~~~~~~~
#
# Similar to the ECG detection and epoching methods described above, MNE-Python
# also includes functions for detecting and extracting ocular artifacts:
# :func:`~mne.preprocessing.find_eog_events` and
# :func:`~mne.preprocessing.create_eog_epochs`. Once again we'll use the
# higher-level convenience function that automatically finds the artifacts and
# extracts them in to an :class:`~mne.Epochs` object in one step. Unlike the
# heartbeat artifacts seen above, ocular artifacts are usually most prominent
# in the EEG channels, but we'll still show all three channel types. We'll use
# the ``baseline`` parameter this time too; note that there are many fewer
# blinks than heartbeats, which makes the image plots appear somewhat blocky:
eog_epochs = mne.preprocessing.create_eog_epochs(raw, baseline=(-0.5, -0.2))
eog_epochs.plot_image(combine='mean')
eog_epochs.average().plot_joint()
###############################################################################
# Summary
# ^^^^^^^
#
# Familiarizing yourself with typical artifact patterns and magnitudes is a
# crucial first step in assessing the efficacy of later attempts to repair
# those artifacts. A good rule of thumb is that the artifact amplitudes should
# be orders of magnitude larger than your signal of interest — and there should
# be several occurrences of such events — in order to find signal
# decompositions that effectively estimate and repair the artifacts.
#
# Several other tutorials in this section illustrate the various tools for
# artifact repair, and discuss the pros and cons of each technique, for
# example:
#
# - :ref:`tut-artifact-ssp`
# - :ref:`tut-artifact-ica`
# - :ref:`tut-artifact-sss`
#
# There are also tutorials on general-purpose preprocessing steps such as
# :ref:`filtering and resampling <tut-filter-resample>` and :ref:`excluding
# bad channels <tut-bad-channels>` or :ref:`spans of data
# <tut-reject-data-spans>`.
#
# .. LINKS
#
# .. _`AC power line frequency`:
# https://en.wikipedia.org/wiki/Mains_electricity
# .. _`QRS`: https://en.wikipedia.org/wiki/QRS_complex
# .. _`memory-mapped`: https://en.wikipedia.org/wiki/Memory-mapped_file
|
Teekuningas/mne-python
|
tutorials/preprocessing/plot_10_preprocessing_overview.py
|
Python
|
bsd-3-clause
| 11,726
|
# coding=utf-8
from .tokenizer import tokenize
from collections import defaultdict, Counter
from operator import attrgetter
import sys
from tqdm import tqdm
class keydefaultdict(defaultdict):
def __missing__(self, key):
ret = self[key] = self.default_factory(key)
return ret
class Utterance:
def __init__(self, text):
self.text = text
self.count = 0
self.ngrams = set()
def overlaps(self, ngram1, ngram2):
#print(self.text, ngram1.text, ngram2.text)
inds1, inds2 = ngram1.utterances[self], ngram2.utterances[self]
ret = 0
for i1 in inds1:
for i2 in inds2:
#TODO verify all these exactly
if i2 <= i1 <= i1 + ngram1.n <= i2 + ngram2.n:
ret += 1
elif i1 <= i2 <= i2 + ngram2.n <= i1 + ngram1.n:
ret += 0
elif i1 <= i2 < i1 + ngram1.n:
ret += 1 # - (i2 - i1) / ngram1.n
elif i2 <= i1 < i2 + ngram2.n:
ret += 1 # (i1 - i2) / ngram1.n
return ret / (len(inds1) * len(inds2))
class NGram:
def __init__(self, text):
self.n = len(text)
self.text = text
self.utterances = defaultdict(set)
self._count = 0
self.entropy = 0
@property
def count(self):
return self._count
@count.setter
def count(self, value):
self._count = value
self.entropy = self._count * (self.n - 1)
def add(self, utterance, i):
self.count += utterance.count
self.utterances[utterance].add(i)
utterance.ngrams.add(self)
def __repr__(self):
return "'{0}': {1}".format(self.text, self.count)
class NGrams:
def __init__(self, counter):
self.ngrams = keydefaultdict(NGram)
utterances = keydefaultdict(Utterance)
for text, count in counter.items():
utterances[text].count = count
for utterance in tqdm(utterances.values(), 'enumerating ngrams'):
self.from_utterance(utterance)
def from_utterance(self, utterance):
N = len(utterance.text)
for i in range(N - 1):
for n in range(2, N + 1 - i):
self.ngrams[utterance.text[i:i+n]].add(utterance, i)
class SubwordSegmenter:
# TODO MAYBE allow segmentations like " aware " + "ness "
def __init__(self, counter, max_size, force_python=False):
self.cache = dict()
if not force_python:
try:
import julia
self.julia = julia.Julia()
self.julia.using("Revtok")
self.vocab = self.julia.buildvocab(counter, max_size)
return
except ImportError:
print('For faster subwords, please install Julia 0.6, pyjulia, '
'and Revtok.jl. Falling back to Python implementation...')
except Exception as e:
print(e)
print('for faster subwords, please install Revtok.jl. '
'Falling back to Python implementation...')
self.vocab = Counter(''.join(counter.keys())).most_common()
self.vocab.sort(key=lambda tup: (-tup[1], tup[0]))
self.vocab = dict(self.vocab)
ngrams = list(NGrams(counter).ngrams.values())
ngrams.sort(key=attrgetter('text'))
key = attrgetter('entropy')
for i in tqdm(range(max_size - len(self.vocab)), 'building subword vocab'):
ngrams.sort(key=key, reverse=True)
best = ngrams[0]
#print(best)
for utterance in best.utterances:
seen = set([best])
for ngram in utterance.ngrams:
if ngram not in seen:
ngram.count -= utterance.count * utterance.overlaps(ngram, best)
seen.add(ngram)
self.vocab[ngrams[0].text] = ngrams[0].entropy
ngrams = ngrams[1:]
self.julia = None
def __call__(self, utterance, use_julia=False):
if self.julia is not None and use_julia:
return self.julia.segment(utterance, self.vocab)
if isinstance(utterance, list):
return [tok for u in utterance for tok in self(u)]
if utterance in self.vocab:
return [utterance]
if utterance in self.cache:
return self.cache[utterance]
i, segments = 0, {0: []}
while True:
for j in range(i + 1, len(utterance) + 1):
potential_segment = utterance[i:j]
if len(potential_segment) == 1 or potential_segment in self.vocab:
#print(i, j, segments)
curlen = len(segments[j]) if j in segments else len(utterance) + 1
if len(segments[i]) + 1 < curlen:
segments[j] = segments[i] + [potential_segment]
#print(i, segments)
inds = sorted(segments.keys())
if inds.index(i) < len(inds) - 1:
i = inds[inds.index(i) + 1]
else:
break
ret = segments[len(utterance)]
ret = [sys.intern(seg) for seg in ret]
self.cache[utterance] = ret
return ret
class SubwordTokenizer:
def __init__(self, text, max_size):
corpus = tokenize(text, decap=True)
self.segmenter = SubwordSegmenter(Counter(corpus), max_size)
def __call__(self, text):
segments = map(self.segmenter, tokenize(text))
return [tok for word in segments for tok in word]
# #corpus = ['megabyte', 'gigabyte']
# train = tokenize("""
# """)
# test = tokenize("""
# """)
# vocab = build_vocab(train, 1000)
# print(vocab)
# segments = [segment(tok, vocab) for tok in tqdm(test, 'segmenting')]
# print(segments)
# segments = [tok for word in segments for tok in word]
# print(len(segments))
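# A minimal sketch (not part of the original module) using the classes defined
# above; the training text and vocabulary size are placeholder values:
#
# tokenizer = SubwordTokenizer("some training text for building the vocab", 1000)
# print(tokenizer("some new text"))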
|
jekbradbury/revtok
|
revtok/subwords.py
|
Python
|
mit
| 5,913
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ctypes
import ctypes.util
import logging
import os
import platform
import sys
import time
import threading
GET_TICK_COUNT_LAST_NOW = 0
# If the latest GetTickCount() sample is less than GET_TICK_COUNT_LAST_NOW, the
# clock has rolled over, and this needs to be accounted for.
GET_TICK_COUNT_WRAPAROUNDS = 0
# The current detected platform
DETECTED_PLATFORM = None
# Mapping of supported platforms and what is returned by sys.platform.
_PLATFORMS = {
'mac': 'darwin',
'linux': 'linux',
'windows': 'win32',
'cygwin': 'cygwin',
'freebsd': 'freebsd',
'sunos': 'sunos5',
'bsd': 'bsd'
}
# Mapping of what to pass get_clocktime based on platform.
_CLOCK_MONOTONIC = {
'linux': 1,
'freebsd': 4,
'bsd': 3,
'sunos5': 4
}
def GetMacNowFunction(plat):
""" Get a monotonic clock for the Mac platform.
Args:
plat: Platform that is being run on. Unused in GetMacNowFunction. Passed
      for consistency between initializers.
Returns:
Function pointer to monotonic clock for mac platform.
"""
del plat # Unused
global DETECTED_PLATFORM # pylint: disable=global-statement
DETECTED_PLATFORM = 'mac'
libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
class MachTimebaseInfoData(ctypes.Structure):
"""System timebase info. Defined in <mach/mach_time.h>."""
_fields_ = (('numer', ctypes.c_uint32),
('denom', ctypes.c_uint32))
mach_absolute_time = libc.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
timebase = MachTimebaseInfoData()
libc.mach_timebase_info(ctypes.byref(timebase))
ticks_per_second = timebase.numer / timebase.denom * 1.0e9
def GetMacNowFunctionImpl():
return mach_absolute_time() / ticks_per_second
return GetMacNowFunctionImpl
def GetClockGetTimeClockNumber(plat):
for key in _CLOCK_MONOTONIC:
if plat.startswith(key):
return _CLOCK_MONOTONIC[key]
  raise LookupError('Platform not in clock dictionary')
def GetLinuxNowFunction(plat):
""" Get a monotonic clock for linux platforms.
Args:
plat: Platform that is being run on.
Returns:
Function pointer to monotonic clock for linux platform.
"""
global DETECTED_PLATFORM # pylint: disable=global-statement
DETECTED_PLATFORM = 'linux'
clock_monotonic = GetClockGetTimeClockNumber(plat)
try:
# Attempt to find clock_gettime in the C library.
clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
use_errno=True).clock_gettime
except AttributeError:
    # If not able to find it in the C library, look in the rt library.
clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
use_errno=True).clock_gettime
class Timespec(ctypes.Structure):
"""Time specification, as described in clock_gettime(3)."""
_fields_ = (('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long))
def GetLinuxNowFunctionImpl():
ts = Timespec()
if clock_gettime(clock_monotonic, ctypes.pointer(ts)):
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno))
return ts.tv_sec + ts.tv_nsec / 1.0e9
return GetLinuxNowFunctionImpl
def IsQPCUsable():
""" Determines if system can query the performance counter.
The performance counter is a high resolution timer on windows systems.
Some chipsets have unreliable performance counters, so this checks that one
of those chipsets is not present.
Returns:
True if QPC is useable, false otherwise.
"""
# Sample output: 'Intel64 Family 6 Model 23 Stepping 6, GenuineIntel'
info = platform.processor()
if 'AuthenticAMD' in info and 'Family 15' in info:
return False
try: # If anything goes wrong during this, assume QPC isn't available.
frequency = ctypes.c_int64()
ctypes.windll.Kernel32.QueryPerformanceFrequency(
ctypes.byref(frequency))
if float(frequency.value) <= 0:
return False
except Exception: # pylint: disable=broad-except
logging.exception('Error when determining if QPC is usable.')
return False
return True
def GetWinNowFunction(plat):
""" Get a monotonic clock for windows platforms.
Args:
plat: Platform that is being run on.
Returns:
Function pointer to monotonic clock for windows platform.
"""
global DETECTED_PLATFORM # pylint: disable=global-statement
DETECTED_PLATFORM = 'windows'
if IsQPCUsable():
qpc_return = ctypes.c_int64()
qpc_frequency = ctypes.c_int64()
ctypes.windll.Kernel32.QueryPerformanceFrequency(
ctypes.byref(qpc_frequency))
qpc_frequency = float(qpc_frequency.value)
qpc = ctypes.windll.Kernel32.QueryPerformanceCounter
def GetWinNowFunctionImpl():
qpc(ctypes.byref(qpc_return))
return qpc_return.value / qpc_frequency
else:
kernel32 = (ctypes.cdll.kernel32
if plat.startswith(_PLATFORMS['cygwin'])
else ctypes.windll.kernel32)
get_tick_count_64 = getattr(kernel32, 'GetTickCount64', None)
# Windows Vista or newer
if get_tick_count_64:
get_tick_count_64.restype = ctypes.c_ulonglong
def GetWinNowFunctionImpl():
return get_tick_count_64() / 1000.0
else: # Pre Vista.
get_tick_count = kernel32.GetTickCount
get_tick_count.restype = ctypes.c_uint32
get_tick_count_lock = threading.Lock()
def GetWinNowFunctionImpl():
global GET_TICK_COUNT_LAST_NOW # pylint: disable=global-statement
global GET_TICK_COUNT_WRAPAROUNDS # pylint: disable=global-statement
with get_tick_count_lock:
current_sample = get_tick_count()
if current_sample < GET_TICK_COUNT_LAST_NOW:
GET_TICK_COUNT_WRAPAROUNDS += 1
GET_TICK_COUNT_LAST_NOW = current_sample
final_ms = GET_TICK_COUNT_WRAPAROUNDS << 32
final_ms += GET_TICK_COUNT_LAST_NOW
return final_ms / 1000.0
return GetWinNowFunctionImpl
def InitializeNowFunction(plat):
""" Get a monotonic clock for the current platform.
Args:
plat: Platform that is being run on.
Returns:
Function pointer to monotonic clock function for current platform.
"""
if plat.startswith(_PLATFORMS['mac']):
return GetMacNowFunction(plat)
elif (plat.startswith(_PLATFORMS['linux'])
or plat.startswith(_PLATFORMS['freebsd'])
or plat.startswith(_PLATFORMS['bsd'])
or plat.startswith(_PLATFORMS['sunos'])):
return GetLinuxNowFunction(plat)
elif (plat.startswith(_PLATFORMS['windows'])
or plat.startswith(_PLATFORMS['cygwin'])):
return GetWinNowFunction(plat)
else:
raise RuntimeError('%s is not a supported platform.' % plat)
def Now():
return monotonic()
monotonic = InitializeNowFunction(sys.platform)
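# A minimal usage sketch (not part of the original module): Now() returns
# seconds from the platform-specific monotonic clock selected above, so elapsed
# time can be measured by differencing two samples.
if __name__ == '__main__':
  start = Now()
  time.sleep(0.1)
  print('Elapsed: %f seconds' % (Now() - start))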
|
SummerLW/Perf-Insight-Report
|
tools/py_trace_event/py_trace_event/trace_time.py
|
Python
|
bsd-3-clause
| 6,948
|
"""
Python Zendesk is wrapper for the Zendesk API. This library provides an
easy and flexible way for developers to communicate with their Zendesk
account in their application.
Notes:
API THROTTLE is not handled in this library:
From Zendesk: The throttle will be applied once an API consumer
reaches a certain threshold in terms of a maximum of requests per
minute. Most clients will never hit this threshold, but those that do,
will get met by a HTTP 503 response code and a text body of
"Number of allowed API requests per minute exceeded"
TICKETS AS AN END-USER is not handled in this library:
There are a number of API calls for working with helpdesk tickets as
if you were the person (end-user) who originally created them. At the
        moment, this library is primarily for admins and agents to interact with.
FORUMS, ENTRIES, POSTS have not yet been implemented
"""
__author__ = "Max Gutman <max@eventbrite.com>"
__version__ = "1.1.1"
import re
import httplib2
import urllib
import base64
try:
import simplejson as json
except:
import json
# from httplib import responses
from .endpoints import mapping_table as mapping_table_v1
from .endpoints_v2 import mapping_table as mapping_table_v2
V2_COLLECTION_PARAMS = [
'page',
'per_page',
'sort_order',
]
class ZendeskError(Exception):
def __init__(self, msg, error_code=None):
self.msg = msg
self.error_code = error_code
        # Zendesk will throw a 401 response for an unauthenticated call
if self.error_code == 401:
raise AuthenticationError(self.msg)
def __str__(self):
return repr('%s: %s' % (self.error_code, self.msg))
class AuthenticationError(ZendeskError):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
re_identifier = re.compile(r".*/(?P<identifier>\d+)\.(json|xml)")
def get_id_from_url(url):
match = re_identifier.match(url)
if match and match.group('identifier'):
return match.group('identifier')
def clean_kwargs(kwargs):
"""Format the kwargs to conform to API"""
    for key, value in kwargs.items():
if hasattr(value, '__iter__'):
kwargs[key] = ','.join(map(str, value))
class Zendesk(object):
""" Python API Wrapper for Zendesk"""
def __init__(self, zendesk_url, zendesk_username=None,
zendesk_password=None, use_api_token=False, headers=None,
client_args={}, api_version=1):
"""
Instantiates an instance of Zendesk. Takes optional parameters for
HTTP Basic Authentication
Parameters:
zendesk_url - https://company.zendesk.com (use http if not SSL enabled)
zendesk_username - Specific to your Zendesk account (typically email)
zendesk_password - Specific to your Zendesk account or your account's
API token if use_api_token is True
use_api_token - use api token for authentication instead of user's
actual password
headers - Pass headers in dict form. This will override default.
client_args - Pass arguments to http client in dict form.
{'cache': False, 'timeout': 2}
            or a common one is to disable SSL certificate validation
{"disable_ssl_certificate_validation": True}
"""
self.data = None
# Set attributes necessary for API
self.zendesk_url = zendesk_url.rstrip('/')
self.zendesk_username = zendesk_username
if use_api_token:
self.zendesk_username += "/token"
self.zendesk_password = zendesk_password
# Set headers
self.headers = headers
if self.headers is None:
self.headers = {
'User-agent': 'Zendesk Python Library v%s' % __version__,
'Content-Type': 'application/json'
}
# Set http client and authentication
self.client = httplib2.Http(**client_args)
if (self.zendesk_username is not None and
self.zendesk_password is not None):
self.client.add_credentials(
self.zendesk_username,
self.zendesk_password
)
self.api_version = api_version
if self.api_version == 1:
self.mapping_table = mapping_table_v1
elif self.api_version == 2:
self.mapping_table = mapping_table_v2
else:
raise ValueError("Unsupported Zendesk API Version: %d" %
(self.api_version,))
def __getattr__(self, api_call):
"""
Instead of writing out each API endpoint as a method here or
        binding the API endpoints at instance runtime, we can simply
use an elegant Python technique to construct method execution on-
demand. We simply provide a mapping table between Zendesk API calls
and function names (with necessary parameters to replace
embedded keywords on GET or json data on POST/PUT requests).
        __getattr__() is implemented as a callback so that when an object
        tries to call a method which is not defined here, it looks for a
        relationship in the mapping table. The
table provides the structure of the API call and parameters passed
in the method will populate missing data.
TODO:
Should probably url-encode GET query parameters on replacement
"""
def call(self, **kwargs):
""" """
api_map = self.mapping_table[api_call]
path = api_map['path']
if self.api_version == 2:
path = "/api/v2" + path
method = api_map['method']
status = api_map['status']
valid_params = api_map.get('valid_params', ())
# Body can be passed from data or in args
body = kwargs.pop('data', None) or self.data
# Substitute mustache placeholders with data from keywords
url = re.sub(
'\{\{(?P<m>[a-zA-Z_]+)\}\}',
# Optional pagination parameters will default to blank
lambda m: "%s" % kwargs.pop(m.group(1), ''),
self.zendesk_url + path
)
            # Validate remaining kwargs against valid_params, then append
            # them to the url as a single encoded query string.
            for kw in kwargs:
                if (kw not in valid_params and
                        (self.api_version == 2 and
                         kw not in V2_COLLECTION_PARAMS)):
                    raise TypeError("%s() got an unexpected keyword argument "
                                    "'%s'" % (api_call, kw))
            if kwargs:
                clean_kwargs(kwargs)
                url += '?' + urllib.urlencode(kwargs)
# the 'search' endpoint in an open Zendesk site doesn't return a
# 401 to force authentication. Inject the credentials in the
# headers to ensure we get the results we're looking for
if re.match("^/search\..*", path):
self.headers["Authorization"] = "Basic %s" % (
base64.b64encode(self.zendesk_username + ':' +
self.zendesk_password))
elif "Authorization" in self.headers:
del(self.headers["Authorization"])
# Make an http request (data replacements are finalized)
response, content = \
self.client.request(
url,
method,
body=json.dumps(body),
headers=self.headers
)
# Use a response handler to determine success/fail
return self._response_handler(response, content, status)
        # The requested method is not defined in our mapping table either
if api_call not in self.mapping_table:
raise AttributeError('Method "%s" Does Not Exist' % api_call)
# Execute dynamic method and pass in keyword args as data to API call
return call.__get__(self)
@staticmethod
def _response_handler(response, content, status):
"""
Handle response as callback
If the response status is different from status defined in the
mapping table, then we assume an error and raise proper exception
Zendesk's response is sometimes the url of a newly created user/
ticket/group/etc and they pass this through 'location'. Otherwise,
the body of 'content' has our response.
"""
# Just in case
if not response:
raise ZendeskError('Response Not Found')
response_status = int(response.get('status', 0))
if response_status != status:
raise ZendeskError(content, response_status)
        # Deserialize JSON content if any exists. Zendesk sometimes returns
        # whitespace-only bodies, which we treat as empty.
if response.get('location'):
return response.get('location')
elif content.strip():
return json.loads(content)
# else:
# return responses[response_status]
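# --- Usage sketch (added for illustration; not part of the original file) ---
# The callable attribute names come from the version-specific mapping table
# (mapping_table_v1 / mapping_table_v2), so the exact method names below are
# an assumption; any entry in the table can be invoked the same way.
#
#   zd = Zendesk('https://company.zendesk.com', 'agent@company.com',
#                'my-api-token', use_api_token=True, api_version=2)
#   tickets = zd.list_tickets()          # GET; returns parsed JSON
#   zd.create_ticket(data={'ticket': {'subject': 'Printer on fire',
#                                     'comment': {'body': 'Help!'}}})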
|
concordusapps/zendesk-python3
|
zendesk/zendesk.py
|
Python
|
mit
| 9,282
|
import sys
from setuptools import setup
__version__ = ''
with open('streak_client/__init__.py') as inp:
for line in inp:
if (line.startswith('__version__')):
exec(line.strip())
break
setup( name='streak_client',
version= __version__,
description='simple flat client class for streak.com api with helper objects',
long_description = 'Streak API Client in Python',
url='http://github.com/mehmetg/streak_client',
author='Mehmet Gerceker',
author_email='mehmetg@msn.com',
license='MIT',
packages=['streak_client'],
package_dir={'streak_client': 'streak_client'},
keywords=( 'streak', 'api'),
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
provides=[],
install_requires=[ 'requests' ],
#message_extractors={},
#entry_points = {
# 'console_scripts': [
# 'streak_client = streak_client.streak_client:main',
# 'streak_client%s = streak_client.streak_client:main' % sys.version_info[0],
# ],
# 'distutil.commands': [
# 'streak_client = streak_client:main'
# ],
#},
zip_safe=False,
test_suite = 'streak_client_tests',
#tests_require = [],
#test_loader = '',
)
|
mehmetg/streak_client
|
setup.py
|
Python
|
mit
| 1,702
|
class ToolkitTask(object):
'''
A base class for provisioning and configuration operations that
should be executed by the task service.
To support pause and resume workflow, every ToolkitTask should
provide a serializable 'params' dict which contains all the details
it requires to execute when provided at a later point in time.
Every task may model its operations as subtasks of its own if it requires
pause and resume at the subtask level. The subtasks will get executed
serially in the order in which they have been added.
'''
def __init__(self, tkctx, params):
'''
Construct the task, giving the application+customer scoping context
in which the task should be executed.
Arguments:
tkctx - The toolkit context in which the task's operations should
be executed.
params - A dict with all the parameters required for the task
to execute. This is provided either by a cluster operation
or loaded from the task execution plan database because of a
resume operation request after it was paused.
'''
self.tkctx = tkctx
self._params = params
def params(self):
'''
Returns the params dict that contains all the information required
by this task to execute at any point of time.
'''
return self._params
def subtasks(self):
'''
Return the subtasks that comprise this task if the task is to be modelled
as a sequence of subtasks. If the task does not require or does not
prefer such modelling, it should just return None.
'''
return None
def execute(self):
'''
Execute the operations of this task.
'''
raise NotImplementedError('Subclasses should override execute')
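# Example subclass (illustrative sketch only; not part of the original module).
# It shows the expected pattern: everything the task needs in order to run --
# or to be resumed later -- lives in the serializable params dict.
class ExampleProvisionTask(ToolkitTask):
    '''Hypothetical task that "provisions" the nodes named in its params.'''
    def execute(self):
        for node in self._params.get('nodes', []):
            # A real task would call into the toolkit context (self.tkctx) here.
            print('provisioning %s' % node)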
|
pathbreak/linode-cluster-toolkit
|
lct/tasks/toolkit_task.py
|
Python
|
mit
| 2,022
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from scrapy.exceptions import DropItem
class PrecioPipeline(object):
    # Tax rate, expressed as a percentage (18%).
    impuesto = 18.0
    def process_item(self, item, spider):
        if item['precio']:
            if item['precio_sin_tasa']:
                # Apply the tax on top of the pre-tax price (18% -> factor 1.18).
                item['precio'] = item['precio'] * (1 + self.impuesto / 100.0)
            return item
        else:
            raise DropItem("The item has no price at all: %s" % item)
|
francrodriguez/scrapy-FrancRodriguez
|
ejemplos/pipeline.py
|
Python
|
lgpl-3.0
| 429
|
import errno
import os
import sys
import time
import traceback
import types
import warnings
from eventlet.green import BaseHTTPServer
from eventlet.green import socket
from eventlet.green import urllib
from eventlet import greenio
from eventlet import greenpool
from eventlet import support
from eventlet.support import six
DEFAULT_MAX_SIMULTANEOUS_REQUESTS = 1024
DEFAULT_MAX_HTTP_VERSION = 'HTTP/1.1'
MAX_REQUEST_LINE = 8192
MAX_HEADER_LINE = 8192
MAX_TOTAL_HEADER_SIZE = 65536
MINIMUM_CHUNK_SIZE = 4096
# %(client_port)s is also available
DEFAULT_LOG_FORMAT = ('%(client_ip)s - - [%(date_time)s] "%(request_line)s"'
' %(status_code)s %(body_length)s %(wall_seconds).6f')
is_accepting = True
__all__ = ['server', 'format_date_time']
# Weekday and month names for HTTP date/time formatting; always English!
_weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
_monthname = [None, # Dummy so we can use 1-based month numbers
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
def format_date_time(timestamp):
"""Formats a unix timestamp into an HTTP standard string."""
year, month, day, hh, mm, ss, wd, _y, _z = time.gmtime(timestamp)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
_weekdayname[wd], day, _monthname[month], year, hh, mm, ss
)
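# e.g. format_date_time(0) -> 'Thu, 01 Jan 1970 00:00:00 GMT'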
# Collections of error codes to compare against. Not all attributes are set
# on errno module on all platforms, so some are literals :(
BAD_SOCK = set((errno.EBADF, 10053))
BROKEN_SOCK = set((errno.EPIPE, errno.ECONNRESET))
# special flag return value for apps
class _AlreadyHandled(object):
def __iter__(self):
return self
def next(self):
raise StopIteration
__next__ = next
ALREADY_HANDLED = _AlreadyHandled()
class Input(object):
def __init__(self,
rfile,
content_length,
sock,
wfile=None,
wfile_line=None,
chunked_input=False):
self.rfile = rfile
self._sock = sock
if content_length is not None:
content_length = int(content_length)
self.content_length = content_length
self.wfile = wfile
self.wfile_line = wfile_line
self.position = 0
self.chunked_input = chunked_input
self.chunk_length = -1
# (optional) headers to send with a "100 Continue" response. Set by
        # calling set_hundred_continue_response_headers() on env['wsgi.input']
self.hundred_continue_headers = None
self.is_hundred_continue_response_sent = False
def send_hundred_continue_response(self):
towrite = []
# 100 Continue status line
towrite.append(self.wfile_line)
# Optional headers
if self.hundred_continue_headers is not None:
# 100 Continue headers
for header in self.hundred_continue_headers:
towrite.append(six.b('%s: %s\r\n' % header))
# Blank line
towrite.append(b'\r\n')
self.wfile.writelines(towrite)
# Reinitialize chunk_length (expect more data)
self.chunk_length = -1
def _do_read(self, reader, length=None):
if self.wfile is not None and \
not self.is_hundred_continue_response_sent:
# 100 Continue response
self.send_hundred_continue_response()
self.is_hundred_continue_response_sent = True
if length is None and self.content_length is not None:
length = self.content_length - self.position
if length and length > self.content_length - self.position:
length = self.content_length - self.position
if not length:
return b''
try:
read = reader(length)
except greenio.SSL.ZeroReturnError:
read = b''
self.position += len(read)
return read
def _chunked_read(self, rfile, length=None, use_readline=False):
if self.wfile is not None and \
not self.is_hundred_continue_response_sent:
# 100 Continue response
self.send_hundred_continue_response()
self.is_hundred_continue_response_sent = True
try:
if length == 0:
return ""
if length and length < 0:
length = None
if use_readline:
reader = self.rfile.readline
else:
reader = self.rfile.read
response = []
while self.chunk_length != 0:
maxreadlen = self.chunk_length - self.position
if length is not None and length < maxreadlen:
maxreadlen = length
if maxreadlen > 0:
data = reader(maxreadlen)
if not data:
self.chunk_length = 0
raise IOError("unexpected end of file while parsing chunked data")
datalen = len(data)
response.append(data)
self.position += datalen
if self.chunk_length == self.position:
rfile.readline()
if length is not None:
length -= datalen
if length == 0:
break
if use_readline and data[-1] == "\n":
break
else:
self.chunk_length = int(rfile.readline().split(b";", 1)[0], 16)
self.position = 0
if self.chunk_length == 0:
rfile.readline()
except greenio.SSL.ZeroReturnError:
pass
return b''.join(response)
def read(self, length=None):
if self.chunked_input:
return self._chunked_read(self.rfile, length)
return self._do_read(self.rfile.read, length)
def readline(self, size=None):
if self.chunked_input:
return self._chunked_read(self.rfile, size, True)
else:
return self._do_read(self.rfile.readline, size)
def readlines(self, hint=None):
return self._do_read(self.rfile.readlines, hint)
def __iter__(self):
return iter(self.read, b'')
def get_socket(self):
return self._sock
def set_hundred_continue_response_headers(self, headers,
capitalize_response_headers=True):
# Response headers capitalization (default)
# CONTent-TYpe: TExt/PlaiN -> Content-Type: TExt/PlaiN
        # Per the HTTP RFC, header names are case-insensitive.
        # Please fix your client to ignore header case if possible.
if capitalize_response_headers:
headers = [
('-'.join([x.capitalize() for x in key.split('-')]), value)
for key, value in headers]
self.hundred_continue_headers = headers
class HeaderLineTooLong(Exception):
pass
class HeadersTooLarge(Exception):
pass
class FileObjectForHeaders(object):
def __init__(self, fp):
self.fp = fp
self.total_header_size = 0
def readline(self, size=-1):
sz = size
if size < 0:
sz = MAX_HEADER_LINE
rv = self.fp.readline(sz)
if len(rv) >= MAX_HEADER_LINE:
raise HeaderLineTooLong()
self.total_header_size += len(rv)
if self.total_header_size > MAX_TOTAL_HEADER_SIZE:
raise HeadersTooLarge()
return rv
class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
minimum_chunk_size = MINIMUM_CHUNK_SIZE
capitalize_response_headers = True
def setup(self):
# overriding SocketServer.setup to correctly handle SSL.Connection objects
conn = self.connection = self.request
try:
self.rfile = conn.makefile('rb', self.rbufsize)
self.wfile = conn.makefile('wb', self.wbufsize)
except (AttributeError, NotImplementedError):
if hasattr(conn, 'send') and hasattr(conn, 'recv'):
# it's an SSL.Connection
self.rfile = socket._fileobject(conn, "rb", self.rbufsize)
self.wfile = socket._fileobject(conn, "wb", self.wbufsize)
else:
# it's a SSLObject, or a martian
raise NotImplementedError("wsgi.py doesn't support sockets "
"of type %s" % type(conn))
def handle_one_request(self):
if self.server.max_http_version:
self.protocol_version = self.server.max_http_version
if self.rfile.closed:
self.close_connection = 1
return
try:
self.raw_requestline = self.rfile.readline(self.server.url_length_limit)
if len(self.raw_requestline) == self.server.url_length_limit:
self.wfile.write(
b"HTTP/1.0 414 Request URI Too Long\r\n"
b"Connection: close\r\nContent-length: 0\r\n\r\n")
self.close_connection = 1
return
except greenio.SSL.ZeroReturnError:
self.raw_requestline = ''
except socket.error as e:
if support.get_errno(e) not in BAD_SOCK:
raise
self.raw_requestline = ''
if not self.raw_requestline:
self.close_connection = 1
return
orig_rfile = self.rfile
try:
self.rfile = FileObjectForHeaders(self.rfile)
if not self.parse_request():
return
except HeaderLineTooLong:
self.wfile.write(
b"HTTP/1.0 400 Header Line Too Long\r\n"
b"Connection: close\r\nContent-length: 0\r\n\r\n")
self.close_connection = 1
return
except HeadersTooLarge:
self.wfile.write(
b"HTTP/1.0 400 Headers Too Large\r\n"
b"Connection: close\r\nContent-length: 0\r\n\r\n")
self.close_connection = 1
return
finally:
self.rfile = orig_rfile
content_length = self.headers.get('content-length')
if content_length:
try:
int(content_length)
except ValueError:
self.wfile.write(
b"HTTP/1.0 400 Bad Request\r\n"
b"Connection: close\r\nContent-length: 0\r\n\r\n")
self.close_connection = 1
return
self.environ = self.get_environ()
self.application = self.server.app
try:
self.server.outstanding_requests += 1
try:
self.handle_one_response()
except socket.error as e:
# Broken pipe, connection reset by peer
if support.get_errno(e) not in BROKEN_SOCK:
raise
finally:
self.server.outstanding_requests -= 1
def handle_one_response(self):
start = time.time()
headers_set = []
headers_sent = []
wfile = self.wfile
result = None
use_chunked = [False]
length = [0]
status_code = [200]
def write(data, _writelines=wfile.writelines):
towrite = []
if not headers_set:
raise AssertionError("write() before start_response()")
elif not headers_sent:
status, response_headers = headers_set
headers_sent.append(1)
header_list = [header[0].lower() for header in response_headers]
towrite.append(six.b('%s %s\r\n' % (self.protocol_version, status)))
for header in response_headers:
towrite.append(six.b('%s: %s\r\n' % header))
# send Date header?
if 'date' not in header_list:
towrite.append(six.b('Date: %s\r\n' % (format_date_time(time.time()),)))
client_conn = self.headers.get('Connection', '').lower()
send_keep_alive = False
if self.close_connection == 0 and \
self.server.keepalive and (client_conn == 'keep-alive' or
(self.request_version == 'HTTP/1.1' and
not client_conn == 'close')):
# only send keep-alives back to clients that sent them,
# it's redundant for 1.1 connections
send_keep_alive = (client_conn == 'keep-alive')
self.close_connection = 0
else:
self.close_connection = 1
if 'content-length' not in header_list:
if self.request_version == 'HTTP/1.1':
use_chunked[0] = True
towrite.append(b'Transfer-Encoding: chunked\r\n')
elif 'content-length' not in header_list:
# client is 1.0 and therefore must read to EOF
self.close_connection = 1
if self.close_connection:
towrite.append(b'Connection: close\r\n')
elif send_keep_alive:
towrite.append(b'Connection: keep-alive\r\n')
towrite.append(b'\r\n')
# end of header writing
if use_chunked[0]:
# Write the chunked encoding
towrite.append(six.b("%x" % (len(data),)) + b"\r\n" + data + b"\r\n")
else:
towrite.append(data)
try:
_writelines(towrite)
length[0] = length[0] + sum(map(len, towrite))
except UnicodeEncodeError:
self.server.log_message(
"Encountered non-ascii unicode while attempting to write"
"wsgi response: %r" %
[x for x in towrite if isinstance(x, six.text_type)])
self.server.log_message(traceback.format_exc())
_writelines(
["HTTP/1.1 500 Internal Server Error\r\n",
"Connection: close\r\n",
"Content-type: text/plain\r\n",
"Content-length: 98\r\n",
"Date: %s\r\n" % format_date_time(time.time()),
"\r\n",
("Internal Server Error: wsgi application passed "
"a unicode object to the server instead of a string.")])
def start_response(status, response_headers, exc_info=None):
status_code[0] = status.split()[0]
if exc_info:
try:
if headers_sent:
# Re-raise original exception if headers sent
six.reraise(exc_info[0], exc_info[1], exc_info[2])
finally:
# Avoid dangling circular ref
exc_info = None
# Response headers capitalization
# CONTent-TYpe: TExt/PlaiN -> Content-Type: TExt/PlaiN
            # Per the HTTP RFC, header names are case-insensitive.
            # Please fix your client to ignore header case if possible.
if self.capitalize_response_headers:
response_headers = [
('-'.join([x.capitalize() for x in key.split('-')]), value)
for key, value in response_headers]
headers_set[:] = [status, response_headers]
return write
try:
try:
result = self.application(self.environ, start_response)
if (isinstance(result, _AlreadyHandled)
or isinstance(getattr(result, '_obj', None), _AlreadyHandled)):
self.close_connection = 1
return
# Set content-length if possible
if not headers_sent and hasattr(result, '__len__') and \
'Content-Length' not in [h for h, _v in headers_set[1]]:
headers_set[1].append(('Content-Length', str(sum(map(len, result)))))
towrite = []
towrite_size = 0
just_written_size = 0
minimum_write_chunk_size = int(self.environ.get(
'eventlet.minimum_write_chunk_size', self.minimum_chunk_size))
for data in result:
towrite.append(data)
towrite_size += len(data)
if towrite_size >= minimum_write_chunk_size:
write(b''.join(towrite))
towrite = []
just_written_size = towrite_size
towrite_size = 0
if towrite:
just_written_size = towrite_size
write(b''.join(towrite))
if not headers_sent or (use_chunked[0] and just_written_size):
write(b'')
except Exception:
self.close_connection = 1
tb = traceback.format_exc()
self.server.log_message(tb)
if not headers_set:
err_body = six.b(tb) if self.server.debug else b''
start_response("500 Internal Server Error",
[('Content-type', 'text/plain'),
('Content-length', len(err_body))])
write(err_body)
finally:
if hasattr(result, 'close'):
result.close()
if (self.environ['eventlet.input'].chunked_input or
self.environ['eventlet.input'].position
< (self.environ['eventlet.input'].content_length or 0)):
# Read and discard body if there was no pending 100-continue
if not self.environ['eventlet.input'].wfile:
                    # NOTE: MINIMUM_CHUNK_SIZE is used here for a purpose
                    # different from chunking; it is simply a reasonable
                    # read size for draining the remaining request body.
while self.environ['eventlet.input'].read(MINIMUM_CHUNK_SIZE):
pass
finish = time.time()
for hook, args, kwargs in self.environ['eventlet.posthooks']:
hook(self.environ, *args, **kwargs)
if self.server.log_output:
self.server.log_message(self.server.log_format % {
'client_ip': self.get_client_ip(),
'client_port': self.client_address[1],
'date_time': self.log_date_time_string(),
'request_line': self.requestline,
'status_code': status_code[0],
'body_length': length[0],
'wall_seconds': finish - start,
})
def get_client_ip(self):
client_ip = self.client_address[0]
if self.server.log_x_forwarded_for:
forward = self.headers.get('X-Forwarded-For', '').replace(' ', '')
if forward:
client_ip = "%s,%s" % (forward, client_ip)
return client_ip
def get_environ(self):
env = self.server.get_environ()
env['REQUEST_METHOD'] = self.command
env['SCRIPT_NAME'] = ''
pq = self.path.split('?', 1)
env['RAW_PATH_INFO'] = pq[0]
env['PATH_INFO'] = urllib.unquote(pq[0])
if len(pq) > 1:
env['QUERY_STRING'] = pq[1]
ct = self.headers.get('content-type')
if ct is None:
try:
ct = self.headers.type
except AttributeError:
ct = self.headers.get_content_type()
env['CONTENT_TYPE'] = ct
length = self.headers.get('content-length')
if length:
env['CONTENT_LENGTH'] = length
env['SERVER_PROTOCOL'] = 'HTTP/1.0'
host, port = self.request.getsockname()[:2]
env['SERVER_NAME'] = host
env['SERVER_PORT'] = str(port)
env['REMOTE_ADDR'] = self.client_address[0]
env['REMOTE_PORT'] = str(self.client_address[1])
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
try:
headers = self.headers.headers
except AttributeError:
headers = self.headers._headers
else:
headers = [h.split(':', 1) for h in headers]
for k, v in headers:
k = k.replace('-', '_').upper()
v = v.strip()
if k in env:
continue
envk = 'HTTP_' + k
if envk in env:
env[envk] += ',' + v
else:
env[envk] = v
if env.get('HTTP_EXPECT') == '100-continue':
wfile = self.wfile
wfile_line = b'HTTP/1.1 100 Continue\r\n'
else:
wfile = None
wfile_line = None
chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'
env['wsgi.input'] = env['eventlet.input'] = Input(
self.rfile, length, self.connection, wfile=wfile, wfile_line=wfile_line,
chunked_input=chunked)
env['eventlet.posthooks'] = []
return env
def finish(self):
try:
BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
except socket.error as e:
# Broken pipe, connection reset by peer
if support.get_errno(e) not in BROKEN_SOCK:
raise
greenio.shutdown_safe(self.connection)
self.connection.close()
def handle_expect_100(self):
return True
class Server(BaseHTTPServer.HTTPServer):
def __init__(self,
socket,
address,
app,
log=None,
environ=None,
max_http_version=None,
protocol=HttpProtocol,
minimum_chunk_size=None,
log_x_forwarded_for=True,
keepalive=True,
log_output=True,
log_format=DEFAULT_LOG_FORMAT,
url_length_limit=MAX_REQUEST_LINE,
debug=True,
socket_timeout=None,
capitalize_response_headers=True):
self.outstanding_requests = 0
self.socket = socket
self.address = address
if log:
self.log = log
else:
self.log = sys.stderr
self.app = app
self.keepalive = keepalive
self.environ = environ
self.max_http_version = max_http_version
self.protocol = protocol
self.pid = os.getpid()
self.minimum_chunk_size = minimum_chunk_size
self.log_x_forwarded_for = log_x_forwarded_for
self.log_output = log_output
self.log_format = log_format
self.url_length_limit = url_length_limit
self.debug = debug
self.socket_timeout = socket_timeout
self.capitalize_response_headers = capitalize_response_headers
if not self.capitalize_response_headers:
warnings.warn("""capitalize_response_headers is disabled.
            Please make sure you know what you are doing.
            HTTP header names are case-insensitive per the RFC.
            Most likely you need to fix HTTP parsing in your client software.""",
DeprecationWarning, stacklevel=3)
def get_environ(self):
d = {
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.multithread': True,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
}
# detect secure socket
if hasattr(self.socket, 'do_handshake'):
d['wsgi.url_scheme'] = 'https'
d['HTTPS'] = 'on'
if self.environ is not None:
d.update(self.environ)
return d
def process_request(self, sock_params):
        # The actual request handling takes place in __init__, so we need to
        # set minimum_chunk_size before __init__ executes, and we don't want
        # to modify the class variable.
sock, address = sock_params
proto = new(self.protocol)
if self.minimum_chunk_size is not None:
proto.minimum_chunk_size = self.minimum_chunk_size
proto.capitalize_response_headers = self.capitalize_response_headers
try:
proto.__init__(sock, address, self)
except socket.timeout:
# Expected exceptions are not exceptional
sock.close()
if self.debug:
# similar to logging "accepted" in server()
self.log_message('(%s) timed out %r' % (self.pid, address))
def log_message(self, message):
self.log.write(message + '\n')
try:
new = types.InstanceType
except AttributeError:
new = lambda cls: cls.__new__(cls)
try:
import ssl
ACCEPT_EXCEPTIONS = (socket.error, ssl.SSLError)
ACCEPT_ERRNO = set((errno.EPIPE, errno.EBADF, errno.ECONNRESET,
ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_SSL))
except ImportError:
ACCEPT_EXCEPTIONS = (socket.error,)
ACCEPT_ERRNO = set((errno.EPIPE, errno.EBADF, errno.ECONNRESET))
def server(sock, site,
log=None,
environ=None,
max_size=None,
max_http_version=DEFAULT_MAX_HTTP_VERSION,
protocol=HttpProtocol,
server_event=None,
minimum_chunk_size=None,
log_x_forwarded_for=True,
custom_pool=None,
keepalive=True,
log_output=True,
log_format=DEFAULT_LOG_FORMAT,
url_length_limit=MAX_REQUEST_LINE,
debug=True,
socket_timeout=None,
capitalize_response_headers=True):
"""Start up a WSGI server handling requests from the supplied server
socket. This function loops forever. The *sock* object will be
closed after server exits, but the underlying file descriptor will
remain open, so if you have a dup() of *sock*, it will remain usable.
.. warning::
At the moment :func:`server` will always wait for active connections to finish before
exiting, even if there's an exception raised inside it
(*all* exceptions are handled the same way, including :class:`greenlet.GreenletExit`
and those inheriting from `BaseException`).
While this may not be an issue normally, when it comes to long running HTTP connections
(like :mod:`eventlet.websocket`) it will become problematic and calling
:meth:`~eventlet.greenthread.GreenThread.wait` on a thread that runs the server may hang,
even after using :meth:`~eventlet.greenthread.GreenThread.kill`, as long
as there are active connections.
:param sock: Server socket, must be already bound to a port and listening.
:param site: WSGI application function.
:param log: File-like object that logs should be written to.
If not specified, sys.stderr is used.
:param environ: Additional parameters that go into the environ dictionary of every request.
:param max_size: Maximum number of client connections opened at any time by this server.
:param max_http_version: Set to "HTTP/1.0" to make the server pretend it only supports HTTP 1.0.
This can help with applications or clients that don't behave properly using HTTP 1.1.
:param protocol: Protocol class. Deprecated.
:param server_event: Used to collect the Server object. Deprecated.
:param minimum_chunk_size: Minimum size in bytes for http chunks. This can be used to improve
performance of applications which yield many small strings, though
using it technically violates the WSGI spec. This can be overridden
on a per request basis by setting environ['eventlet.minimum_write_chunk_size'].
:param log_x_forwarded_for: If True (the default), logs the contents of the x-forwarded-for
header in addition to the actual client ip address in the 'client_ip' field of the
log line.
:param custom_pool: A custom GreenPool instance which is used to spawn client green threads.
If this is supplied, max_size is ignored.
:param keepalive: If set to False, disables keepalives on the server; all connections will be
closed after serving one request.
:param log_output: A Boolean indicating if the server will log data or not.
:param log_format: A python format string that is used as the template to generate log lines.
The following values can be formatted into it: client_ip, date_time, request_line,
status_code, body_length, wall_seconds. The default is a good example of how to
use it.
:param url_length_limit: A maximum allowed length of the request url. If exceeded, 414 error
is returned.
:param debug: True if the server should send exception tracebacks to the clients on 500 errors.
If False, the server will respond with empty bodies.
:param socket_timeout: Timeout for client connections' socket operations. Default None means
wait forever.
:param capitalize_response_headers: Normalize response headers' names to Foo-Bar.
Default is True.
"""
serv = Server(sock, sock.getsockname(),
site, log,
environ=environ,
max_http_version=max_http_version,
protocol=protocol,
minimum_chunk_size=minimum_chunk_size,
log_x_forwarded_for=log_x_forwarded_for,
keepalive=keepalive,
log_output=log_output,
log_format=log_format,
url_length_limit=url_length_limit,
debug=debug,
socket_timeout=socket_timeout,
capitalize_response_headers=capitalize_response_headers,
)
if server_event is not None:
server_event.send(serv)
if max_size is None:
max_size = DEFAULT_MAX_SIMULTANEOUS_REQUESTS
if custom_pool is not None:
pool = custom_pool
else:
pool = greenpool.GreenPool(max_size)
try:
host, port = sock.getsockname()[:2]
port = ':%s' % (port, )
if hasattr(sock, 'do_handshake'):
scheme = 'https'
if port == ':443':
port = ''
else:
scheme = 'http'
if port == ':80':
port = ''
serv.log.write("(%s) wsgi starting up on %s://%s%s/\n" % (
serv.pid, scheme, host, port))
while is_accepting:
try:
client_socket = sock.accept()
client_socket[0].settimeout(serv.socket_timeout)
if debug:
serv.log.write("(%s) accepted %r\n" % (
serv.pid, client_socket[1]))
try:
pool.spawn_n(serv.process_request, client_socket)
except AttributeError:
warnings.warn("wsgi's pool should be an instance of "
"eventlet.greenpool.GreenPool, is %s. Please convert your"
" call site to use GreenPool instead" % type(pool),
DeprecationWarning, stacklevel=2)
pool.execute_async(serv.process_request, client_socket)
except ACCEPT_EXCEPTIONS as e:
if support.get_errno(e) not in ACCEPT_ERRNO:
raise
except (KeyboardInterrupt, SystemExit):
serv.log.write("wsgi exiting\n")
break
finally:
pool.waitall()
serv.log.write("(%s) wsgi exited, is_accepting=%s\n" % (
serv.pid, is_accepting))
try:
# NOTE: It's not clear whether we want this to leave the
# socket open or close it. Use cases like Spawning want
# the underlying fd to remain open, but if we're going
# that far we might as well not bother closing sock at
# all.
sock.close()
except socket.error as e:
if support.get_errno(e) not in BROKEN_SOCK:
traceback.print_exc()
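# Minimal usage sketch (added for illustration; it mirrors the documented
# eventlet.wsgi pattern but is not part of the original module):
#
#   import eventlet
#   from eventlet import wsgi
#
#   def hello_world(env, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return [b'Hello, World!\r\n']
#
#   wsgi.server(eventlet.listen(('127.0.0.1', 8090)), hello_world)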
|
sbadia/pkg-python-eventlet
|
eventlet/wsgi.py
|
Python
|
mit
| 32,722
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Setup for instrumentation host-driven tests."""
import logging
import os
import sys
import types
from pylib.host_driven import test_case
from pylib.host_driven import test_info_collection
from pylib.host_driven import test_runner
def _GetPythonFiles(root, files):
"""Returns all files from |files| that end in 'Test.py'.
Args:
root: A directory name with python files.
files: A list of file names.
Returns:
A list with all python files that match the testing naming scheme.
"""
return [os.path.join(root, f) for f in files if f.endswith('Test.py')]
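# e.g. _GetPythonFiles('/src/tests', ['FooTest.py', 'util.py'])
#   -> ['/src/tests/FooTest.py']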
def _InferImportNameFromFile(python_file):
"""Given a file, infer the import name for that file.
Example: /usr/foo/bar/baz.py -> baz.
Args:
python_file: Path to the Python file, ostensibly to import later.
Returns:
The module name for the given file.
"""
return os.path.splitext(os.path.basename(python_file))[0]
def _GetTestModules(host_driven_test_root, is_official_build):
"""Retrieve a list of python modules that match the testing naming scheme.
Walks the location of host-driven tests, imports them, and provides the list
of imported modules to the caller.
Args:
host_driven_test_root: The path to walk, looking for the
pythonDrivenTests or host_driven_tests directory
is_official_build: Whether to run only those tests marked 'official'
Returns:
A list of python modules under |host_driven_test_root| which match the
testing naming scheme. Each module should define one or more classes that
derive from HostDrivenTestCase.
"""
# By default run all host-driven tests under pythonDrivenTests or
# host_driven_tests.
host_driven_test_file_list = []
for root, _, files in os.walk(host_driven_test_root):
if (root.endswith('host_driven_tests') or
root.endswith('pythonDrivenTests') or
(is_official_build and (root.endswith('pythonDrivenTests/official') or
root.endswith('host_driven_tests/official')))):
host_driven_test_file_list += _GetPythonFiles(root, files)
host_driven_test_file_list.sort()
test_module_list = [_GetModuleFromFile(test_file)
for test_file in host_driven_test_file_list]
return test_module_list
def _GetModuleFromFile(python_file):
"""Gets the python module associated with a file by importing it.
Args:
python_file: File to import.
Returns:
The module object.
"""
sys.path.append(os.path.dirname(python_file))
import_name = _InferImportNameFromFile(python_file)
return __import__(import_name)
def _GetTestsFromClass(test_case_class, **kwargs):
"""Returns one test object for each test method in |test_case_class|.
Test methods are methods on the class which begin with 'test'.
Args:
test_case_class: Class derived from HostDrivenTestCase which contains zero
or more test methods.
kwargs: Keyword args to pass into the constructor of test cases.
Returns:
A list of test case objects, each initialized for a particular test method.
"""
test_names = [m for m in dir(test_case_class)
if _IsTestMethod(m, test_case_class)]
return [test_case_class(name, **kwargs) for name in test_names]
def _GetTestsFromModule(test_module, **kwargs):
"""Gets a list of test objects from |test_module|.
Args:
test_module: Module from which to get the set of test methods.
kwargs: Keyword args to pass into the constructor of test cases.
Returns:
A list of test case objects each initialized for a particular test method
defined in |test_module|.
"""
tests = []
for name in dir(test_module):
attr = getattr(test_module, name)
if _IsTestCaseClass(attr):
tests.extend(_GetTestsFromClass(attr, **kwargs))
return tests
def _IsTestCaseClass(test_class):
return (type(test_class) is types.TypeType and
issubclass(test_class, test_case.HostDrivenTestCase) and
test_class is not test_case.HostDrivenTestCase)
def _IsTestMethod(attrname, test_case_class):
"""Checks whether this is a valid test method.
Args:
attrname: The method name.
test_case_class: The test case class.
Returns:
True if test_case_class.'attrname' is callable and it starts with 'test';
False otherwise.
"""
attr = getattr(test_case_class, attrname)
return callable(attr) and attrname.startswith('test')
def _GetAllTests(test_root, is_official_build, **kwargs):
"""Retrieve a list of host-driven tests defined under |test_root|.
Args:
test_root: Path which contains host-driven test files.
is_official_build: Whether this is an official build.
kwargs: Keyword args to pass into the constructor of test cases.
Returns:
List of test case objects, one for each available test method.
"""
if not test_root:
return []
all_tests = []
test_module_list = _GetTestModules(test_root, is_official_build)
for module in test_module_list:
all_tests.extend(_GetTestsFromModule(module, **kwargs))
return all_tests
def InstrumentationSetup(host_driven_test_root, official_build,
instrumentation_options):
"""Creates a list of host-driven instrumentation tests and a runner factory.
Args:
host_driven_test_root: Directory where the host-driven tests are.
official_build: True if this is an official build.
instrumentation_options: An InstrumentationOptions object.
Returns:
A tuple of (TestRunnerFactory, tests).
"""
test_collection = test_info_collection.TestInfoCollection()
all_tests = _GetAllTests(
host_driven_test_root, official_build,
instrumentation_options=instrumentation_options)
test_collection.AddTests(all_tests)
available_tests = test_collection.GetAvailableTests(
instrumentation_options.annotations,
instrumentation_options.exclude_annotations,
instrumentation_options.test_filter)
logging.debug('All available tests: ' + str(
[t.tagged_name for t in available_tests]))
def TestRunnerFactory(device, shard_index):
return test_runner.HostDrivenTestRunner(
device, shard_index,
instrumentation_options.tool)
return (TestRunnerFactory, available_tests)
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/build/android/pylib/host_driven/setup.py
|
Python
|
mit
| 6,378
|
#!/usr/bin/python
import sys
from qpid.messaging import *
#global vars
broker_local = "localhost:5672"
addr_control = "agie_inbound/agie_inbound_data"
def broker_conn():
# create connection to local broker
lb_connection = Connection(broker_local)
try:
lb_connection.open()
session = lb_connection.session()
receiver = session.receiver("agie_data_net1")
while True:
message = receiver.fetch()
received = message.content
print 'received', received
session.acknowledge()
except MessagingError,m:
print m
finally:
lb_connection.close()
broker_conn()
|
jkirklan/agietst
|
sandbox/get_data_net1.py
|
Python
|
gpl-3.0
| 662
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Base bundles.
.. py:data:: invenio
Invenio JavaScript scripts
.. py:data:: styles
Stylesheets such as Twitter Bootstrap, Font-Awesome, Invenio, ...
.. py:data:: jquery
JavaScript libraries such as jQuery, Type Ahead, Bootstrap, Hogan, ...
.. note::
``bootstrap.js`` provides ``$.fn.button`` which will be overwritten by
jQueryUI button when loaded globally. Use require.js to load only the
jQueryUI modules of your needs.
.. code-block:: javascript
require(['jquery', 'ui/accordion'], function($) {
$(function(){
$('#accordion').accordion()
})
})
.. py:data:: lessjs
LessCSS JavaScript library that is used in debug mode to render the less
stylesheets
.. py:data:: requirejs
    Require.js JavaScript library used in debug mode to load the JavaScript
    modules (defined using AMD) asynchronously.
.. py:data:: almondjs
    Almond JavaScript library used in production mode. It cannot load modules
    asynchronously; they must be bundled using ``r.js``.
"""
from __future__ import unicode_literals
import mimetypes
from flask import current_app
from invenio.ext.assets import Bundle, RequireJSFilter
mimetypes.add_type("text/css", ".less")
styles = Bundle(
"vendors/jquery-tokeninput/styles/token-input.css",
"vendors/jquery-tokeninput/styles/token-input-facebook.css",
"vendors/typeahead.js-bootstrap3.less/typeahead.css",
"less/base.less",
"less/user-menu.less",
"less/sticky-footer.less",
"less/footer.less",
output="invenio.css",
depends=[
"less/base.less",
"less/base/**/*.less"
],
filters="less,cleancss",
weight=50,
bower={
"bootstrap": "3.3.4",
"font-awesome": "4.1.0",
"typeahead.js-bootstrap3.less": "0.2.3",
}
)
jquery = Bundle(
"js/init.js",
output="jquery.js",
filters="requirejs",
weight=10,
bower={
        # The dependencies marked as *orphan* are not part of any bundle and
        # are loaded manually using a script tag, usually from legacy pages.
"flot": "latest", # orphan
"jquery": "~1.11",
"jquery.caret": "https://github.com/acdvorak/jquery.caret.git",
"jquery-form": "latest", # orphan
"jquery.hotkeys": "https://github.com/jeresig/" # orphan
"jquery.hotkeys.git",
"jquery.jeditable": "http://invenio-software.org/download/jquery/"
"v1.5/js/jquery.jeditable.mini.js",
"jquery-migrate": "latest", # orphan
"jquery-multifile": "https://github.com/fyneworks/multifile",
"jquery-tablesorter": "http://invenio-software.org/download/jquery/"
"jquery.tablesorter.20111208.zip", # orphan
"jquery-tokeninput": "latest",
"jquery.treeview": "latest", # orphan, to be replaced by jqTree
"json2": "latest", # orphan
"hogan": "~3",
"MathJax": "~2.4", # orphan
"swfobject": "latest", # orphan
"typeahead.js": "latest",
"uploadify": "latest" # orphan
# "bootstrap": "*", is set by invenio.css already.
}
)
invenio = Bundle(
"js/invenio.js",
output="invenio.js",
filters=RequireJSFilter(exclude=[jquery]),
weight=90
)
admin = Bundle(
"js/admin.js",
"vendors/admin-lte/dist/js/app.min.js",
output="admin.js",
filters=RequireJSFilter(exclude=[jquery]),
weight=50
)
admin_styles = Bundle(
"vendors/admin-lte/dist/css/AdminLTE.min.css",
"vendors/admin-lte/dist/css/skins/{0}.min.css".format(
current_app.config.get("ADMIN_UI_SKIN")),
output="admin.css",
filters="less,cleancss",
weight=91, # load after invenio.css
bower={
"admin-lte": "latest"
}
)
# require.js is only used when:
#
# - ASSETS_DEBUG is True
# - REQUIREJS_RUN_IN_DEBUG is not False
requirejs = Bundle(
"vendors/requirejs/require.js",
"js/settings.js",
output="require.js",
filters="uglifyjs",
weight=0,
bower={
"requirejs": "latest",
"requirejs-hogan-plugin": "latest"
}
)
# almond.js is only used when:
#
# - ASSETS_DEBUG is False
# - or REQUIREJS_RUN_IN_DEBUG is True
almondjs = Bundle(
"vendors/almond/almond.js",
"js/settings.js",
output="almond.js",
filters="uglifyjs",
weight=0,
bower={
"almond": "latest"
}
)
|
quantifiedcode-bot/invenio-base
|
invenio_base/bundles.py
|
Python
|
gpl-2.0
| 5,259
|
# coding=utf-8
import unittest
"""210. Course Schedule II
https://leetcode.com/problems/course-schedule-ii/description/
There are a total of _n_ courses you have to take, labeled from `0` to `n-1`.
Some courses may have prerequisites, for example to take course 0 you have to
first take course 1, which is expressed as a pair: `[0,1]`
Given the total number of courses and a list of prerequisite **pairs** ,
return the ordering of courses you should take to finish all courses.
There may be multiple correct orders, you just need to return one of them. If
it is impossible to finish all courses, return an empty array.
**Example 1:**
**Input:** 2, [[1,0]]
**Output:**[0,1]
**Explanation:** There are a total of 2 courses to take. To take course 1 you should have finished
course 0. So the correct course order is [0,1] .
**Example 2:**
**Input:** 4, [[1,0],[2,0],[3,1],[3,2]]
**Output:**[0,1,2,3] or [0,2,1,3]
**Explanation:** There are a total of 4 courses to take. To take course 3 you should have finished both
courses 1 and 2. Both courses 1 and 2 should be taken after you finished course 0.
So one correct course order is [0,1,2,3]. Another correct ordering is [0,2,1,3] .
**Note:**
1. The input prerequisites is a graph represented by **a list of edges** , not adjacency matrices. Read more about [how a graph is represented](https://www.khanacademy.org/computing/computer-science/algorithms/graph-representation/a/representing-graphs).
2. You may assume that there are no duplicate edges in the input prerequisites.
Similar Questions:
Course Schedule (course-schedule)
Alien Dictionary (alien-dictionary)
Minimum Height Trees (minimum-height-trees)
Sequence Reconstruction (sequence-reconstruction)
Course Schedule III (course-schedule-iii)
"""
class Solution(object):
def findOrder(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: List[int]
"""
    def test(self):
        assert self.findOrder(2, [[1, 0]]) == [0, 1]
        assert self.findOrder(1, []) == [0]
        assert self.findOrder(2, [[0, 1], [1, 0]]) == []
if __name__ == "__main__":
unittest.main()
|
openqt/algorithms
|
leetcode/python/lc210-course-schedule-ii.py
|
Python
|
gpl-3.0
| 2,198
|
# Copyright (C) 2016 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
from .oned import Dataset1DBase, Dataset
from .twod import Dataset2DBase, Dataset2D
from .nd import DatasetNDBase, DatasetND
from .text import DatasetText
from .date import DatasetDateTimeBase, DatasetDateTime
class _DatasetPlugin:
"""Shared methods for dataset plugins."""
def __init__(self, manager, ds):
self.pluginmanager = manager
self.pluginds = ds
def getPluginData(self, attr):
self.pluginmanager.update()
return getattr(self.pluginds, attr)
def linkedInformation(self):
"""Return information about how this dataset was created."""
fields = []
for name, val in self.pluginmanager.fields.items():
fields.append('%s: %s' % (str(name), str(val)))
try:
shape = [str(x) for x in self.data.shape]
except AttributeError:
shape = [str(len(self.data))]
shape = '\u00d7'.join(shape)
return '%s plugin dataset (fields %s), size %s' % (
self.pluginmanager.plugin.name,
', '.join(fields),
shape)
def canUnlink(self):
"""Can relationship be unlinked?"""
return True
def deleteRows(self, row, numrows):
pass
def insertRows(self, row, numrows, rowdata):
pass
def saveDataRelationToText(self, fileobj, name):
"""Save plugin to file, if this is the first one."""
# only try to save if this is the 1st dataset of this plugin
# manager in the document, so that we don't save more than once
docdatasets = set( self.document.data.values() )
for ds in self.pluginmanager.veuszdatasets:
if ds in docdatasets:
if ds is self:
# is 1st dataset
self.pluginmanager.saveToFile(fileobj)
return
def saveDataDumpToText(self, fileobj, name):
"""Save data to text: not used."""
def saveDataDumpToHDF5(self, group, name):
"""Save data to HDF5: not used."""
@property
def dstype(self):
"""Return type of plugin."""
return self.pluginmanager.plugin.name
class Dataset1DPlugin(_DatasetPlugin, Dataset1DBase):
"""Return 1D dataset from a plugin."""
def __init__(self, manager, ds):
_DatasetPlugin.__init__(self, manager, ds)
Dataset1DBase.__init__(self)
def userSize(self):
"""Size of dataset."""
return str( self.data.shape[0] )
def __getitem__(self, key):
"""Return a dataset based on this dataset
We override this from DatasetConcreteBase as it would return a
DatsetExpression otherwise, not chopped sets of data.
"""
return Dataset(**self._getItemHelper(key))
# parent class sets these attributes, so override setattr to do nothing
data = property(
lambda self: self.getPluginData('data'),
lambda self, val: None )
serr = property(
lambda self: self.getPluginData('serr'),
lambda self, val: None )
nerr = property(
lambda self: self.getPluginData('nerr'),
lambda self, val: None )
perr = property(
lambda self: self.getPluginData('perr'),
lambda self, val: None )
class Dataset2DPlugin(_DatasetPlugin, Dataset2DBase):
"""Return 2D dataset from a plugin."""
def __init__(self, manager, ds):
_DatasetPlugin.__init__(self, manager, ds)
Dataset2DBase.__init__(self)
def __getitem__(self, key):
return Dataset2D(
self.data[key], xrange=self.xrange, yrange=self.yrange,
xedge=self.xedge, yedge=self.yedge,
xcent=self.xcent, ycent=self.ycent)
data = property(
lambda self: self.getPluginData('data'),
lambda self, val: None )
xrange = property(
lambda self: self.getPluginData('rangex'),
lambda self, val: None )
yrange = property(
lambda self: self.getPluginData('rangey'),
lambda self, val: None )
xedge = property(
lambda self: self.getPluginData('xedge'),
lambda self, val: None )
yedge = property(
lambda self: self.getPluginData('yedge'),
lambda self, val: None )
xcent = property(
lambda self: self.getPluginData('xcent'),
lambda self, val: None )
ycent = property(
lambda self: self.getPluginData('ycent'),
lambda self, val: None )
class DatasetNDPlugin(_DatasetPlugin, DatasetNDBase):
"""Return N-dimensional dataset from plugin."""
def __init__(self, manager, ds):
_DatasetPlugin.__init__(self, manager, ds)
DatasetNDBase.__init__(self)
def __getitem__(self, key):
return DatasetND(self.data[key])
data = property(
lambda self: self.getPluginData('data'),
lambda self, val: None )
class DatasetTextPlugin(_DatasetPlugin, DatasetText):
"""Return text dataset from a plugin."""
def __init__(self, manager, ds):
_DatasetPlugin.__init__(self, manager, ds)
DatasetText.__init__(self, [])
def __getitem__(self, key):
return DatasetText(self.data[key])
data = property(
lambda self: self.getPluginData('data'),
lambda self, val: None )
class DatasetDateTimePlugin(_DatasetPlugin, DatasetDateTimeBase):
"""Return date dataset from plugin."""
def __init__(self, manager, ds):
_DatasetPlugin.__init__(self, manager, ds)
DatasetDateTimeBase.__init__(self)
self.serr = self.perr = self.nerr = None
def __getitem__(self, key):
return DatasetDateTime(self.data[key])
data = property(
lambda self: self.getPluginData('data'),
lambda self, val: None )
|
veusz/veusz
|
veusz/datasets/plugin.py
|
Python
|
gpl-2.0
| 6,627
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import List, Instance
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.envisage.tasks.base_task_plugin import BaseTaskPlugin
from pychron.experiment.events import ExperimentEventAddition, START_QUEUE, END_QUEUE
from pychron.social.email.emailer import Emailer
from pychron.social.email.experiment_notifier import ExperimentNotifier
from pychron.social.email.tasks.preferences import EmailPreferencesPane
class EmailPlugin(BaseTaskPlugin):
id = 'pychron.social.email.plugin'
name = 'Email'
test_email_server_description = 'Test connection to the SMTP Email Server'
events = List(contributes_to='pychron.experiment.events')
experiment_notifier = Instance(ExperimentNotifier)
def test_email_server(self):
e = self._email_factory()
return e.test_email_server()
# private
def _email_factory(self):
return Emailer()
def _preferences_panes_default(self):
return [EmailPreferencesPane]
def _service_offers_default(self):
so = self.service_offer_factory(factory=self._email_factory,
protocol='pychron.social.email.emailer.Emailer')
return [so]
def _experiment_notifier_default(self):
exp = ExperimentNotifier(emailer=Emailer())
return exp
def _events_default(self):
evts = [ExperimentEventAddition(id='pychron.experiment_notifier.start_queue',
action=self.experiment_notifier.start_queue,
level=START_QUEUE),
ExperimentEventAddition(id='pychron.experiment_notifier.end_queue',
action=self.experiment_notifier.end_queue,
level=END_QUEUE)]
return evts
# ============= EOF =============================================
|
UManPychron/pychron
|
pychron/social/email/tasks/plugin.py
|
Python
|
apache-2.0
| 2,813
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pyscada', '0010_auto_20160115_0918'),
('modbus', '0003_auto_20160115_0918'),
]
operations = [
migrations.RenameField(
model_name='modbusdevice',
old_name='modbus_client',
new_name='modbus_device',
),
]
|
trombastic/PyScada
|
pyscada/modbus/migrations/0004_auto_20160115_0920.py
|
Python
|
gpl-3.0
| 454
|
from .logger import *
|
pkenway/caver
|
py/caverlib/logging/__init__.py
|
Python
|
gpl-3.0
| 22
|
from django.conf.urls import patterns, url
from django.views.generic import ListView, DetailView
from django.views.generic.dates import ArchiveIndexView
from apps.podcast.models import Episode, Contributor
from apps.podcast.views import canonical_redirect, PodcastFeed
urlpatterns = patterns('',
url(r'^$', ArchiveIndexView.as_view(model=Episode, date_field="pub_date",
queryset=Episode.objects.filter(status=2))),
url(r'^contributors/$', ListView.as_view(
queryset=Contributor.objects.select_related()
)),
url(r'^contributors/(?P<slug>[\w-]+)/*$', DetailView.as_view(model=Contributor), name='ContributorDetail'),
url(r'feed/$', PodcastFeed),
url(r'^(?P<episode_number>\w+)/$', canonical_redirect),
url(r'^episode/(?P<slug>[\w-]+)/*$', DetailView.as_view(model=Episode), name='EpisodeDetail'),
)
|
stickwithjosh/hypodrical
|
apps/podcast/urls.py
|
Python
|
mit
| 878
|
from unittest import TestCase as Base
from memoize.core import *
from memoize.time import time, sleep
from memoize import Memoizer
class TestCase(Base):
memo_kwargs = {}
def setUp(self):
self.store = {}
self.records = []
self.memo = Memoizer(self.store, **self.memo_kwargs)
def append_args(self, *args, **kwargs):
self.records.append((args, kwargs))
return len(self.records)
|
mikeboers/PyMemoize
|
tests/common.py
|
Python
|
bsd-3-clause
| 434
|
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import utils as db_utils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from sqlalchemy import or_
from sqlalchemy.orm.exc import NoResultFound
from iotronic.common import exception
from iotronic.common.i18n import _
from iotronic.common import states
from iotronic.db import api
from iotronic.db.sqlalchemy import models
CONF = cfg.CONF
CONF.import_opt('heartbeat_timeout',
'iotronic.conductor.manager',
group='conductor')
_FACADE = None
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade.from_config(CONF)
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def get_backend():
"""The backend is this module itself."""
return Connection()
def model_query(model, *args, **kwargs):
"""Query helper for simpler session usage.
:param session: if present, the session to use
"""
session = kwargs.get('session') or get_session()
query = session.query(model, *args)
return query
def add_identity_filter(query, value):
"""Adds an identity filter to a query.
Filters results by ID, if supplied value is a valid integer.
Otherwise attempts to filter results by UUID.
:param query: Initial query to add filter to.
:param value: Value for filtering results by.
:return: Modified query.
"""
if strutils.is_int_like(value):
return query.filter_by(id=value)
elif uuidutils.is_uuid_like(value):
return query.filter_by(uuid=value)
else:
raise exception.InvalidIdentity(identity=value)
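# e.g. add_identity_filter(model_query(models.Board), 42) filters on the
# integer primary key, while passing a UUID string filters on the uuid column.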
def _paginate_query(model, limit=None, marker=None, sort_key=None,
sort_dir=None, query=None):
if not query:
query = model_query(model)
sort_keys = ['id']
if sort_key and sort_key not in sort_keys:
sort_keys.insert(0, sort_key)
try:
query = db_utils.paginate_query(query, model, limit, sort_keys,
marker=marker, sort_dir=sort_dir)
except db_exc.InvalidSortKey:
raise exception.InvalidParameterValue(
_('The sort_key value "%(key)s" is an invalid field for sorting')
% {'key': sort_key})
return query.all()
class Connection(api.Connection):
"""SqlAlchemy connection."""
def __init__(self):
pass
def _add_location_filter_by_board(self, query, value):
if strutils.is_int_like(value):
return query.filter_by(board_id=value)
else:
query = query.join(models.Board,
models.Location.board_id == models.Board.id)
return query.filter(models.Board.uuid == value)
def _add_boards_filters(self, query, filters):
if filters is None:
filters = []
if 'project_id' in filters:
query = query.filter(models.Board.project == filters['project_id'])
if 'status' in filters:
query = query.filter(models.Board.status == filters['status'])
return query
def _add_plugins_filters(self, query, filters):
if filters is None:
filters = []
if 'owner' in filters:
if 'public' in filters and filters['public']:
query = query.filter(
or_(
models.Plugin.owner == filters['owner'],
models.Plugin.public == 1)
)
else:
query = query.filter(models.Plugin.owner == filters['owner'])
return query
def _add_wampagents_filters(self, query, filters):
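        """Apply the supported WAMP agent filters (online, no_ragent) to the query."""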
if filters is None:
filters = []
if 'online' in filters:
if filters['online']:
query = query.filter(models.WampAgent.online == 1)
else:
query = query.filter(models.WampAgent.online == 0)
if 'no_ragent' in filters:
if filters['no_ragent']:
query = query.filter(models.WampAgent.ragent == 0)
else:
query = query.filter(models.WampAgent.ragent == 1)
return query
def _do_update_board(self, board_id, values):
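        """Lock the matching board row, apply ``values`` and return the updated row."""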
session = get_session()
with session.begin():
query = model_query(models.Board, session=session)
query = add_identity_filter(query, board_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.BoardNotFound(board=board_id)
ref.update(values)
return ref
def _do_update_plugin(self, plugin_id, values):
session = get_session()
with session.begin():
query = model_query(models.Plugin, session=session)
query = add_identity_filter(query, plugin_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.PluginNotFound(plugin=plugin_id)
ref.update(values)
return ref
def _do_update_injection_plugin(self, injection_plugin_id, values):
session = get_session()
with session.begin():
query = model_query(models.InjectionPlugin, session=session)
query = add_identity_filter(query, injection_plugin_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.InjectionPluginNotFound(
injection_plugin=injection_plugin_id)
ref.update(values)
return ref
# BOARD api
def get_boardinfo_list(self, columns=None, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
        # Default ``columns`` to a list here rather than in the signature,
        # because using a mutable list as a default argument is bad form.
if columns is None:
columns = [models.Board.id]
else:
columns = [getattr(models.Board, c) for c in columns]
query = model_query(*columns, base_model=models.Board)
query = self._add_boards_filters(query, filters)
return _paginate_query(models.Board, limit, marker,
sort_key, sort_dir, query)
def get_board_list(self, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Board)
query = self._add_boards_filters(query, filters)
return _paginate_query(models.Board, limit, marker,
sort_key, sort_dir, query)
def create_board(self, values):
# ensure defaults are present for new boards
if 'uuid' not in values:
values['uuid'] = uuidutils.generate_uuid()
if 'status' not in values:
values['status'] = states.REGISTERED
board = models.Board()
board.update(values)
try:
board.save()
except db_exc.DBDuplicateEntry as exc:
if 'code' in exc.columns:
raise exception.DuplicateCode(code=values['code'])
raise exception.BoardAlreadyExists(uuid=values['uuid'])
return board
def get_board_by_id(self, board_id):
query = model_query(models.Board).filter_by(id=board_id)
try:
return query.one()
except NoResultFound:
raise exception.BoardNotFound(board=board_id)
def get_board_id_by_uuid(self, board_uuid):
query = model_query(models.Board.id).filter_by(uuid=board_uuid)
try:
return query.one()
except NoResultFound:
raise exception.BoardNotFound(board=board_uuid)
def get_board_by_uuid(self, board_uuid):
query = model_query(models.Board).filter_by(uuid=board_uuid)
try:
return query.one()
except NoResultFound:
raise exception.BoardNotFound(board=board_uuid)
def get_board_by_name(self, board_name):
query = model_query(models.Board).filter_by(name=board_name)
try:
return query.one()
except NoResultFound:
raise exception.BoardNotFound(board=board_name)
def get_board_by_code(self, board_code):
query = model_query(models.Board).filter_by(code=board_code)
try:
return query.one()
except NoResultFound:
raise exception.BoardNotFound(board=board_code)
def destroy_board(self, board_id):
session = get_session()
with session.begin():
query = model_query(models.Board, session=session)
query = add_identity_filter(query, board_id)
try:
board_ref = query.one()
except NoResultFound:
raise exception.BoardNotFound(board=board_id)
            # Get the board ID if a UUID was supplied. The ID is
            # required for deleting all locations attached to the board.
if uuidutils.is_uuid_like(board_id):
board_id = board_ref['id']
location_query = model_query(models.Location, session=session)
location_query = self._add_location_filter_by_board(
location_query, board_id)
location_query.delete()
query.delete()
def update_board(self, board_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Board.")
raise exception.InvalidParameterValue(err=msg)
try:
return self._do_update_board(board_id, values)
except db_exc.DBDuplicateEntry as e:
if 'name' in e.columns:
raise exception.DuplicateName(name=values['name'])
elif 'uuid' in e.columns:
raise exception.BoardAlreadyExists(uuid=values['uuid'])
else:
raise e
# CONDUCTOR api
def register_conductor(self, values, update_existing=False):
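        """Create a conductor record, or update the existing one when
        ``update_existing`` is True; the row is always marked online with a
        fresh ``updated_at`` timestamp.
        """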
session = get_session()
with session.begin():
query = (model_query(models.Conductor, session=session)
.filter_by(hostname=values['hostname']))
try:
ref = query.one()
if ref.online is True and not update_existing:
raise exception.ConductorAlreadyRegistered(
conductor=values['hostname'])
except NoResultFound:
ref = models.Conductor()
ref.update(values)
# always set online and updated_at fields when registering
# a conductor, especially when updating an existing one
ref.update({'updated_at': timeutils.utcnow(),
'online': True})
ref.save(session)
return ref
def get_conductor(self, hostname):
try:
return (model_query(models.Conductor)
.filter_by(hostname=hostname, online=True)
.one())
except NoResultFound:
raise exception.ConductorNotFound(conductor=hostname)
def unregister_conductor(self, hostname):
session = get_session()
with session.begin():
query = (model_query(models.Conductor, session=session)
.filter_by(hostname=hostname, online=True))
count = query.update({'online': False})
if count == 0:
raise exception.ConductorNotFound(conductor=hostname)
def touch_conductor(self, hostname):
session = get_session()
with session.begin():
query = (model_query(models.Conductor, session=session)
.filter_by(hostname=hostname))
# since we're not changing any other field, manually set updated_at
# and since we're heartbeating, make sure that online=True
count = query.update({'updated_at': timeutils.utcnow(),
'online': True})
if count == 0:
raise exception.ConductorNotFound(conductor=hostname)
# LOCATION api
def create_location(self, values):
location = models.Location()
location.update(values)
location.save()
return location
def update_location(self, location_id, values):
# NOTE(dtantsur): this can lead to very strange errors
session = get_session()
try:
with session.begin():
query = model_query(models.Location, session=session)
query = add_identity_filter(query, location_id)
ref = query.one()
ref.update(values)
except NoResultFound:
raise exception.LocationNotFound(location=location_id)
return ref
def destroy_location(self, location_id):
session = get_session()
with session.begin():
query = model_query(models.Location, session=session)
query = add_identity_filter(query, location_id)
count = query.delete()
if count == 0:
raise exception.LocationNotFound(location=location_id)
def get_locations_by_board_id(self, board_id, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Location)
query = query.filter_by(board_id=board_id)
return _paginate_query(models.Location, limit, marker,
sort_key, sort_dir, query)
# SESSION api
def create_session(self, values):
session = models.SessionWP()
session.update(values)
session.save()
return session
def update_session(self, ses_id, values):
# NOTE(dtantsur): this can lead to very strange errors
session = get_session()
try:
with session.begin():
query = model_query(models.SessionWP, session=session)
query = add_identity_filter(query, ses_id)
ref = query.one()
ref.update(values)
except NoResultFound:
raise exception.SessionWPNotFound(ses=ses_id)
return ref
def get_session_by_board_uuid(self, board_uuid, valid):
query = model_query(
models.SessionWP).filter_by(
board_uuid=board_uuid).filter_by(
valid=valid)
try:
return query.one()
except NoResultFound:
raise exception.BoardNotConnected(board=board_uuid)
def get_session_by_id(self, session_id):
query = model_query(models.SessionWP).filter_by(session_id=session_id)
try:
return query.one()
except NoResultFound:
return None
def get_valid_wpsessions_list(self):
query = model_query(models.SessionWP).filter_by(valid=1)
return query.all()
# WAMPAGENT api
def register_wampagent(self, values, update_existing=False):
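        """Create a wampagent record, or update the existing one when
        ``update_existing`` is True; the row is always marked online with a
        fresh ``updated_at`` timestamp.
        """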
session = get_session()
with session.begin():
query = (model_query(models.WampAgent, session=session)
.filter_by(hostname=values['hostname']))
try:
ref = query.one()
if ref.online is True and not update_existing:
raise exception.WampAgentAlreadyRegistered(
wampagent=values['hostname'])
except NoResultFound:
ref = models.WampAgent()
ref.update(values)
# always set online and updated_at fields when registering
# a wampagent, especially when updating an existing one
ref.update({'updated_at': timeutils.utcnow(),
'online': True})
ref.save(session)
return ref
def get_wampagent(self, hostname):
try:
return (model_query(models.WampAgent)
.filter_by(hostname=hostname, online=True)
.one())
except NoResultFound:
raise exception.WampAgentNotFound(wampagent=hostname)
def get_registration_wampagent(self):
try:
return (model_query(models.WampAgent)
.filter_by(ragent=True, online=True)
.one())
except NoResultFound:
raise exception.WampRegistrationAgentNotFound()
def unregister_wampagent(self, hostname):
session = get_session()
with session.begin():
query = (model_query(models.WampAgent, session=session)
.filter_by(hostname=hostname, online=True))
count = query.update({'online': False})
if count == 0:
raise exception.WampAgentNotFound(wampagent=hostname)
def touch_wampagent(self, hostname):
session = get_session()
with session.begin():
query = (model_query(models.WampAgent, session=session)
.filter_by(hostname=hostname))
# since we're not changing any other field, manually set updated_at
# and since we're heartbeating, make sure that online=True
count = query.update({'updated_at': timeutils.utcnow(),
'online': True})
if count == 0:
raise exception.WampAgentNotFound(wampagent=hostname)
def get_wampagent_list(self, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.WampAgent)
query = self._add_wampagents_filters(query, filters)
return _paginate_query(models.WampAgent, limit, marker,
sort_key, sort_dir, query)
# PLUGIN api
def get_plugin_by_id(self, plugin_id):
query = model_query(models.Plugin).filter_by(id=plugin_id)
try:
return query.one()
except NoResultFound:
raise exception.PluginNotFound(plugin=plugin_id)
def get_plugin_by_uuid(self, plugin_uuid):
query = model_query(models.Plugin).filter_by(uuid=plugin_uuid)
try:
return query.one()
except NoResultFound:
raise exception.PluginNotFound(plugin=plugin_uuid)
def get_plugin_by_name(self, plugin_name):
query = model_query(models.Plugin).filter_by(name=plugin_name)
try:
return query.one()
except NoResultFound:
raise exception.PluginNotFound(plugin=plugin_name)
def destroy_plugin(self, plugin_id):
session = get_session()
with session.begin():
query = model_query(models.Plugin, session=session)
query = add_identity_filter(query, plugin_id)
try:
plugin_ref = query.one()
except NoResultFound:
raise exception.PluginNotFound(plugin=plugin_id)
            # Get the plugin ID if a UUID was supplied. The ID is
            # required for deleting any records attached to the plugin.
if uuidutils.is_uuid_like(plugin_id):
plugin_id = plugin_ref['id']
query.delete()
def update_plugin(self, plugin_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Plugin.")
raise exception.InvalidParameterValue(err=msg)
try:
return self._do_update_plugin(plugin_id, values)
except db_exc.DBDuplicateEntry as e:
if 'name' in e.columns:
raise exception.DuplicateName(name=values['name'])
elif 'uuid' in e.columns:
raise exception.PluginAlreadyExists(uuid=values['uuid'])
else:
raise e
def create_plugin(self, values):
# ensure defaults are present for new plugins
if 'uuid' not in values:
values['uuid'] = uuidutils.generate_uuid()
plugin = models.Plugin()
plugin.update(values)
try:
plugin.save()
except db_exc.DBDuplicateEntry:
raise exception.PluginAlreadyExists(uuid=values['uuid'])
return plugin
def get_plugin_list(self, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Plugin)
query = self._add_plugins_filters(query, filters)
return _paginate_query(models.Plugin, limit, marker,
sort_key, sort_dir, query)
# INJECTION PLUGIN api
def get_injection_plugin_by_board_uuid(self, board_uuid):
query = model_query(
models.InjectionPlugin).filter_by(
board_uuid=board_uuid)
try:
return query.one()
except NoResultFound:
raise exception.InjectionPluginNotFound()
def create_injection_plugin(self, values):
# ensure defaults are present for new plugins
if 'uuid' not in values:
values['uuid'] = uuidutils.generate_uuid()
inj_plug = models.InjectionPlugin()
inj_plug.update(values)
try:
inj_plug.save()
except db_exc.DBDuplicateEntry:
raise exception.PluginAlreadyExists(uuid=values['uuid'])
return inj_plug
def update_injection_plugin(self, plugin_injection_id, values):
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Plugin.")
raise exception.InvalidParameterValue(err=msg)
try:
return self._do_update_injection_plugin(
plugin_injection_id, values)
except db_exc.DBDuplicateEntry as e:
if 'name' in e.columns:
raise exception.DuplicateName(name=values['name'])
elif 'uuid' in e.columns:
raise exception.PluginAlreadyExists(uuid=values['uuid'])
else:
raise e
def get_injection_plugin_by_uuids(self, board_uuid, plugin_uuid):
query = model_query(
models.InjectionPlugin).filter_by(
board_uuid=board_uuid).filter_by(
plugin_uuid=plugin_uuid)
try:
return query.one()
except NoResultFound:
raise exception.InjectionPluginNotFound()
def destroy_injection_plugin(self, injection_plugin_id):
session = get_session()
with session.begin():
query = model_query(models.InjectionPlugin, session=session)
query = add_identity_filter(query, injection_plugin_id)
try:
query.delete()
except NoResultFound:
raise exception.InjectionPluginNotFound()
def get_injection_plugin_list(self, board_uuid):
query = model_query(
models.InjectionPlugin).filter_by(
board_uuid=board_uuid)
return query.all()
MDSLab/s4t-iotronic | iotronic/db/sqlalchemy/api.py | Python | apache-2.0 | 23,917
""" Placeholder admin module is currently empty.
Purpose: placeholder for if and when we want to implement an admin module
Author: (none)
Date: Summer, 2018.
Copyright: (c) 2018 Tom W. Hartung, Groja.com, and JooMoo Websites LLC.
Reference:
Copied from seeourminds.com on 2018-08-20
"""
### from django.contrib import admin
###
### from .database import Answer
### from .database import Questionnaire
### admin.site.register(Answer)
### admin.site.register(Questionnaire)
tomwhartung/tomhartung.com | Site/content/admin.py | Python | gpl-2.0 | 476
class BitVector(object):
    """A conceptually infinite array of bits, backed by a single integer."""
    def __init__(self):
        self.BitNum = 0
        self.length = 0
    def set(self, i):
        """Set bit ``i`` to 1."""
        self.BitNum = self.BitNum | (1 << i)
        self.length = self.BitNum.bit_length()
    def reset(self, i):
        """Clear bit ``i``; safe even if the bit is already 0."""
        self.BitNum = self.BitNum & ~(1 << i)
        self.length = self.BitNum.bit_length()
    def at(self, i):
        """Return bit ``i`` as 0 or 1; bits beyond the current length are 0."""
        if i < 0:
            raise ValueError("bit index must be non-negative")
        if i >= self.length:
            return 0
        return (self.BitNum >> i) & 1
    def __repr__(self):
        return bin(self.BitNum)[2:]
    def __str__(self):
        return bin(self.BitNum)[2:]
Seenivasanseeni/PyDaS | PDaS/BitVector.py | Python | mit | 595
handlers = {}
def handle_event(event, callback):
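    """Register ``callback`` to be invoked whenever ``event`` is triggered."""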
if event in handlers:
handlers[event].append(callback)
else:
handlers[event] = [callback]
def trigger(event):
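    """Invoke every callback registered for ``event``, in registration order."""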
for callback in handlers.get(event, []):
callback()
Turbasen/turbasen.py | turbasen/events.py | Python | mit | 249