repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
raeeschachar/edx-e2e-mirror | regression/pages/studio/logout_studio.py | Python | agpl-3.0 | 384 | 0 | """
Logout Page for Studio
"""
from bok_choy.page_object import PageObject
from regression.pages.s | tudio import BASE_URL
class StudioLogout(PageObject):
"""
Logged Out Page for Studio
"""
url = BASE_URL
d | ef is_browser_on_page(self):
"""
Checks if we are on the correct page
"""
return self.q(css='.wrapper-text-welcome').present
|
flyingfish007/tempest | tempest/scenario/test_network_basic_ops.py | Python | apache-2.0 | 32,963 | 0 | # Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
from oslo_log import log as logging
import testtools
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.scenario import manager
from tempest.services.network import resources as n | et_resources
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
Floating_IP_tuple = collections.namedtuple('Floating_IP_tuple',
['floating_ip', 'server'])
class TestNetworkBasicOps(manager.NetworkScenarioTest):
"""
This smok | e test suite assumes that Nova has been configured to
boot VM's with Neutron-managed networking, and attempts to
verify network connectivity as follows:
There are presumed to be two types of networks: tenant and
public. A tenant network may or may not be reachable from the
Tempest host. A public network is assumed to be reachable from
the Tempest host, and it should be possible to associate a public
('floating') IP address with a tenant ('fixed') IP address to
facilitate external connectivity to a potentially unroutable
tenant IP address.
This test suite can be configured to test network connectivity to
a VM via a tenant network, a public network, or both. If both
networking types are to be evaluated, tests that need to be
executed remotely on the VM (via ssh) will only be run against
one of the networks (to minimize test execution time).
Determine which types of networks to test as follows:
* Configure tenant network checks (via the
'tenant_networks_reachable' key) if the Tempest host should
have direct connectivity to tenant networks. This is likely to
be the case if Tempest is running on the same host as a
single-node devstack installation with IP namespaces disabled.
* Configure checks for a public network if a public network has
been configured prior to the test suite being run and if the
Tempest host should have connectivity to that public network.
Checking connectivity for a public network requires that a
value be provided for 'public_network_id'. A value can
optionally be provided for 'public_router_id' if tenants will
use a shared router to access a public network (as is likely to
be the case when IP namespaces are not enabled). If a value is
not provided for 'public_router_id', a router will be created
for each tenant and use the network identified by
'public_network_id' as its gateway.
"""
@classmethod
def skip_checks(cls):
super(TestNetworkBasicOps, cls).skip_checks()
if not (CONF.network.tenant_networks_reachable
or CONF.network.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
raise cls.skipException(msg)
for ext in ['router', 'security-group']:
if not test.is_extension_enabled(ext, 'network'):
msg = "%s extension not enabled." % ext
raise cls.skipException(msg)
@classmethod
def setup_credentials(cls):
# Create no network resources for these tests.
cls.set_network_resources()
super(TestNetworkBasicOps, cls).setup_credentials()
def setUp(self):
super(TestNetworkBasicOps, self).setUp()
self.keypairs = {}
self.servers = []
def _setup_network_and_servers(self, **kwargs):
boot_with_port = kwargs.pop('boot_with_port', False)
self.security_group = \
self._create_security_group(tenant_id=self.tenant_id)
self.network, self.subnet, self.router = self.create_networks(**kwargs)
self.check_networks()
self.ports = []
self.port_id = None
if boot_with_port:
# create a port on the network and boot with that
self.port_id = self._create_port(self.network['id']).id
self.ports.append({'port': self.port_id})
name = data_utils.rand_name('server-smoke')
server = self._create_server(name, self.network, self.port_id)
self._check_tenant_network_connectivity()
floating_ip = self.create_floating_ip(server)
self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
def check_networks(self):
"""
Checks that we see the newly created network/subnet/router via
checking the result of list_[networks,routers,subnets]
"""
seen_nets = self._list_networks()
seen_names = [n['name'] for n in seen_nets]
seen_ids = [n['id'] for n in seen_nets]
self.assertIn(self.network.name, seen_names)
self.assertIn(self.network.id, seen_ids)
if self.subnet:
seen_subnets = self._list_subnets()
seen_net_ids = [n['network_id'] for n in seen_subnets]
seen_subnet_ids = [n['id'] for n in seen_subnets]
self.assertIn(self.network.id, seen_net_ids)
self.assertIn(self.subnet.id, seen_subnet_ids)
if self.router:
seen_routers = self._list_routers()
seen_router_ids = [n['id'] for n in seen_routers]
seen_router_names = [n['name'] for n in seen_routers]
self.assertIn(self.router.name,
seen_router_names)
self.assertIn(self.router.id,
seen_router_ids)
def _create_server(self, name, network, port_id=None):
keypair = self.create_keypair()
self.keypairs[keypair['name']] = keypair
security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'networks': [
{'uuid': network.id},
],
'key_name': keypair['name'],
'security_groups': security_groups,
}
if port_id is not None:
create_kwargs['networks'][0]['port'] = port_id
server = self.create_server(name=name, create_kwargs=create_kwargs)
self.servers.append(server)
return server
def _get_server_key(self, server):
return self.keypairs[server['key_name']]['private_key']
def _check_tenant_network_connectivity(self):
ssh_login = CONF.compute.image_ssh_user
for server in self.servers:
# call the common method in the parent class
super(TestNetworkBasicOps, self).\
_check_tenant_network_connectivity(
server, ssh_login, self._get_server_key(server),
servers_for_debug=self.servers)
def check_public_network_connectivity(
self, should_connect=True, msg=None,
should_check_floating_ip_status=True):
"""Verifies connectivty to a VM via public network and floating IP,
and verifies floating IP has resource status is correct.
:param should_connect: bool. determines if connectivity check is
negative or positive.
:param msg: Failure message to add to Error message. Should describe
the place in the test scenario where the method was called,
to indicate the context of the failure
:param should_check_floating_ip_status: bool. should status of
floating_ip be checked or not
"""
ssh_login = CONF.compute.image_ssh_user
|
1tush/reviewboard | reviewboard/settings.py | Python | mit | 12,734 | 0.000393 | # Django settings for reviewboard project.
from __future__ import unicode_literals
import os
import re
import sys
import djblets
from django.core.urlresolvers import reverse
# Can't import django.utils.translation yet
_ = lambda s: s
DEBUG = True
ADMINS = (
('Example Joe', 'admin@example.com')
)
MANAGERS = ADMINS
# Time zone support. If enabled, Django stores date and time information as
# UTC in the database, uses time zone-aware datetime objects, and translate | s
# them to the user's time zone in templates and forms.
USE_TZ = True
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# When USE_TZ is enabled, this is used as the default time zone for datetime
# objects
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be fo | und here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
# This should match the ID of the Site object in the database. This is used to
# figure out URLs to stick in e-mails and related pages.
SITE_ID = 1
# The prefix for e-mail subjects sent to administrators.
EMAIL_SUBJECT_PREFIX = "[Review Board] "
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
('djblets.template.loaders.conditional_cached.Loader', (
'django.template.loaders.filesystem.Loader',
'djblets.template.loaders.namespaced_app_dirs.Loader',
'djblets.extensions.loaders.load_template_source',
)),
)
MIDDLEWARE_CLASSES = [
# Keep these first, in order
'django.middleware.gzip.GZipMiddleware',
'reviewboard.admin.middleware.InitReviewBoardMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# These must go before anything that deals with settings.
'djblets.siteconfig.middleware.SettingsMiddleware',
'reviewboard.admin.middleware.LoadSettingsMiddleware',
'djblets.extensions.middleware.ExtensionsMiddleware',
'djblets.log.middleware.LoggingMiddleware',
'reviewboard.accounts.middleware.TimezoneMiddleware',
'reviewboard.admin.middleware.CheckUpdatesRequiredMiddleware',
'reviewboard.admin.middleware.X509AuthMiddleware',
'reviewboard.site.middleware.LocalSiteMiddleware',
# Keep this last so that everything is initialized before middleware
# from extensions are run.
'djblets.extensions.middleware.ExtensionsMiddlewareRunner',
]
RB_EXTRA_MIDDLEWARE_CLASSES = []
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.core.context_processors.static',
'djblets.cache.context_processors.ajax_serial',
'djblets.cache.context_processors.media_serial',
'djblets.siteconfig.context_processors.siteconfig',
'djblets.siteconfig.context_processors.settings_vars',
'djblets.urls.context_processors.site_root',
'reviewboard.accounts.context_processors.auth_backends',
'reviewboard.accounts.context_processors.profile',
'reviewboard.admin.context_processors.version',
'reviewboard.site.context_processors.localsite',
)
SITE_ROOT_URLCONF = 'reviewboard.urls'
ROOT_URLCONF = 'djblets.urls.root'
REVIEWBOARD_ROOT = os.path.abspath(os.path.split(__file__)[0])
# where is the site on your server ? - add the trailing slash.
SITE_ROOT = '/'
TEMPLATE_DIRS = (
# Don't forget to use absolute paths, not relative paths.
os.path.join(REVIEWBOARD_ROOT, 'templates'),
)
STATICFILES_DIRS = (
('lib', os.path.join(REVIEWBOARD_ROOT, 'static', 'lib')),
('rb', os.path.join(REVIEWBOARD_ROOT, 'static', 'rb')),
('djblets', os.path.join(os.path.dirname(djblets.__file__),
'static', 'djblets')),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djblets.extensions.staticfiles.ExtensionFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
RB_BUILTIN_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.staticfiles',
'djblets',
'djblets.configforms',
'djblets.datagrid',
'djblets.extensions',
'djblets.feedview',
'djblets.gravatars',
'djblets.log',
'djblets.pipeline',
'djblets.siteconfig',
'djblets.util',
'haystack',
'pipeline', # Must be after djblets.pipeline
'reviewboard',
'reviewboard.accounts',
'reviewboard.admin',
'reviewboard.attachments',
'reviewboard.changedescs',
'reviewboard.diffviewer',
'reviewboard.extensions',
'reviewboard.hostingsvcs',
'reviewboard.notifications',
'reviewboard.reviews',
'reviewboard.scmtools',
'reviewboard.site',
'reviewboard.webapi',
]
# If installed, add django_reset to INSTALLED_APPS. This is used for the
# 'manage.py reset' command, which is very useful during development.
try:
import django_reset
RB_BUILTIN_APPS.append('django_reset')
except ImportError:
pass
RB_EXTRA_APPS = []
WEB_API_ENCODERS = (
'djblets.webapi.encoders.ResourceAPIEncoder',
)
# The backends that are used to authenticate requests against the web API.
WEB_API_AUTH_BACKENDS = (
'djblets.webapi.auth.WebAPIBasicAuthBackend',
'reviewboard.webapi.auth_backends.WebAPITokenAuthBackend',
)
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
# Set up a default cache backend. This will mostly be useful for
# local development, as sites will override this.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'reviewboard',
},
}
LOGGING_NAME = "reviewboard"
LOGGING_REQUEST_FORMAT = "%(_local_site_name)s - %(user)s - %(path)s"
AUTH_PROFILE_MODULE = "accounts.Profile"
# Default expiration time for the cache. Note that this has no effect unless
# CACHE_BACKEND is specified in settings_local.py
CACHE_EXPIRATION_TIME = 60 * 60 * 24 * 30 # 1 month
# Custom test runner, which uses nose to find tests and execute them. This
# gives us a somewhat more comprehensive test execution than django's built-in
# runner, as well as some special features like a code coverage report.
TEST_RUNNER = 'reviewboard.test.RBTestRunner'
# Dependency checker functionality. Gives our users nice errors when they
# start out, instead of encountering them later on. Most of the magic for this
# happens in manage.py, not here.
install_help = '''
Please see https://www.reviewboard.org/docs/manual/dev/admin/
for help setting up Review Board.
'''
def dependency_error(string):
sys.stderr.write('%s\n' % string)
sys.stderr.write(install_help)
sys.exit(1)
if os.path.split(os.path.dirname(__file__))[1] != 'reviewboard':
dependency_error('The directory containing manage.py must be named '
'"reviewboard"')
LOCAL_ROOT = None
PRODUCTION = True
# Default ALLOWED_HOSTS to allow everything. This should be overridden in
# settings_local.py
ALLOWED_HOSTS = ['*']
# Cookie settings
LANGUAGE_COOKIE_NAME = "rblanguage"
SESSION_COOKIE_NAME = "rbsessionid"
SESSION_COOKIE_AGE = 365 * 24 * 60 * 60 # 1 year
# Default support settings
SUPPORT_URL_BASE = 'https://www.beanbaginc.com/support/reviewboard/'
DE |
luiscberrocal/homeworkpal | homeworkpal_project/employee/views.py | Python | mit | 3,567 | 0.001962 | from braces.views import LoginRequiredMixin
from django.contrib.auth.models import User, Group
from django.shortcuts import render
# Create your views here.
from django.views.generic import ListView, DetailView, CreateView, UpdateView
from rest_framework import viewsets
from employee.forms import CoachingSessionForm
from .serializers import EmployeeSerializer, UserSerializer, GroupSerializer, CompanyGroupSerializer
from .models import Employee, CompanyGroup, CoachingSession
class EmployeeViewSet(viewsets.ModelViewSet):
serializer_class = EmployeeSerializer
def get_queryset(self):
group_slug = self.request.query_params.get('group-slug', None)
if group_slug:
qs = Employee.objects.from_group(group_slug)
else:
qs = Employee.objects.all()
return qs
class CompanyGroupViewSet(viewsets.ModelViewSet):
queryset = CompanyGroup.objects.all()
serializer_class = CompanyGroupSerializer
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_ | joined')
serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
class EmployeeListView(LoginRequiredMixin, ListView):
model = Employee
| def get_queryset(self):
qs = Employee.objects.from_group(self.kwargs['group_slug'])
return qs
class EmployeeProjectsView(LoginRequiredMixin, DetailView):
model = Employee
context_object_name = 'employee'
template_name = 'employee/employee_projects.html'
class EmployeeGoalsView(LoginRequiredMixin, DetailView):
model = Employee
context_object_name = 'employee'
template_name = 'employee/employee_goals.html'
class CoachingSessionUpdateView(LoginRequiredMixin, UpdateView):
model = CoachingSession
context_object_name = 'coaching_session'
form_class = CoachingSessionForm
class CoachingSessionCreateView(LoginRequiredMixin, CreateView):
model = CoachingSession
context_object_name = 'coaching_session'
form_class = CoachingSessionForm
def get_context_data(self, **kwargs):
context = super(CoachingSessionCreateView, self).get_context_data(**kwargs)
context['form'].fields['employee'].queryset = Employee.objects.from_group(self.kwargs['group_slug'])
return context
def form_valid(self, form):
obj = form.save(commit=False)
employee = Employee.objects.get(user=self.request.user)
obj.coach = employee
obj.save()
return super(CoachingSessionCreateView, self).form_valid(form)
class CoachingSessionDetailView(LoginRequiredMixin, DetailView):
model = CoachingSession
context_object_name = 'coaching_session'
class CoachingSessionListView(LoginRequiredMixin, ListView):
model = CoachingSession
context_object_name = 'coaching_sessions'
template_name = 'employee/coachingsession_list.html'
def get_queryset(self):
if self.kwargs.get('employee_pk', None):
qs = CoachingSession.objects.filter(employee__companygroupemployeeassignment__group__slug=self.kwargs['group_slug'],
employee__pk=self.kwargs['employee_pk'])
else:
qs = CoachingSession.objects.filter(employee__companygroupemployeeassignment__group__slug=self.kwargs['group_slug'])
return qs
|
gthank/pytips | docs/source/conf.py | Python | isc | 7,815 | 0.00755 | # -*- coding: utf-8 -*-
#
# PyTips documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 26 20:55:10 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyTips'
copyright = u'2012, Hank Gay'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0-alpha.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0-alpha.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML f | iles (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyTipsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The fon | t size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PyTips.tex', u'PyTips Documentation',
u'Hank Gay', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pytips', u'PyTips Documentation',
[u'Hank Gay'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PyTips', u'PyTips Documentation',
u'Hank Gay', 'PyTips', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
lento/cortex | python/IECoreMaya/VectorParameterUI.py | Python | bsd-3-clause | 4,276 | 0.029467 | ##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import IECore
import IECoreMaya
class VectorParameterUI( IECoreMaya.ParameterUI ) :
def __init__( self, node, parameter, **kw ) :
self.__dim = parameter.getTypedValue().dimensions()
if self.__dim == 2:
layout = maya.cmds.rowLayout(
numberOfColumns = 3,
columnWidth3 = [ IECoreMaya.ParameterUI.textColumnWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex ]
)
elif self.__dim == 3:
layout = maya.cmds.rowLayout(
numberOfColumns = 4,
columnWidth4 = [ IECoreMaya.ParameterUI.textColumnWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex ]
)
else:
raise RuntimeError("Unsupported vector dimension in VectorParameterUI")
IECoreMaya.ParameterUI.__init__( self, node, parameter, layout, **kw )
self.__fields = []
maya.cmds.text(
label = self.label(),
font = "smallPlainLabelFont",
align = "right",
annotation = self.description()
)
plug = self.plug()
| for i in range(0, self.__dim) :
self.__fields.append(
self.__fieldType()(
value = parameter.getTypedValue()[i]
)
)
maya.cmds.setParent("..")
self.replace( self.node(), self.parameter )
def replace( self, node, parameter ) :
IECoreMaya.ParameterUI.replace( self, node, parameter )
plug = self.plug()
for i in range(0, self.__dim):
childPlugName = self.nodeName() + "." + plug.child(i).partialName()
maya.cmds.con | nectControl( self.__fields[i], childPlugName )
self._addPopupMenu( parentUI = self.__fields[i], attributeName = childPlugName )
def __fieldType( self ):
if self.parameter.isInstanceOf( IECore.TypeId.V2iParameter ) or self.parameter.isInstanceOf( IECore.TypeId.V3iParameter ):
return maya.cmds.intField
else:
return maya.cmds.floatField
IECoreMaya.ParameterUI.registerUI( IECore.TypeId.V2iParameter, VectorParameterUI )
IECoreMaya.ParameterUI.registerUI( IECore.TypeId.V3iParameter, VectorParameterUI )
IECoreMaya.ParameterUI.registerUI( IECore.TypeId.V2fParameter, VectorParameterUI )
IECoreMaya.ParameterUI.registerUI( IECore.TypeId.V2dParameter, VectorParameterUI )
IECoreMaya.ParameterUI.registerUI( IECore.TypeId.V3fParameter, VectorParameterUI )
IECoreMaya.ParameterUI.registerUI( IECore.TypeId.V3dParameter, VectorParameterUI )
IECoreMaya.ParameterUI.registerUI( IECore.TypeId.Color3fParameter, VectorParameterUI, "numeric" )
|
ipapusha/amnet | tests/test_smt.py | Python | bsd-3-clause | 32,730 | 0.002383 | import numpy as np
import amnet
import z3
from numpy.linalg import norm
import sys
import unittest
import itertools
VISUALIZE = True # output graphviz drawings
if VISUALIZE:
import amnet.vis
class TestSmt(unittest.TestCase):
@classmethod
def setUpClass(cls):
print 'Setting up test floats.'
cls.floatvals = np.concatenate(
(np.linspace(-5., 5., 11), np.l | inspace(-5., 5., 10)),
axis=0
)
cls.floatvals2 = np.concatenate(
(np.linspace(-5., 5., 3), np.linspace(-.5, .5, 2)),
axis=0
)
cls.floatvals3 = np.linspace(-5., 5., 3)
cls.FPTOL = 1e-8
# set up global z3 parameters
# parameters from https://stackoverflow.com/a/12516269
#z3.set_param('auto_config', False)
#z3.set_param('smt. | case_split', 5)
#z3.set_param('smt.relevancy', 2)
def validate_outputs(self, phi, onvals, true_f=None, verbose=False):
# encode phi using default context and solver
enc = amnet.smt.SmtEncoder(phi=phi, solver=None)
# tap the input and output vars
invar = enc.var_of_input()
outvar = enc.var_of(phi)
# check dimensions
self.assertEqual(phi.indim, len(invar))
self.assertEqual(phi.outdim, len(outvar))
# go through inputs
for val in onvals:
# get a new value
fpval = np.array(val)
self.assertEqual(len(fpval), phi.indim)
# evaluate using the Amn tree
fpeval = phi.eval(fpval)
self.assertEqual(len(fpeval), phi.outdim)
if verbose:
print 'inp:', fpval
print 'fpeval: ', fpeval
# compare to true floating point function, if it's provided
if true_f is not None:
true_eval = true_f(fpval)
if verbose: print 'true_eval: ', true_eval
self.assertAlmostEqual(norm(true_eval - fpeval), 0)
# set the z3 input
enc.solver.push()
for i in range(len(invar)):
enc.solver.add(invar[i] == fpval[i])
# run z3 to check for satisfiability
result = enc.solver.check()
#if verbose: print enc.solver
self.assertTrue(result == z3.sat)
# extract the output
model = enc.solver.model()
smteval = np.zeros(len(outvar))
for i in range(len(outvar)):
smteval[i] = amnet.util.mfp(model, outvar[i])
# check that the outputs match
if verbose: print 'smteval: ', smteval
self.assertAlmostEqual(norm(smteval - fpeval), 0)
enc.solver.pop()
def donot_test_SmtEncoder_mu_big(self):
xyz = amnet.Variable(3, name='xyz')
x = amnet.atoms.select(xyz, 0)
y = amnet.atoms.select(xyz, 1)
z = amnet.atoms.select(xyz, 2)
w = amnet.Mu(x, y, z)
def true_mu(fpin):
x, y, z = fpin
return x if z <= 0 else y
self.validate_outputs(
phi=w,
onvals=itertools.product(self.floatvals, repeat=w.indim),
true_f=true_mu
)
def test_SmtEncoder_mu_small(self):
xyz = amnet.Variable(3, name='xyz')
x = amnet.atoms.select(xyz, 0)
y = amnet.atoms.select(xyz, 1)
z = amnet.atoms.select(xyz, 2)
w = amnet.Mu(x, y, z)
def true_mu(fpin):
x, y, z = fpin
return x if z <= 0 else y
self.validate_outputs(
phi=w,
onvals=itertools.product(self.floatvals2, repeat=w.indim),
true_f=true_mu
)
if VISUALIZE: amnet.vis.quick_vis(phi=w, title='mu')
def test_SmtEncoder_max_all_2(self):
xy = amnet.Variable(2, name='xy')
phi_max2 = amnet.atoms.max_all(xy)
self.assertEqual(phi_max2.indim, 2)
def true_max2(fpin):
x, y = fpin
return max(x, y)
self.validate_outputs(
phi=phi_max2,
onvals=itertools.product(self.floatvals, repeat=phi_max2.indim),
true_f=true_max2
)
def test_SmtEncoder_min_all_2(self):
xy = amnet.Variable(2, name='xy')
phi_min2 = amnet.atoms.min_all(xy)
self.assertEqual(phi_min2.indim, 2)
def true_min2(fpin):
x, y = fpin
return min(x, y)
self.validate_outputs(
phi=phi_min2,
onvals=itertools.product(self.floatvals, repeat=phi_min2.indim),
true_f=true_min2
)
def test_SmtEncoder_max_all_3_small(self):
xyz = amnet.Variable(3, name='xy')
phi_max3 = amnet.atoms.max_all(xyz)
self.assertEqual(phi_max3.indim, 3)
def true_max3(fpin):
x, y, z = fpin
return max(x, y, z)
self.validate_outputs(
phi=phi_max3,
onvals=itertools.product(self.floatvals2, repeat=phi_max3.indim),
true_f=true_max3
)
def test_SmtEncoder_min_all_3_small(self):
xyz = amnet.Variable(3, name='xy')
phi_min3 = amnet.atoms.min_all(xyz)
self.assertEqual(phi_min3.indim, 3)
def true_min3(fpin):
x, y, z = fpin
return min(x, y, z)
self.validate_outputs(
phi=phi_min3,
onvals=itertools.product(self.floatvals2, repeat=phi_min3.indim),
true_f=true_min3
)
def test_SmtEncoder_add_all(self):
xyz = amnet.Variable(3, name='xyz')
phi_add = amnet.atoms.add_all(xyz)
self.assertEqual(phi_add.outdim, 1)
self.assertEqual(phi_add.indim, 3)
def true_add(fpin):
return sum(fpin)
self.validate_outputs(
phi=phi_add,
onvals=itertools.product(self.floatvals2, repeat=phi_add.indim),
true_f=true_add
)
def test_SmtEncoder_add_list(self):
xyz = amnet.Variable(2+2+2, name='xyz')
x = amnet.Linear(np.eye(2, 6, 0), xyz)
y = amnet.Linear(np.eye(2, 6, 2), xyz)
z = amnet.Linear(np.eye(2, 6, 4), xyz)
phi_add_list = amnet.atoms.add_list([x, y, z])
self.assertEqual(x.outdim, 2)
self.assertEqual(y.outdim, 2)
self.assertEqual(z.outdim, 2)
self.assertEqual(phi_add_list.outdim, 2)
self.assertEqual(phi_add_list.indim, 6)
def true_add(fpin):
x, y, z = fpin[0:2], fpin[2:4], fpin[4:6]
return x + y + z
self.validate_outputs(
phi=phi_add_list,
onvals=itertools.product(self.floatvals3, repeat=phi_add_list.indim),
true_f=true_add
)
def test_SmtEncoder_triplexer(self):
np.random.seed(1)
TOTAL_RUNS=5
#print ""
for iter in range(TOTAL_RUNS):
#print "Testing random triplexer [%d/%d]..." % (iter+1, TOTAL_RUNS),
# create a random triplexer
x = amnet.Variable(1, name='x')
a = 3 * (2 * np.random.rand(4) - 1)
b = 3 * (2 * np.random.rand(4) - 1)
c = 3 * (2 * np.random.rand(4) - 1)
d = 3 * (2 * np.random.rand(4) - 1)
e = 3 * (2 * np.random.rand(4) - 1)
f = 3 * (2 * np.random.rand(4) - 1)
phi_tri = amnet.atoms.triplexer(x, a, b, c, d, e, f)
def true_tri(fpin):
return amnet.atoms.fp_triplexer(fpin, a, b, c, d, e, f)
xvals = 50 * (2 * np.random.rand(100) - 1)
onvals = itertools.product(xvals, repeat=1)
self.validate_outputs(
phi=phi_tri,
onvals=onvals,
true_f=true_tri
)
#print "done!"
def test_SmtEncoder_max_aff(self):
np.random.seed(1)
m = 10
n = 4
A = np.random.randint(-5, 6, m*n).reshape((m, n))
b = np.random.randint(-5, 6, m).reshape((m,))
b[np.random.randint(0, n)] = 0 # make sure there is a Linear term
x = amnet.Variable(n, name='x')
y = amnet.atoms.max_aff(A, x, b)
s |
harikishen/addons-server | src/olympia/stats/tasks.py | Python | bsd-3-clause | 12,766 | 0 | import datetime
import httplib2
import itertools
from django.conf import settings
from django.db import connection
from django.db.models import Sum, Max
from apiclient.discovery import build
from elasticsearch.helpers import bulk_index
from oauth2client.client import OAuth2Credentials
import olympia.core.logger
from olympia import amo
from olympia.amo import search as amo_search
from olympia.addons.models import Addon
from olympia.amo.celery import task
from olympia.bandwagon.models import Collection
from olympia.reviews.models import Review
from olympia.users.models import UserProfile
from olympia.versions.models import Version
from . import search
from .models import (
AddonCollectionCount, CollectionCount, CollectionStats, DownloadCount,
ThemeUserCount, UpdateCount)
log = olympia.core.logger.getLogger('z.task')
@task
def update_addons_collections_downloads(data, **kw):
log.info("[%s] Updating addons+collections download totals." %
(len(data)))
query = (
"UPDATE addons_collections SET downloads=%s WHERE addon_id=%s "
"AND collection_id=%s;" * len(data))
with connection.cursor() as cursor:
cursor.execute(
query,
list(itertools.chain.from_iterable(
[var['sum'], var['addon'], var['collection']]
for var in data)))
@task
def update_collections_total(data, **kw):
log.info("[%s] Updating collections' download totals." %
(len(data)))
for var in data:
(Collection.objects.filter(pk=var['collection_id'])
.update(dow | nloads=var['sum']))
def get_profile_id(service, domain):
"""
Fetch the profile ID for the given domain.
"""
accounts = service.management().accounts().list().execute()
account_ids = [a['id'] for a in acc | ounts.get('items', ())]
for account_id in account_ids:
webproperties = service.management().webproperties().list(
accountId=account_id).execute()
webproperty_ids = [p['id'] for p in webproperties.get('items', ())]
for webproperty_id in webproperty_ids:
profiles = service.management().profiles().list(
accountId=account_id,
webPropertyId=webproperty_id).execute()
for p in profiles.get('items', ()):
# sometimes GA includes "http://", sometimes it doesn't.
if '://' in p['websiteUrl']:
name = p['websiteUrl'].partition('://')[-1]
else:
name = p['websiteUrl']
if name == domain:
return p['id']
@task
def update_google_analytics(date, **kw):
creds_data = getattr(settings, 'GOOGLE_ANALYTICS_CREDENTIALS', None)
if not creds_data:
log.critical('Failed to update global stats: '
'GOOGLE_ANALYTICS_CREDENTIALS not set')
return
creds = OAuth2Credentials(
*[creds_data[k] for k in
('access_token', 'client_id', 'client_secret',
'refresh_token', 'token_expiry', 'token_uri',
'user_agent')])
h = httplib2.Http()
creds.authorize(h)
service = build('analytics', 'v3', http=h)
domain = getattr(settings,
'GOOGLE_ANALYTICS_DOMAIN', None) or settings.DOMAIN
profile_id = get_profile_id(service, domain)
if profile_id is None:
log.critical('Failed to update global stats: could not access a Google'
' Analytics profile for ' + domain)
return
datestr = date.strftime('%Y-%m-%d')
try:
data = service.data().ga().get(ids='ga:' + profile_id,
start_date=datestr,
end_date=datestr,
metrics='ga:visits').execute()
# Storing this under the webtrends stat name so it goes on the
# same graph as the old webtrends data.
p = ['webtrends_DailyVisitors', data['rows'][0][0], date]
except Exception, e:
log.critical(
'Fetching stats data for %s from Google Analytics failed: %s' % e)
return
try:
cursor = connection.cursor()
cursor.execute('REPLACE INTO global_stats (name, count, date) '
'values (%s, %s, %s)', p)
except Exception, e:
log.critical('Failed to update global stats: (%s): %s' % (p, e))
else:
log.debug('Committed global stats details: (%s) has (%s) for (%s)'
% tuple(p))
finally:
cursor.close()
@task
def update_global_totals(job, date, **kw):
log.info('Updating global statistics totals (%s) for (%s)' % (job, date))
jobs = _get_daily_jobs(date)
jobs.update(_get_metrics_jobs(date))
num = jobs[job]()
q = """REPLACE INTO global_stats (`name`, `count`, `date`)
VALUES (%s, %s, %s)"""
p = [job, num or 0, date]
try:
cursor = connection.cursor()
cursor.execute(q, p)
except Exception, e:
log.critical('Failed to update global stats: (%s): %s' % (p, e))
else:
log.debug('Committed global stats details: (%s) has (%s) for (%s)'
% tuple(p))
finally:
cursor.close()
def _get_daily_jobs(date=None):
"""Return a dictionary of statistics queries.
If a date is specified and applies to the job it will be used. Otherwise
the date will default to the previous day.
"""
if not date:
date = datetime.date.today() - datetime.timedelta(days=1)
# Passing through a datetime would not generate an error,
# but would pass and give incorrect values.
if isinstance(date, datetime.datetime):
raise ValueError('This requires a valid date, not a datetime')
# Testing on lte created date doesn't get you todays date, you need to do
# less than next date. That's because 2012-1-1 becomes 2012-1-1 00:00
next_date = date + datetime.timedelta(days=1)
date_str = date.strftime('%Y-%m-%d')
extra = dict(where=['DATE(created)=%s'], params=[date_str])
# If you're editing these, note that you are returning a function! This
# cheesy hackery was done so that we could pass the queries to celery
# lazily and not hammer the db with a ton of these all at once.
stats = {
# Add-on Downloads
'addon_total_downloads': lambda: DownloadCount.objects.filter(
date__lt=next_date).aggregate(sum=Sum('count'))['sum'],
'addon_downloads_new': lambda: DownloadCount.objects.filter(
date=date).aggregate(sum=Sum('count'))['sum'],
# Listed Add-on counts
'addon_count_new': Addon.objects.valid().extra(**extra).count,
# Listed Version counts
'version_count_new': Version.objects.filter(
channel=amo.RELEASE_CHANNEL_LISTED).extra(**extra).count,
# User counts
'user_count_total': UserProfile.objects.filter(
created__lt=next_date).count,
'user_count_new': UserProfile.objects.extra(**extra).count,
# Review counts
'review_count_total': Review.objects.filter(created__lte=date,
editorreview=0).count,
# We can't use "**extra" here, because this query joins on reviews
# itself, and thus raises the following error:
# "Column 'created' in where clause is ambiguous".
'review_count_new': Review.objects.filter(editorreview=0).extra(
where=['DATE(reviews.created)=%s'], params=[date_str]).count,
# Collection counts
'collection_count_total': Collection.objects.filter(
created__lt=next_date).count,
'collection_count_new': Collection.objects.extra(**extra).count,
'collection_addon_downloads': (
lambda: AddonCollectionCount.objects.filter(
date__lte=date).aggregate(sum=Sum('count'))['sum']),
}
# If we're processing today's stats, we'll do some extras. We don't do
# these for re-processed stats because they change over time (eg. add-ons
# move from sandbox -> public
if date == (datetime.date.toda |
queria/my-tempest | tempest/services/data_processing/v1_1/client.py | Python | apache-2.0 | 11,290 | 0 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.common import rest_client
from tempest import config
CONF = config.CONF
class DataProcessingClient(rest_client.RestClient):
def __init__(self, auth_provider):
super(DataProcessingClient, self).__init__(auth_provider)
self.service = CONF.data_processing.catalog_type
def _request_and_check_resp(self, request_func, uri, resp_status):
"""Make a request using specified request_func and check response
status code.
It returns pair: resp and response body.
"""
resp, body = request_func(uri)
self.expected_success(resp_status, resp.status)
return resp, body
def _request_check_and_parse_resp(self, request_func, uri, resp_status,
resource_name, *args, **kwargs):
"""Make a request using specified request_func, check response status
code and parse response body.
It returns pair: resp and parsed resource(s) body.
"""
headers = {'Content-Type': 'application/json'}
resp, body = request_func(uri, headers=headers, *args, **kwargs)
self.expected_success(resp_status, resp.status)
body = json.loads(body)
return resp, body[resource_name]
def list_node_group_templates(self):
"""List all node group templates for a user."""
uri = 'node-group-templates'
return self._request_check_and_parse_resp(self.get, uri,
200, 'node_group_templates')
def get_node_group_template(self, tmpl_id):
"""Returns the details of a single node group template."""
uri = 'node-group-templates/%s' % tmpl_id
return self._request_check_and_parse_resp(self.get, uri,
200, 'node_group_template')
def create_node_group_template(self, name, plugin_name, hadoop_version,
node_processes, flavor_id,
node_configs=None, **kwargs):
"""Creates node group template with specified params.
It supports passing additional params using kwargs and returns created
object.
"""
uri = 'node-group-templates'
body = kwargs.copy()
body.update({
'name': name,
'plugin_name': plugin_name,
'hadoop_version': hadoop_version,
'node_processes': node_processes,
'flavor_id': flavor_id,
'node_configs': node_configs or dict(),
})
return self._request_check_and_parse_resp(self.post, uri, 202,
'node_group_template',
body=json.dumps(body))
def delete_node_group_template(self, tmpl_id):
"""Deletes the specified node group template by id."""
uri = 'node-group-templates/%s' % tmpl_id
return self._request_and_check_resp(self.delete, uri, 204)
def list_plugins(self):
"""List all enabled plugins."""
uri = 'plugins'
return self._request_check_and_parse_resp(self.get,
uri, 200, 'plugins')
def get_plugin(self, plugin_name, plugin_version=None):
"""Returns the details of a single plugin."""
uri = 'plugins/%s' % plugin_name
if plugin_version:
uri += '/%s' % plugin_version
return self._request_check_and_parse_resp(self.get, uri, 200, 'plugin')
def list_cluster_templates(self):
"""List all cluster templates for a user."""
uri = 'cluster-templates'
return self._request_check_and_parse_resp(self.get, uri,
200, 'cluster_templates')
def get_cluster_template(self, tmpl_id):
"""Returns the details of a single clus | ter template."""
uri = 'cluster-templates/%s' % tmpl_id
return self._request_check_and_parse_resp(self.get,
uri, 200, 'cluster_template')
def create_cluster_template(self, name, | plugin_name, hadoop_version,
node_groups, cluster_configs=None,
**kwargs):
"""Creates cluster template with specified params.
It supports passing additional params using kwargs and returns created
object.
"""
uri = 'cluster-templates'
body = kwargs.copy()
body.update({
'name': name,
'plugin_name': plugin_name,
'hadoop_version': hadoop_version,
'node_groups': node_groups,
'cluster_configs': cluster_configs or dict(),
})
return self._request_check_and_parse_resp(self.post, uri, 202,
'cluster_template',
body=json.dumps(body))
def delete_cluster_template(self, tmpl_id):
"""Deletes the specified cluster template by id."""
uri = 'cluster-templates/%s' % tmpl_id
return self._request_and_check_resp(self.delete, uri, 204)
def list_data_sources(self):
"""List all data sources for a user."""
uri = 'data-sources'
return self._request_check_and_parse_resp(self.get,
uri, 200, 'data_sources')
def get_data_source(self, source_id):
"""Returns the details of a single data source."""
uri = 'data-sources/%s' % source_id
return self._request_check_and_parse_resp(self.get,
uri, 200, 'data_source')
def create_data_source(self, name, data_source_type, url, **kwargs):
"""Creates data source with specified params.
It supports passing additional params using kwargs and returns created
object.
"""
uri = 'data-sources'
body = kwargs.copy()
body.update({
'name': name,
'type': data_source_type,
'url': url
})
return self._request_check_and_parse_resp(self.post, uri,
202, 'data_source',
body=json.dumps(body))
def delete_data_source(self, source_id):
"""Deletes the specified data source by id."""
uri = 'data-sources/%s' % source_id
return self._request_and_check_resp(self.delete, uri, 204)
def list_job_binary_internals(self):
"""List all job binary internals for a user."""
uri = 'job-binary-internals'
return self._request_check_and_parse_resp(self.get,
uri, 200, 'binaries')
def get_job_binary_internal(self, job_binary_id):
"""Returns the details of a single job binary internal."""
uri = 'job-binary-internals/%s' % job_binary_id
return self._request_check_and_parse_resp(self.get, uri,
200, 'job_binary_internal')
def create_job_binary_internal(self, name, data):
"""Creates job binary internal with specified params."""
uri = 'job-binary-internals/%s' % name
return self._request_check_and_parse_resp(self.put, uri, 202,
'job_binary_internal', data)
def delete_job_binary_internal(self, job_binary_id):
"""Deletes the specifi |
yavdr/yavdr-ansible | plugins/callbacks/auto_tags.py | Python | gpl-3.0 | 1,950 | 0.004615 | """
This module implements an Ansible plugin that is triggered at the start of a playbook.
The plugin dynamically generates a tag for each role. Each tag has the same name as its role.
The advantage of this is that it saves you some boilerplate, because you don't have to wrap
all tasks of a role in an additional block and assign a tag to that.
Additionally, it works automatically when you add new roles to your playbook.
Usage is exactly the same as without this plugin:
ansible-playbook --tags=some_tag provision.yml
Here, the "some_tag" tag was generated dynamically (assuming there is a "some_tag" role).
Installation:
1. Place this file in `plugins/callbacks/auto_tags.py` (relative to your playbook root)
2. Add the fo | llowing two lines to your `ansible.cfg` file:
callback_plugins = plugins/callbacks
callback_whitelist = auto_tags
"""
from __future__ import pri | nt_function
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
"""
Ansible supports several types of plugins. We are using the *callback* type here, since
it seemed the best choice for our use case, because it allows you to hook into the start
of a playbook.
"""
def v2_playbook_on_start(self, playbook):
"""
Dynamically add a tag of the same name to each role.
Note: Plays, roles, task_blocks and tasks can have tags.
"""
plays = playbook.get_plays()
# Note: Although identical roles are shared between plays we cannot deduplicate them,
# since Ansible treats them as different objects internally
roles = [role for play in plays for role in play.get_roles()]
# Note: Tags for roles are set dynamically in `_load_role_data` instead of in __init__
# I don't know why they do that.
for role in roles:
role_name = role._role_name
if role_name not in role.tags:
role.tags += [role_name]
|
srlang/void-tool | void.py | Python | gpl-2.0 | 1,552 | 0.00451 | #
# void.py
# Python-written helper tool to Void Linux's xbps-* tools
# Written for personal ease of use, not robustness.
#
# Copyright (c) 2014Sean R. Lang <srlang@ncsu.edu>
#
from subprocess import call # for external shell ca | lls
from sys import argv # for command line arguments
##########################################################################
# xbps subcommands
def void_install(args):
'''Install the specified package atoms.'''
pass
def void_remove(args):
'''Remove the specified package atoms.'''
pass
def | void_query(args):
'''Perform a search operation for a package atom'''
pass
def find_command(cmd):
if "-I" in cmd:
return void_install
elif "-Q" in cmd:
return void_query
elif "-R" in cmd:
return void_remove
else:
return usage()
##########################################################################
##########################################################################
# Usage handling items
USAGE_MESSAGE="""
Usage: void <operation>[flags] [package]
operation: -I, -Q, -R
-I: Install
-Q: Query
-R: Remove
"""
def usage(args=None):
'''Display a usage message to the user.'''
print USAGE_MESSAGE
##########################################################################
def main(argv):
'''Main program logic'''
if len(argv) > 1:
cmd = argv[1]
func = find_command(cmd)
func(argv[1:])
else:
usage()
if __name__ == '__main__':
main(argv)
|
SylvainTakerkart/vobi_one | lib/python2.5/site-packages/oidata/oisession_preprocesses.py | Python | gpl-3.0 | 15,602 | 0.016152 | # Author: Philippe Katz <philippe.katz@gmail.com>,
# Flavien Garcia <flavien.garcia@free.fr>,
# Sylvain Takerkart <Sylvain.Takerkart@incm.cnrs-mrs.fr>
# License: BSD Style.
try:
from neuroProcesses import *
except:
print 'Impossible to import neurProcesses'
def print_fonc(string,context=None):
"""Print function for BrainVISA
Print function for BrainVISA. Desactivated when context is None (for a use in bash mode)
Parameters
----------
string : str
The string to print
context : brainvisa context, optional
Can be None for a use in bash mode or the BrainVISA context for a use in BrainVISA graphical mode
"""
if context is None: # If no context, for a use in BrainVISA shell
return
else:
context.write(_t_(string)) # | If used in BrainVISA graphical mode
def warning_fonc(string,context=None):
"""Warning function for BrainVISA
Warning function for BrainVISA.
Print :
* a string in bash when context is None (for a use in bash mode)
* a warning message in BrainVISA user interface when context is BrainVISA context
Parameters
----------
string : str
The string to pr | int
context : brainvisa context, optional
Can be None for a use in bash mode or the BrainVISA context for a use in BrainVISA graphical mode
"""
if context is None: # If no context, for a use in BrainVISA shell
print string
else:
context.warning(_t_(string)) # If used in BrainVISA graphical mode
def error_fonc(string,context=None):
"""Error function for BrainVISA
Error function for BrainVISA.
Print :
* a string in bash when context is None (for a use in bash mode)
* a error message in BrainVISA user interface when context is BrainVISA context
Parameters
----------
string : str
The string to print
context : brainvisa context, optional
Can be None for a use in bash mode or the BrainVISA context for a use in BrainVISA graphical mode
"""
if context is None: # If no context, for a use in BrainVISA shell
print string
else:
context.error(_t_(string)) # If used in BrainVISA graphical mode
def interruption( context ):
"""Interruption function for BrainVISA
Interruption function of a script for BrainVISA. Stops the current process.
Parameters
----------
context : brainvisa context
Can be None for a use in bash mode or the BrainVISA context for a use in BrainVISA graphical mode
"""
if context is None: # If no context, for a use in BrainVISA shell
return
else:
context.checkInterruption( ) # If used in BrainVISA graphical mode
try:
import oisession #Import of session-level class for optical imaging
except:
import oidata.oisession as oisession #Import of session-level class for optical imaging
def create_physio_params_file_process( database,protocol,subject,session, raw_physio_file_input, mode = False, context=None ,script=False):
"""Creates the file containing the frequency and phase values for physiological variables (heartbeat and
respiration) which are estimated externally
It is read to define a trial-specific design matrix from the session design matrix.
Parameters
----------
raw_physio_file_input : str
path of the raw unimported file containing the physio parameters
database : str
Database's path
protocol : str
Protocol's name
sujet : str
Subject's name
session : str
Session directory. Given by the prefix "session_" and the session date.
mode : bool, optional
The database mode
context : brainvisa context, optional
Can be None for a use in bash mode or the BrainVISA context for a use in BrainVISA graphical mode
script : script context, optional
If True (i.e. when called from a script), avoid to apply update_database to speed up the process
Returns
-------
path : str
Physio file's path
"""
print_fonc("Initialization",context)
this_session = oisession.OiSession( database,protocol,subject,session ) # OiSession instance initialization
print_fonc("Creation of the condition file",context)
# Creation of the file containing the physiological params
physio_param_fname = this_session.create_physio_params_file_func(raw_physio_file_input)
if context == None and mode == True and script == False: # For a use in BrainVISA shell, in database mode
this_session.update_database() # Updating database
return physio_param_fname
def create_trials_conds_file_process( database,protocol,subject,session, mode = False, context=None ,script=False):
"""Creates the conditions file
Creates a condition file, containing the paths of datas and their conditions of experimentation. It is read to perform the session-level processes
Parameters
----------
database : str
Database's path
protocol : str
Protocol's name
subject : str
Subject's name
session : str
Session directory. Given by the prefix "session_" and the session date.
mode : bool, optional
The database mode
context : brainvisa context, optional
Can be None for a use in bash mode or the BrainVISA context for a use in BrainVISA graphical mode
script : script context, optional
If True, avoid to apply update_database in a script function to don't
slow execution
Returns
-------
path : str
Condition file's path
"""
print_fonc("Initialization",context)
this_session = oisession.OiSession( database,protocol,subject,session ) # OiSession instance initialization
print_fonc("Creation of the condition file",context)
path = this_session.create_trials_conds_file_func() # Condition file creation
if context == None and mode == True and script == False: # For a use in BrainVISA shell, in database mode
this_session.update_database() # Updating database
return path
def spectral_analysis_process( database,protocol,subject,session,analysis,conditions, corner0=None, corner1=None, path_mask=None, format='.nii', data_graph='~/',context=None):
"""Calculate the mean of spectrums of each trial and save a data graph
Parameters
----------
database : str
Database's path
protocol : str
Protocol's name
subject : str
Subject's name
session : str
Session directory. Given by the prefix "session_" and the session date.
analysis : str
Analysis' directory
conditions : tuple of list
A tuple of list of condition which has to be averaged
Exemple :
([5,2]) The files which have for condtions 5 and 2 will be averaged together.
corner0 : int tuple, optional
Top left-hand corner
corner1 : int tuple, optional
Bottom right-hand corner
path_mask : str, optional
Path of binary mask
format : {'.nii','.nii.gz'}
Saving format of images. It can be NIFTI-1 Image ('.nii') or gzip compressed NIFTI-1 Image ('.nii.gz')
data_graph : str
Path of the data graph created
context : brainvisa context, optional
Can be None for a use in bash mode or the BrainVISA context for a use in BrainVISA graphical mode
"""
print_fonc("Initialization",context)
mySAP = oisession.OiSession(database,protocol,subject,session,format=format) # OiSession instance initialization
print_fonc("Create file list for getting averaged spectrum",context)
mySAP.create_filelist_for_averaging(analysis,conditions)
mySAP.spectrums_list=mySAP.paths_list_list_for_averaging[0]
mySAP.load_averaged_img(path=mySAP.spectrums_list[0]) # Loads an existing session averaged image
if path_mask == None:
mySAP.create_rectangle_mask(corner0, corner1) # Creates a rectangle mask which defines the region to average
else:
print_fonc("Loading mask",context)
db_fit=mySAP.load_mask(path=path_mask) # Loads an exi |
Itxaka/st2 | st2actions/tests/unit/test_paramiko_ssh.py | Python | apache-2.0 | 9,657 | 0.000207 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from StringIO import StringIO
import unittest2
from mock import (call, patch, Mock, MagicMock)
import paramiko
from st2actions.runners.ssh.paramiko_ssh import ParamikoSSHClient
from st2tests.fixturesloader import get_resources_base_path
import st2tests.config as tests_config
tests_config.parse_args()
class ParamikoSSHClientTests(unittest2.TestCase):
@patch('paramiko.SSHClient', Mock)
def setUp(self):
"""
Creates the object patching the actual connection.
"""
conn_params = {'hostname': 'dummy.host.org',
'port': 8822,
'username': 'ubuntu',
'key': '~/.ssh/ubuntu_ssh',
'timeout': '600'}
self.ssh_cli = ParamikoSSHClient(**conn_params)
@patch('paramiko.SSHClient', Mock)
def test_create_with_password(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'password': 'ubuntu'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'password': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
@patch('paramiko.SSHClient', Mock)
def test_deprecated_key_argument(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key': 'id_rsa'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'key_filename': 'id_rsa',
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
def test_key_files_and_key_material_arguments_are_mutual_exclusive(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key_files': 'id_rsa',
'key_material': 'key'}
expected_msg = ('key_files and key_material arguments are mutually '
'exclusive')
self.assertRaisesRegexp(ValueError, expected_msg,
ParamikoSSHClient, **conn_params)
@patch('paramiko.SSHClient', Mock)
def test_key_material_argument(self):
path = os.path.join(get_resources_base_path(),
'ssh', 'dummy_rsa')
with open(path, 'r') as fp:
private_key = fp.read()
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key_material': private_key}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
pkey = paramiko.RSAKey.from_private_key(StringIO(private_key))
expected_conn = {'username': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'pkey': pkey,
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
@patch('paramiko.SSHClient', Mock)
def test_key_material_argument_invalid_key(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key_material': 'id_rsa'}
mock = ParamikoSSHClient(**conn_params)
expected_msg = 'Invalid or unsupported key type'
self.assertRaisesRegexp(paramiko.ssh_exception.SSHException,
expected_msg, mock.connect)
@patch('paramiko.SSHClient', Mock)
def test_create_with_key(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key_files': 'id_rsa'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'key_filename': 'id_rsa',
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
@patch('paramiko.SSHClient', Mock)
def test_create_with_password_and_key(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'password': 'ubuntu',
'key': 'id_rsa'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'password': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.hos | t.org',
'look_for_keys': False,
'key_filename': 'id_rsa',
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
@patch('paramiko.SSHClient', Mock)
def test_create_without_creden | tials(self):
"""
Initialize object with no credentials.
Just to have better coverage, initialize the object
without 'password' neither 'key'.
"""
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'hostname': 'dummy.host.org',
'allow_agent': True,
'look_for_keys': True,
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
@patch.object(ParamikoSSHClient, '_consume_stdout',
MagicMock(return_value=StringIO('')))
@patch.object(ParamikoSSHClient, '_consume_stderr',
MagicMock(return_value=StringIO('')))
@patch.object(os.path, 'exists', MagicMock(return_value=True))
@patch.object(os, 'stat', MagicMock(return_value=None))
def test_basic_usage_absolute_path(self):
"""
Basic execution.
"""
mock = self.ssh_cli
# script to execute
sd = "/root/random_script.sh"
# Connect behavior
mock.connect()
mock_cli = mock.client # The actual mocked object: SSHClient
expected_conn = {'username': 'ubuntu',
'key_filename': '~/.ssh/ubuntu_ssh',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'timeout': '600',
'port': 8822}
mock_cli.connect.assert_called_once_with(**expected_conn)
mock.put(sd, sd, mirror_local_mode=False)
mock_cli.open_sftp().put.assert_called_once_with(sd, sd)
mock.run(sd)
# Make assertions over 'run' method
mock_cli.get_transport().open_session().ex |
Samweli/inasafe | safe/utilities/test/test_gis.py | Python | gpl-3.0 | 13,312 | 0.000075 | # coding=utf-8
"""Test for GIS utilities functions."""
import unittest
import numpy
from os.path import join
# noinspection PyUnresolvedReferences
import qgis # pylint: disable=unused-import
from PyQt4.QtCore import QVariant
from os.path import join
from safe.utilities.gis import (
layer_attribute_names,
is_polygon_layer,
buffer_points,
validate_geo_array)
from safe.common.exceptions import RadiiException
from safe.test.utilities import (
TESTDATA,
HAZDATA,
clone_shp_layer,
compare_two_vector_layers,
clone_raster_layer,
standard_data_path,
load_layer,
get_qgis_app)
from safe.utilities.gis import get_optimal_extent
from safe.common.exceptions import BoundingBoxError, InsufficientOverlapError
from safe.storage.core import read_layer
# Initialise the QGIS application/canvas handles shared by this test module.
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
class TestQGIS(unittest.TestCase):
    def test_get_layer_attribute_names(self):
        """layer_attribute_names() returns the matching fields and position.

        Covers three cases: an attribute that exists (position returned),
        an attribute that is missing (position is None), and a raster
        layer (both results are None).
        """
        layer = clone_shp_layer(
            name='district_osm_jakarta',
            include_keywords=True,
            source_directory=standard_data_path('boundaries'))
        # with good attribute name
        attributes, position = layer_attribute_names(
            layer,
            [QVariant.Int, QVariant.String],
            'TEST_STR')
        expected_attributes = ['KAB_NAME', 'TEST_STR', 'TEST_INT']
        expected_position = 1
        message = 'expected_attributes, got %s, expected %s' % (
            attributes, expected_attributes)
        self.assertEqual(attributes, expected_attributes, message)
        message = 'expected_position, got %s, expected %s' % (
            position, expected_position)
        self.assertEqual(position, expected_position, message)
        # with non existing attribute name
        attributes, position = layer_attribute_names(
            layer,
            [QVariant.Int, QVariant.String],
            'MISSING_ATTR')
        expected_attributes = ['KAB_NAME', 'TEST_STR', 'TEST_INT']
        expected_position = None
        message = 'expected_attributes, got %s, expected %s' % (
            attributes, expected_attributes)
        self.assertEqual(attributes, expected_attributes, message)
        message = 'expected_position, got %s, expected %s' % (
            position, expected_position)
        self.assertEqual(position, expected_position, message)
        # with raster layer - attribute lookup is meaningless, expect None/None
        layer = clone_raster_layer(
            name='padang_tsunami_mw8',
            extension='.tif',
            include_keywords=True,
            source_directory=standard_data_path('hazard')
        )
        attributes, position = layer_attribute_names(layer, [], '')
        message = 'Should return None, None for raster layer, got %s, %s' % (
            attributes, position)
        assert (attributes is None and position is None), message
    def test_is_polygonal_layer(self):
        """is_polygon_layer() detects polygon vs point vs raster layers."""
        # Polygon layer
        layer = clone_shp_layer(
            name='district_osm_jakarta',
            include_keywords=True,
            source_directory=standard_data_path('boundaries'))
        message = 'isPolygonLayer, %s layer should be polygonal' % layer
        self.assertTrue(is_polygon_layer(layer), message)
        # Point layer
        layer = clone_shp_layer(
            name='volcano_point',
            include_keywords=True,
            source_directory=standard_data_path('hazard'))
        message = '%s layer should be polygonal' % layer
        self.assertFalse(is_polygon_layer(layer), message)
        # Raster layer - never polygonal
        layer = clone_raster_layer(
            name='padang_tsunami_mw8',
            extension='.tif',
            include_keywords=True,
            source_directory=standard_data_path('hazard')
        )
        message = ('%s raster layer should not be polygonal' % layer)
        self.assertFalse(is_polygon_layer(layer), message)
def test_validate_geo_array(self):
"""Test validate geographic extent method.
.. versionadded:: 3.2
"""
# Normal case
min_longitude = 20.389938354492188
min_latitude = -34.10782492987083
max_longitude = 20.712661743164062
max_latitude = -34.008273470938335
extent = [min_longitude, min_latitude, max_longitude, max_latitude]
self.assertTrue(validate_geo_array(extent))
# min_latitude >= max_latitude
min_latitude = 34.10782492987083
max_latitude = -34.008273470938335
min_longitude = 20.389938354492188
max_longitude = 20.712661743164062
extent = [min_longitude, min_latitude, max_longitude, max_latitude]
self.assertFalse(validate_geo_array(extent))
# min_longitude >= max_longitude
min_latitude = -34.10782492987083
max_latitude = -34.008273470938335
min_longitude = 34.10782492987083
max_longitude = -34.008273470938335
extent = [min_longitude, min_latitude, max_longitude, max_latitude]
self.assertFalse(validate_geo_array(extent))
# min_latitude < -90 or > 90
min_latitude = -134.10782492987083
max_latitude = -34.008273470938335
min_longitude = 20.389938354492188
max_longitude = 20.712661743164062
extent = [min_longitude, min_latitude, max_longitude, max_latitude]
self.assertFalse(validate_geo_array(extent))
# max_latitude < -90 or > 90
min_latitude = -9.10782492987083
max_latitude = 91.10782492987083
min_longitude = 20.389938354492188
max_longitude = 20.712661743164062
extent = [min_longitude, min_latitude, max_longitude, max_latitude]
self.assertFalse(validate_geo_array(extent))
# min_longitude < -180 or > 180
min_latitude = -34.10782492987083
max_latitude = -34.008273470938335
min_longitude = -184.10782492987083
max_longitude = 20.712661743164062
extent = [min_longitude, min_latitude, max_longitude, max_latitude]
self.assertFalse(validate_geo_array(extent))
# max_longitude < -180 or > 180
min_latitude = -34.10782492987083
max_latitude = -34.008273470938335
min_longitude = 20.389938354492188
max_longitude = 180.712661743164062
extent = [min_longitude, min_latitude, max_longitude, max_latitude]
self.assertFalse(validate_geo_array(extent))
def test_get_optimal_extent(self):
"""Optimal extent is calculated correctly"""
exposure_path = join(TESTDATA, 'Population_2010.asc')
hazard_path = join(HAZDATA, 'Lembang_Earthquake_Scenario.asc')
# Expected data
haz_metadata = {
'bounding_box': (
105.3000035,
-8.3749994999999995,
110.2914705,
-5.5667784999999999),
'resolution': (
0.0083330000000000001,
0.0083330000000000001)}
exp_metadata = {
'bounding_box': (
94.972335000000001,
-11.009721000000001,
141.0140016666665,
6.0736123333332639),
| 'resolution': (
0.0083333333333333003,
| 0.0083333333333333003)}
# Verify relevant metada is ok
H = read_layer(hazard_path)
E = read_layer(exposure_path)
hazard_bbox = H.get_bounding_box()
assert numpy.allclose(hazard_bbox, haz_metadata['bounding_box'],
rtol=1.0e-12, atol=1.0e-12)
exposure_bbox = E.get_bounding_box()
assert numpy.allclose(exposure_bbox, exp_metadata['bounding_box'],
rtol=1.0e-12, atol=1.0e-12)
hazard_res = H.get_resolution()
assert numpy.allclose(hazard_res, haz_metadata['resolution'],
rtol=1.0e-12, atol=1.0e-12)
exposure_res = E.get_resolution()
assert numpy.allclose(exposure_res, exp_metadata['resolution'],
rtol=1.0e-12, atol=1.0e-12)
# First, do some examples that produce valid results
|
csparpa/django-httpbin | django_httpbin/urls.py | Python | mit | 148 | 0.006757 | from django.conf.urls import patterns, include
# Mount the httpbin-clone endpoints under the /django-httpbin prefix.
# NOTE(review): patterns() is the Django <=1.9 URLconf API (removed in 1.10);
# the regex is unanchored at the end, so any path starting with
# 'django-httpbin' matches -- presumably intentional, verify against endpoints.
# (Restores the tokens corrupted by split artifacts in this copy.)
urlpatterns = patterns('',
    (r'^django-httpbin', include('django_httpbin.httpbin.endpoints')),
)
|
bevpy/pcvilag-dl | pcvilag-dl.py | Python | gpl-3.0 | 5,648 | 0.010979 | #!/usr/bin/env python
# coding: utf-8
"""
pcvilag.muskatli.hu downloader
Copyright (C) 2015 Gyulai Gergő
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import platform
import urllib
import re
from subprocess import call
#get platform
# NOTE: 'global' at module level is a no-op; PF and PICS are module globals anyway.
global PF
PF = platform.system()
global PICS
#set HOME for the files...
# HOME is the download root; BS is the platform path separator used throughout.
if PF=='Windows':
    HOME = "C:\\PCVilag-docs\\"
    BS = "\\"
else:
    HOME = os.path.expanduser('~')+"/PCVilag-docs/"
    BS = "/"
#Check the root folder... if not exists it will create
#if it exists do nothing
def check_root():
print "Checking folders..."
if not os.path.exists(HOME):
print "Creating ROOT...",
os.mkdir(HOME, 0755)
print "DONE"
if not os.path.exists(HOME+"books"):
print "Creating BOOKS...",
os.mkdir(HOME+"books", 0755)
print "DONE"
else:
print "BOOKS: OK"
if not os.path.exists(HOME+"mags"):
print "Creating MAGS...",
os.mkdir(HOME+"mags", 0755)
print "DONE"
else:
print "MAGS: OK"
else:
print "ROOT: OK"
if not os.path.exists(HOME+"books"):
print "Creating BOOKS...",
os.mkdir(HOME+"books", 0755)
print "DONE"
else:
print "BOOKS: OK"
if not os.path.exists(HOME+"mags"):
print "Creating MAGS...",
os.mkdir(HOME+"mags", 0755)
print "DONE"
else:
print "MAGS: OK"
#Make folders for files
def make_folders(u_name, book, full):
    # u_name: relative path like 'books<BS><title><BS>' or 'mags<BS><name><BS>'
    # book:   True for a book (flat src folder), False for a magazine
    # full:   the URL split on '/'; full[5]/full[6] look like year/issue -- verify
    if book:
        print "Making folder: \""+u_name.split(BS)[1]+"\" in books...",
        if not os.path.exists(HOME+u_name):
            os.mkdir(HOME+u_name, 0755)
        if os.path.exists(HOME+u_name) and not os.path.exists(HOME+u_name+BS+"src"+BS):
            os.mkdir(HOME+u_name+BS+"src"+BS, 0755)
    else:
        print "Making folder: \""+u_name.split(BS)[1]+"\" in mags...",
        # Magazines get nested <name>/<full[5]>/<full[6]>/src/ folders.
        if not os.path.exists(HOME+u_name):
            os.mkdir(HOME+u_name, 0755)
        if os.path.exists(HOME+u_name) and not os.path.exists(HOME+u_name+full[5]):
            os.mkdir(HOME+u_name+full[5], 0755)
        if os.path.exists(HOME+u_name+full[5]) and not os.path.exists(HOME+u_name+full[5]+BS+full[6]):
            os.mkdir(HOME+u_name+full[5]+BS+full[6], 0755)
        if os.path.exists(HOME+u_name+full[5]+BS+full[6]) and not os.path.exists(HOME+u_name+full[5]+BS+full[6]+BS+"src"+BS):
            os.mkdir(HOME+u_name+full[5]+BS+full[6]+BS+"src"+BS, 0755)
    print "DONE"
#Check the url
def check_url(url):
    """Return a match object when *url* is a well-formed pcvilag page URL,
    else None.

    The original pattern was only anchored at the start (re.match), so
    trailing junk after '.html' was accepted, and the unescaped dots in
    the hostname matched any character; both are fixed here.
    """
    pattern = re.compile(r"http:\/\/pcvilag\.muskatli\.hu\/[a-zA-Z0-9\/]+\.html$")
    return pattern.match(url)
#get links from */link.php
def get_piclinks(url):
print "Get pictures' link...",
s = url.split("/")
alap = "/".join(s[:-1])+"/"
link = alap+"link.php"
open = urllib.urlopen(link)
data = open.read()
open.close()
tmp = re.findall("kep\.php\?kepparam\=([a-zA-z0-9\-]+\.\w+)",data)
res = []
i=0
for element in tmp:
res.append(alap+element)
i+=1
print "DONE"
return res
#download pictures
def get_pics(links, path, book, full):
print "Downloading pictures..."
i=1
if book:
path = path+"src"+BS
else:
path = path+full[5]+BS+full[6]+BS+"src"+BS
for element in links:
print str(i).rjust(5)+".",
e=None
|
try:
urllib.urlretrieve(element,path+str(i)+"."+element.split("/")[-1].split(".")[-1])
| e=None
except IOError,e:
print "Error: This file is no longer available"
i-=1
if e==None:
print "DONE"
i+=1
global PICS
PICS = i-1
print "ALL PICTURES DOWNLOADED"
#call bash/cmd with ImageMagick convert method
def convertToPDF(path,u_name, book, full):
    # Assembles src/1.jpg..src/<PICS>.jpg into a single PDF via ImageMagick.
    # NOTE(review): os.system builds a shell string from HOME/u_name; paths with
    # spaces or shell metacharacters would break or be interpreted by the shell.
    # NOTE(review): assumes .jpg downloads, but get_pics keeps the original
    # extension -- non-jpg pages would be silently skipped; verify.
    print "Converting pictures to one PDF...",
    if book:
        os.system("convert -limit thread 1 "+path+"src"+BS+"%d.jpg[1-"+str(PICS)+"] "+path+u_name.split(BS)[1]+".pdf")
    else:
        path = path+full[5]+BS
        os.system("convert -limit thread 1 "+path+full[6]+BS+"src"+BS+"%d.jpg[1-"+str(PICS)+"] "+path+u_name.split(BS)[1]+"-"+full[6]+".pdf")
    print "DONE"
#MAIN
def main():
    # Interactive driver: prompt for a pcvilag URL, then download and convert.
    check_root()
    url = raw_input("Enter URL:\n")
    while(not check_url(url)):
        print "It's not a valid url"
        url = raw_input("Enter a valid URL:\n")
    s = url.split("/")
    # s[4] is the first path component: 'cbooks' selects book layout,
    # anything else is treated as a magazine.
    if s[4]=='cbooks':
        NAME = "books"+BS+s[5]+BS
        BOOK = True
    else:
        BOOK = False
        NAME = "mags"+BS+s[4]+BS
    #todo: unique folder name....
    make_folders(NAME, BOOK, s)
    piclinks = get_piclinks(url)
    get_pics(piclinks, HOME+NAME, BOOK, s)
    convertToPDF(HOME+NAME, NAME, BOOK, s)
    print "Downloaded and converted files are here: "+HOME+NAME
#############################################################################
if __name__=="__main__":
    main()
|
google/makani | analysis/system_design/site.py | Python | apache-2.0 | 3,837 | 0.004431 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for wind site modeling within system design tool.
This module is intended to model the properties associated with a wind site that
impact the determination of the levelized cost of energy at the wind farm level.
In its current state there are no functions, so it is just a container for the
associated characteristics.
"""
class Site(object):
  """Models a wind site.

  This class contains characteristics that are specific to an individual wind
  site: the probability distribution of wind velocities, the probability
  distribution of the wind shear coefficient, and the initial capital costs.
  All attributes are exposed as read-only properties.

  Attributes:
    site_name: A string containing the name of the site.
    velocity_distribution: A dictionary containing velocity and probability
      information specific to the site.
    shear_distribution: A dictionary containing shear coefficient and
      probability information specific to the site.
    capital_costs: A float describing the initial capital cost for the site.
  """

  def __init__(self, site_name=None, velocity_distribution=None,
               shear_distribution=None, capital_costs=None):
    """Constructor.

    Args:
      site_name: (Optional) String containing name of site.
      velocity_distribution: (Optional) Dictionary with keys 'velocity' and
        'probability' holding equal-length lists; velocities must be floats
        monotonically increasing from 0.0, probabilities non-negative floats
        summing to 1.0.
      shear_distribution: (Optional) Dictionary with keys 'shear_coefficient'
        and 'probability' holding equal-length lists with the same
        constraints as above.
      capital_costs: (Optional) Initial capital cost [$USD/m^2] for the site.
    """
    # TODO: Allow for wind class as a method of definition.
    # TODO: Add description wind shear with reference height.
    # TODO: Add input checking.
    self._site_name = 'Default Site Name' if site_name is None else site_name
    if velocity_distribution is None:
      self._velocity_distribution = {
          'velocity': [0.0, 10.0, 20.0],
          'probability': [0.2, 0.6, 0.2]
      }
    else:
      self._velocity_distribution = velocity_distribution
    if shear_distribution is None:
      self._shear_distribution = {
          'shear_coefficient': [0.0, 0.1, 0.2],
          'probability': [0.0, 0.9, 0.1]
      }
    else:
      self._shear_distribution = shear_distribution
    self._capital_costs = 1.0 if capital_costs is None else capital_costs

  @property
  def site_name(self):
    return self._site_name

  @property
  def velocity_distribution(self):
    return self._velocity_distribution

  @property
  def shear_distribution(self):
    return self._shear_distribution

  @property
  def capital_costs(self):
    return self._capital_costs
|
ranog/coursera_python | buzz.py | Python | gpl-3.0 | 155 | 0 | #!/usr/bin/env python3
# Read an integer and print "Buzz" when it is a multiple of 5, otherwise
# echo the number back (Coursera FizzBuzz variant).
# (Restores tokens corrupted by split artifacts in this copy of the file.)
numero = int(input("Digite um número inteiro: "))
resto = numero % 5
if resto == 0:
    print("Buzz")
else:
    print(numero)
|
araisrobo/linuxcnc | src/emc/usr_intf/axis/scripts/linuxcnctop.py | Python | lgpl-2.1 | 7,389 | 0.012451 | #!/usr/bin/env python2
# This is a component of AXIS, a front-end for linuxcnc
# Copyright 2004, 2005, 2006 Jeff Epler <jepler@unpythonic.net>
# and Chris Radek <chris@timeguy.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, os
import linuxcnc, time
import rs274.options
import gettext
# Locate the install prefix relative to this script for translation catalogs.
BASE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
gettext.install("linuxcnc", localedir=os.path.join(BASE, "share", "locale"), unicode=True)
# '-ini <file>' overrides the NML file from the given INI, then is removed
# from argv so later option parsing ('-t') is unaffected.
if len(sys.argv) > 1 and sys.argv[1] == '-ini':
    ini = linuxcnc.ini(sys.argv[2])
    linuxcnc.nmlfile = ini.find("EMC", "NML_FILE") or linuxcnc.nmlfile
    del sys.argv[1:3]
s = linuxcnc.stat(); s.poll()
def show_mcodes(l):
    # Skip the first slot and any inactive (-1) entries; format as "M<n>".
    return " ".join("M%g" % code for code in l[1:] if code != -1)
def show_gcodes(l):
    # Codes are stored as 10x integers (800 -> G80, 915 -> G91.5); -1 is inactive.
    return " ".join("G%g" % (code / 10.) for code in l[1:] if code != -1)
def show_position(p):
    # Format only the coordinates whose axis exists per the status axis_mask.
    return " ".join(["%-8.4f" % n for i, n in enumerate(p) if s.axis_mask & (1<<i)])
# Pre-built format strings sized to the machine's joint count.
joint_position = " ".join(["%-8.4f"] * s.joints)
def show_joint_position(p):
    # Truncate the fixed-size status tuple to the configured joints.
    return joint_position % p[:s.joints]
perjoint = " ".join(["%s"] * s.joints)
def show_perjoint(p):
    # Generic per-joint display (limits, homed flags, ...).
    return perjoint % p[:s.joints]
def show_float(p): return "%-8.4f" % p
# Display translation table for linuxcnc.stat attributes: a dict maps enum
# values to readable labels, a callable formats the raw value, and None
# suppresses the attribute entirely.
maps = {
    'exec_state': {linuxcnc.EXEC_ERROR: 'error',
               linuxcnc.EXEC_DONE: 'done',
               linuxcnc.EXEC_WAITING_FOR_MOTION: 'motion',
               linuxcnc.EXEC_WAITING_FOR_MOTION_QUEUE: 'motion queue',
               linuxcnc.EXEC_WAITING_FOR_IO: 'io',
               linuxcnc.EXEC_WAITING_FOR_MOTION_AND_IO: 'motion and io',
               linuxcnc.EXEC_WAITING_FOR_DELAY: 'delay',
               linuxcnc.EXEC_WAITING_FOR_SYSTEM_CMD: 'system command'},
    'motion_mode':{linuxcnc.TRAJ_MODE_FREE: 'free', linuxcnc.TRAJ_MODE_COORD: 'coord',
                linuxcnc.TRAJ_MODE_TELEOP: 'teleop'},
    'interp_state':{linuxcnc.INTERP_IDLE: 'idle', linuxcnc.INTERP_PAUSED: 'paused',
                linuxcnc.INTERP_READING: 'reading', linuxcnc.INTERP_WAITING: 'waiting'},
    'task_state': {linuxcnc.STATE_ESTOP: 'estop', linuxcnc.STATE_ESTOP_RESET: 'estop reset',
                linuxcnc.STATE_ON: 'on', linuxcnc.STATE_OFF: 'off'},
    'task_mode': {linuxcnc.MODE_AUTO: 'auto', linuxcnc.MODE_MDI: 'mdi',
                linuxcnc.MODE_MANUAL: 'manual'},
    'state': {1: 'rcs_done', 2: 'rcs_exec', 3: 'rcs_error'},
    'motion_type': {0: 'none', 1: 'traverse', 2: 'feed', 3: 'arc', 4: 'toolchange', 5: 'probing'},
    'program_units': {1: 'inch', 2: 'mm'},
    'kinematics_type': {linuxcnc.KINEMATICS_IDENTITY: 'identity', linuxcnc.KINEMATICS_FORWARD_ONLY: 'forward_only',
                linuxcnc.KINEMATICS_INVERSE_ONLY: 'inverse_only', linuxcnc.KINEMATICS_BOTH: 'both'},
    'mcodes': show_mcodes, 'gcodes': show_gcodes, 'poll': None, 'tool_table': None,
    'axis': None, 'joint': None, 'gettaskfile': None,
    'actual_position': show_position,
    'position': show_position,
    'dtg': show_position,
    'origin': show_position,
    'rotation_xy': show_float,
    'probed_position': show_position,
    'tool_offset': show_position,
    'g5x_offset': show_position,
    'g92_offset': show_position,
    'linear_units': show_float,
    'max_acceleration': show_float,
    'max_velocity': show_float,
    'angular_units': show_float,
    'distance_to_go': show_float,
    'current_vel': show_float,
    'limit': show_perjoint,
    'homed': show_perjoint,
    'joint_position': show_joint_position,
    'joint_actual_position': show_joint_position,
    }
# With identity kinematics the joint positions duplicate the axis
# positions, so hide them from the display.
if s.kinematics_type == 1:
    maps['joint_position'] = None
    maps['joint_actual_position'] = None
def gui():
    """Run a Tk window with a live, ~10 Hz refreshing dump of LinuxCNC status.

    Recently-changed values are highlighted with a red background for two
    seconds.  (Restores the 'except TclError:' line corrupted by split
    artifacts in this copy of the file.)
    """
    import Tkinter
    from _tkinter import TclError
    root = Tkinter.Tk(className="LinuxCNCTop")
    rs274.options.install(root)
    root.title(_("LinuxCNC Status"))
    t = Tkinter.Text()
    sb = Tkinter.Scrollbar(command=t.yview)
    t.configure(yscrollcommand=sb.set)
    t.configure(tabs="150")
    base_font = t.tk.call("set", "BASE_FONT")
    fixed_font = t.tk.call("set", "FIXED_FONT")
    t.tag_configure("key", foreground="blue", font=base_font)
    t.tag_configure("value", foreground="black", font=fixed_font)
    t.tag_configure("changedvalue", foreground="black", background="red", font=fixed_font)
    t.tag_configure("sel", foreground="white")
    t.tag_raise("sel")
    # Swallow keypresses so the text widget is effectively read-only.
    t.bind("<KeyPress>", "break")
    b = Tkinter.Button(text=_("Copy All"),
        command="%s tag add sel 0.0 end; tk_textCopy %s" % (t, t))
    b.pack(side="bottom", anchor="sw")
    t.pack(side="left", expand=1, fill="both")
    sb.pack(side="left", expand=0, fill="y")
    changetime = {}   # key -> time until which its value stays highlighted
    oldvalues = {}    # key -> value shown on the previous refresh
    def timer():
        # Poll status; if linuxcnc has gone away, tear down the window.
        try:
            s.poll()
        except linuxcnc.error:
            root.destroy()
        # Remember scroll position, selection and marks across the redraw.
        pos = t.yview()[0]
        selection = t.tag_ranges("sel")
        insert_point = t.index("insert")
        insert_gravity = t.mark_gravity("insert")
        try:
            anchor_point = t.index("anchor")
            anchor_gravity = t.mark_gravity("anchor")
        except TclError:
            # No selection anchor exists yet.
            anchor_point = None
        t.delete("0.0", "end")
        first = True
        for k in dir(s):
            if k.startswith("_"): continue
            if maps.has_key(k) and maps[k] == None: continue
            v = getattr(s, k)
            if maps.has_key(k):
                m = maps[k]
                if callable(m):
                    v = m(v)
                else:
                    v = m.get(v, v)
            if oldvalues.has_key(k):
                changed = oldvalues[k] != v
                if changed: changetime[k] = time.time() + 2
            oldvalues[k] = v
            if changetime.has_key(k) and changetime[k] >= time.time():
                vtag = "changedvalue"
            else:
                vtag = "value"
            if first: first = False
            else: t.insert("end", "\n")
            t.insert("end", k, "key", "\t")
            t.insert("end", v, vtag)
        t.yview_moveto(pos)
        if selection:
            t.tag_add("sel", *selection)
        t.mark_set("insert", insert_point)
        t.mark_gravity("insert", insert_gravity)
        if anchor_point is not None:
            t.mark_set("anchor", anchor_point)
            t.mark_gravity("anchor", anchor_gravity)
        t.after(100, timer)
    timer()
    t.mainloop()
def text():
    # One-shot plain-text dump of every public status attribute, using the
    # same 'maps' translation table as the GUI.
    s.poll()
    for k in dir(s):
        if k.startswith("_"): continue
        if maps.has_key(k) and maps[k] == None: continue
        v = getattr(s, k)
        if maps.has_key(k):
            m = maps[k]
            if callable(m):
                v = m(v)
            else:
                v = m.get(v, v)
        print "%-20s %-.58s" % (k, v)
# '-t' selects the one-shot text dump; the default is the Tk GUI.
if len(sys.argv) > 1 and sys.argv[1] == '-t':
    text()
else:
    gui()
# vim:sw=4:sts=4:et
|
rpufky/trafficserver | tests/tools/sessionvalidation/session.py | Python | apache-2.0 | 1,826 | 0.001643 | '''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sessionvalidation.transaction as transaction
class Session(object):
    ''' Session encapsulates a single user session '''

    def __init__(self, filename, version, timestamp, transaction_list, encoding=None):
        ''' Store the capture metadata and its ordered transactions '''
        self._filename = filename
        self._version = version
        self._timestamp = timestamp
        self._encoding = encoding
        self._transaction_list = transaction_list

    def getTransactionList(self):
        ''' Return the transactions of this session as a list '''
        return self._transaction_list

    def getTransactionIter(self):
        ''' Return the transactions of this session as an iterator '''
        return iter(self._transaction_list)

    def returnFirstTransaction(self):
        ''' Return the first transaction of the session '''
        return self._transaction_list[0]

    def __repr__(self):
        return "<Session {{'filename': {0}, 'version': {1}, 'timestamp: {2}, 'encoding': {3}, 'transaction_list': {4}}}>".format(
            self._filename, self._version, self._timestamp, self._encoding, repr(self._transaction_list)
        )
|
dbjohnson/advent-of-code | solutions/day18/solution.py | Python | mit | 1,625 | 0.001231 | class Automata(object):
    def __init__(self, row, col, state):
        # Grid coordinates and the cell's starting on/off state.
        self.row = row
        self.col = col
        self.init_state = state  # remembered so reset() can restore it
        self.state = state
        self.neighbors = []      # adjacent cells, wired up by the caller via connect()
    def connect(self, neighbor):
        # Register one adjacent cell; adjacency is built externally.
        self.neighbors.append(neighbor)
def calc_next_state(self):
on_count = sum(1 if n.state else 0 for n in self.neighbors)
if self.state:
self.next_state = on_count in (2, 3)
else:
self.next_state = on_count == 3
    def update(self):
        # Commit the state computed by calc_next_state() (synchronous step).
        self.state = self.next_state
    def reset(self):
        # Restore the cell to its initial (input-file) state.
        self.state = self.init_state
world = []
with open('input.txt', 'r') as fh:
for i, line in enumerate(fh):
world.append([Automata(i, j, c == '#') for j, c in enumerate(line.strip())])
for i, row in enumerate(world):
for j, a in enumerate(row):
for ii in xrange(max(0, i - 1), min(len(world), i + 2)):
for jj in xrange(max(0, j - 1), min(len(row), j + 2)):
if ii != i or jj != j:
a.connect(world[ii][jj])
everyone = re | duce(lambda x, y: x + y, world)
for step in xrange(1 | 00):
for a in everyone:
a.calc_next_state()
for a in everyone:
a.update()
print 'part 1', sum(1 if a.state else 0 for a in everyone)
working = []
for a in everyone:
if a.row in (0, 99) and a.col in (0, 99):
a.state = True
else:
a.reset()
working.append(a)
for step in xrange(100):
for a in working:
a.calc_next_state()
for a in working:
a.update()
print 'part 2', sum(1 if a.state else 0 for a in everyone)
|
JeremyGrosser/quisk | src/softrock/hardware_usb.py | Python | gpl-2.0 | 8,270 | 0.013785 | # Please do not change this hardware control module for Quisk.
# It provides USB control of SoftRock hardware.
import struct, threading, time, traceback, math
from quisk_hardware_model import Hardware as BaseHardware
import _quisk as QS
# All USB access is through control transfers using pyusb.
# byte_array = dev.ctrl_transfer (IN, bmRequest, wValue, wIndex, length, timout)
# len(string_msg) = dev.ctrl_transfer (OUT, bmRequest, wValue, wIndex, string_msg, timout)
import usb.core, usb.util
DEBUG = 0
# I2C-address of the SI570; Thanks to Joachim Schneider, DB6QS
si570_i2c_address = 0x55
# Thanks to Ethan Blanton, KB8OJH, for this patch for the Si570 (many SoftRocks):
# These are used by SetFreqByDirect(); see below.
# The Si570 DCO must be clamped between these values
SI570_MIN_DCO = 4.85e9
SI570_MAX_DCO = 5.67e9
# The Si570 has 6 valid HSDIV values. Subtract 4 from HSDIV before
# stuffing it. We want to find the highest HSDIV first, so start
# from 11.
SI570_HSDIV_VALUES = [11, 9, 7, 6, 5, 4]
# Pre-built bmRequestType values for vendor control transfers: device-to-host
# reads (IN) and host-to-device writes (OUT).
IN = usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_VENDOR, usb.util.CTRL_RECIPIENT_DEVICE)
OUT = usb.util.build_request_type(usb.util.CTRL_OUT, usb.util.CTRL_TYPE_VENDOR, usb.util.CTRL_RECIPIENT_DEVICE)
# Little-endian unsigned 16-bit / 32-bit (de)serializers for control payloads.
UBYTE2 = struct.Struct('<H')
UBYTE4 = struct.Struct('<L') # Thanks to Sivan Toledo
class Hardware(BaseHardware):
  def __init__(self, app, conf):
    # app: the main Quisk application; conf: the user's configuration module.
    BaseHardware.__init__(self, app, conf)
    self.usb_dev = None         # pyusb device handle, set by open()
    self.vfo = None             # last VFO frequency programmed into the Si570
    self.key_thread = None      # CW key polling thread (transmit-capable only)
    self.name_of_mic_play = conf.name_of_mic_play # True if we can transmit
  def open(self): # Called once to open the Hardware
    # Locate the SoftRock by USB vendor/product id, claim it, and read the
    # firmware version.  Returns a status string shown in the GUI.
    # find our device
    self.usb_dev = usb.core.find(idVendor=self.conf.usb_vendor_id, idProduct=self.conf.usb_product_id)
    if self.usb_dev is None:
      text = 'USB device not found VendorID 0x%X ProductID 0x%X' % (
          self.conf.usb_vendor_id, self.conf.usb_product_id)
      self.application.sound_error = 1
    else:
      try:
        self.usb_dev.set_configuration()
        # Request 0x00/0x0E00 returns the two-byte firmware version.
        ret = self.usb_dev.ctrl_transfer(IN, 0x00, 0x0E00, 0, 2)
      except:
        # NOTE(review): bare except deliberately treats any failure (usually
        # a permissions error) as "no access"; consider narrowing.
        text = "No permission to access the SoftRock USB interface"
        self.application.sound_error = 1
        self.usb_dev = None
      else:
        if len(ret) == 2:
          ver = "%d.%d" % (ret[1], ret[0])
        else:
          ver = 'unknown'
        text = 'Capture from SoftRock USB on %s, Firmware %s' % (self.conf.name_of_sound_capt , ver)
      # Start CW key polling only for transmit-capable configurations.
      if self.name_of_mic_play and self.conf.key_poll_msec:
        self.key_thread = KeyThread(self.usb_dev, self.conf.key_poll_msec / 1000.0)
        self.key_thread.start()
    if self.name_of_mic_play:
      self.application.bottom_widgets.info_text.SetLabel(text)
    if DEBUG:
      print 'Startup freq', self.GetStartupFreq()
      print 'Run freq', self.GetFreq()
      print 'Address 0x%X' % self.usb_dev.ctrl_transfer(IN, 0x41, 0, 0, 1)[0]
      sm = self.usb_dev.ctrl_transfer(IN, 0x3B, 0, 0, 2)
      sm = UBYTE2.unpack(sm)[0]
      print 'Smooth tune', sm
    return text
  def close(self): # Called once to close the Hardware
    # Stop the CW key polling thread if open() started one.
    if self.key_thread:
      self.key_thread.stop()
      self.key_thread = None
  def ChangeFrequency(self, tune, vfo, source='', band='', event=None):
    # Retune the Si570 only when the VFO actually changed; self.vfo is
    # updated only after a successful write so a failed transfer retries.
    if self.usb_dev and self.vfo != vfo:
      if self.conf.si570_direct_control:
        if self.SetFreqByDirect(vfo):
          self.vfo = vfo
      elif self.SetFreqByValue(vfo):
        self.vfo = vfo
      if DEBUG:
        print 'Change to', vfo
        print 'Run freq', self.GetFreq()
    return tune, vfo
  def ReturnFrequency(self):
    # Return the current tuning and VFO frequency. If neither have changed,
    # you can return (None, None). This is called at about 10 Hz by the main.
    # return (tune, vfo) # return changed frequencies
    # This hardware never changes frequency on its own.
    return None, None # frequencies have not changed
def ChangeMode(self, mode): # Change the tx/rx mode
# mode is a string: "USB", "AM", etc.
pass
  def ChangeBand(self, band):
    # band is a string: "60", "40", "WWV", etc.
    # No band-switching hardware on this rig; intentionally a no-op.
    pass
  def HeartBeat(self):	# Called at about 10 Hz by the main
    # No periodic maintenance is required for this hardware.
    pass
  def OnPTT(self, ptt):
    # ptt: nonzero for transmit, 0 for receive.
    if self.key_thread:
      # The poll thread owns USB key/PTT traffic; hand the state to it.
      self.key_thread.OnPTT(ptt)
    elif self.usb_dev:
      QS.set_key_down(ptt)
      try:
        # Command 0x50 tells the SoftRock firmware the new key/PTT state.
        self.usb_dev.ctrl_transfer(IN, 0x50, ptt, 0, 3)
      except usb.core.USBError:
        # USB hiccup: fail safe by forcing key-up in the DSP.
        QS.set_key_down(0)
        if DEBUG: traceback.print_exc()
  def GetStartupFreq(self):	# return the startup frequency / 4
    if not self.usb_dev:
      return 0
    # Command 0x3C reads the Si570 startup frequency as a 4-byte value.
    ret = self.usb_dev.ctrl_transfer(IN, 0x3C, 0, 0, 4)
    s = ret.tostring()
    freq = UBYTE4.unpack(s)[0]
    # Firmware units are MHz * 2^21; divide by 4 because the Si570 runs
    # at four times the operating frequency.  Round to the nearest Hz.
    freq = int(freq * 1.0e6 / 2097152.0 / 4.0 + 0.5)
    return freq
  def GetFreq(self):	# return the running frequency / 4
    if not self.usb_dev:
      return 0
    # Command 0x3A reads the current (running) Si570 frequency.
    ret = self.usb_dev.ctrl_transfer(IN, 0x3A, 0, 0, 4)
    s = ret.tostring()
    freq = UBYTE4.unpack(s)[0]
    # Same scaling as GetStartupFreq: MHz * 2^21, divided by the 4x multiplier.
    freq = int(freq * 1.0e6 / 2097152.0 / 4.0 + 0.5)
    return freq
  def SetFreqByValue(self, freq):
    # Program the frequency via the firmware's set-by-value command 0x32.
    # The firmware expects MHz * 2^21, scaled by 4 (Si570 runs at 4x).
    freq = int(freq/1.0e6 * 2097152.0 * 4.0 + 0.5)
    s = UBYTE4.pack(freq)
    try:
      self.usb_dev.ctrl_transfer(OUT, 0x32, si570_i2c_address + 0x700, 0, s)
    except usb.core.USBError:
      if DEBUG: traceback.print_exc()
    else:
      # Success is only reported when the USB transfer did not raise;
      # on failure the method falls through and returns None (falsy).
      return True
  def SetFreqByDirect(self, freq):	# Thanks to Ethan Blanton, KB8OJH
    # For now, find the minimum DCO speed that will give us the
    # desired frequency; if we're slewing in the future, we want this
    # to additionally yield an RFREQ ~= 512.
    # The Si570 oscillates at four times the operating frequency.
    freq = int(freq * 4)
    if freq == 0:
      return False
    dco_new = None
    hsdiv_new = 0
    n1_new = 0
    # Search every HS_DIV value for the divider pair yielding the lowest
    # valid DCO frequency.
    for hsdiv in SI570_HSDIV_VALUES:
      n1 = int(math.ceil(SI570_MIN_DCO / (freq * hsdiv)))
      if n1 < 1:
        n1 = 1
      else:
        # N1 must be 1 or even; this is Python 2 integer division,
        # which rounds n1 up to the next even value.
        n1 = ((n1 + 1) / 2) * 2
      dco = (freq * 1.0) * hsdiv * n1
      # Since we're starting with max hsdiv, this can only happen if
      # freq was larger than we can handle
      if n1 > 128:
        continue
      if dco < SI570_MIN_DCO or dco > SI570_MAX_DCO:
        # This really shouldn't happen
        continue
      if not dco_new or dco < dco_new:
        dco_new = dco
        hsdiv_new = hsdiv
        n1_new = n1
    if not dco_new:
      # For some reason, we were unable to calculate a frequency.
      # Probably because the frequency requested is outside the range
      # of our device.
      return False	# Failure
    # Split RFREQ into its integer part and 28-bit fractional part.
    rfreq = dco_new / self.conf.si570_xtal_freq
    rfreq_int = int(rfreq)
    rfreq_frac = int(round((rfreq - rfreq_int) * 2**28))
    # It looks like the DG8SAQ protocol just passes r7-r12 straight
    # To the Si570 when given command 0x30. Easy enough.
    # n1 is stuffed as n1 - 1, hsdiv is stuffed as hsdiv - 4.
    hsdiv_new = hsdiv_new - 4
    n1_new = n1_new - 1
    s = struct.Struct('>BBL').pack((hsdiv_new << 5) + (n1_new >> 2),
          ((n1_new & 0x3) << 6) + (rfreq_int >> 4),
          ((rfreq_int & 0xf) << 28) + rfreq_frac)
    self.usb_dev.ctrl_transfer(OUT, 0x30, si570_i2c_address + 0x700, 0, s)
    return True	# Success
class KeyThread(threading.Thread):
"""Create a thread to monitor the key state."""
  def __init__(self, dev, poll_secs):
    # dev: open pyusb device; poll_secs: key-poll period in seconds.
    self.usb_dev = dev
    self.poll_secs = poll_secs
    self.ptt = 0
    self.key_down = 0
    threading.Thread.__init__(self)
    # Event used to tell the polling loop in run() to exit.
    self.doQuit = threading.Event()
    self.doQuit.clear()
  def run(self):
    # Poll the hardware key state until stop() sets doQuit.
    while not self.doQuit.isSet():
      try:
        if self.ptt:
          # PTT overrides the hardware key: treat the key as down.
          key_down = 1
        else:	# read key state
          ret = self.usb_dev.ctrl_transfer(IN, 0x51, 0, 0, 1)
          # bit 0x20 is the tip, bit 0x02 is the ring
          if ret[0] & 0x20:	# Tip: key is up
            key_down = 0
          else:			# key is down
            key_down = 1
        if key_down != self.key_down:
          # State changed: tell the firmware (command 0x50) and the DSP.
          self.key_down = key_down
          self.usb_dev.ctrl_transfer(IN, 0x50, key_down, 0, 3)
          QS.set_key_down(key_down)
      except usb.core.USBError:
        # USB hiccup: fail safe by forcing key-up.
        QS.set_key_down(0)
        if DEBUG: traceback.print_exc()
      time.sleep(self.poll_secs)
def stop(self):
"""Set a flag to indicate that t |
hsdistefa/MarketQuotes | symbol_downloader.py | Python | mit | 1,466 | 0.002729 | import urllib.request
import json
# This URL returns all stock tickers in JSON format
TICKER_URL = 'http://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.industry%20where%2 | 0id%20in%20%28select%20industr | y.id%20from%20yahoo.finance.sectors%29&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys'
OUTPUT_FILE = 'symbols.txt'
class TickerDownloader:
    """Download all stock ticker symbols from the Yahoo YQL API and write
    them, one per line, to OUTPUT_FILE.
    """

    def get_symbols(self):
        """Fetch the ticker JSON and write the parsed symbols to disk."""
        self.download_url()
        self.parse()

    def download_url(self, url=TICKER_URL):
        """Download *url* and store the decoded industry list on self._json.

        Bug fix: the original built the request from the TICKER_URL constant,
        silently ignoring the *url* argument.
        """
        request = urllib.request.Request(url)
        response = urllib.request.urlopen(request)
        encoding = response.info().get_param('charset', 'utf8')
        # Load JSON and drill down to the list of industries; each industry
        # entry may carry a 'company' list.
        self._json = json.loads(
            response.read().decode(encoding))['query']['results']['industry']

    def parse(self):
        """Write every company symbol found in self._json to OUTPUT_FILE.

        Malformed entries are reported ('format error') and skipped, matching
        the original best-effort behaviour, but only the expected structural
        errors are caught instead of a bare ``except``.
        """
        with open(OUTPUT_FILE, 'w+') as outfile:
            for industry in self._json:
                try:
                    companies = industry['company']
                    num_companies = len(companies)
                except (KeyError, TypeError):
                    # Industry entry has no company list (or is not a mapping).
                    print('format error')
                    continue
                for j in range(num_companies):
                    try:
                        outfile.write(
                            companies[j]['symbol'] + '\n')
                    except (KeyError, IndexError, TypeError):
                        print('format error')
if __name__ == '__main__':
    # Script entry point: download the ticker list and write symbols.txt.
    TickerDownloader().get_symbols()
|
wtsi-hgi/irobot | irobot/precache/db/_dbi.py | Python | gpl-3.0 | 6,897 | 0.00203 | """
Copyright (c) 2017 Genome Research Ltd.
Author: Christopher Harrison <ch12@sanger.ac.uk>
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be usef | ul, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this | program. If not, see <http://www.gnu.org/licenses/>.
"""
from collections import OrderedDict
from inspect import Parameter, signature
from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple, Type, Union
import apsw
from irobot.precache.db._types import Adaptor, Convertor, SQLite
from irobot.precache.db._udf import AggregateUDF, aggregate_udf_factory_factory
# Type aliases
_PyBindings = Union[Tuple[Any, ...], Dict[str, Any]]
_SQLiteBindings = Union[Tuple[SQLite, ...], Dict[str, SQLite]]
_Adaptors = Dict[Type, Adaptor]
_Convertors = Dict[str, Convertor]
class Cursor(Iterator):
    """
    Cursor implementation that adds adaptor and convertor support to the
    default APSW cursor
    """
    def __init__(self, native_cursor: "apsw.Cursor") -> None:
        """
        Constructor

        @param native_cursor APSW Cursor
        """
        self._cursor = native_cursor

        # Get adaptors and convertors from parent connection
        conn = self._cursor.getconnection()
        self._adaptors = conn._adaptors
        self._convertors = conn._convertors

    def __iter__(self) -> "Cursor":
        return self

    def __next__(self) -> Tuple:
        """
        Fetch the next row of data from the cursor and convert values
        for any matching SQLite declarations

        @return Row of data
        """
        data = next(self._cursor)
        desc = self._cursor.getdescription()

        # Run each column through the convertor registered for its declared
        # type; columns without a registered convertor pass through unchanged.
        return tuple(
            self._convertors.get(type_decl, lambda x: x)(value)
            for value, (_col_name, type_decl)
            in zip(data, desc)
        )

    def _adapt_pyval(self, pyval: Any) -> SQLite:
        """
        Adapt a Python value to a native SQLite type

        @param pyval Python value
        @return Native SQLite value
        @raise TypeError when no adaptor is registered for the value's type
        """
        pytype = type(pyval)

        # Membership test against a tuple of the SQLite-native Python types.
        if pyval is None or pytype in (str, bytes, int, float):
            # Pass through already native types
            return pyval

        try:
            # Try to adapt non-native types
            return self._adaptors[pytype](pyval)
        except KeyError:
            raise TypeError(f"No adaptor for {pytype.__name__} type")

    def _adapt_bindings(self, bindings: _PyBindings) -> _SQLiteBindings:
        """
        Adapt bind variables to native SQLite types

        @param bindings Bind variables (Python types)
        @return Bind variables (SQLite types)
        """
        # Fix: isinstance checks against typing.Tuple/typing.Dict are
        # deprecated (removed usefulness from Python 3.9); check the builtin
        # runtime types instead -- behaviour is identical.
        if isinstance(bindings, tuple):
            return tuple(map(self._adapt_pyval, bindings))

        elif isinstance(bindings, dict):
            return {k: self._adapt_pyval(v) for k, v in bindings.items()}

        else:
            raise TypeError("Invalid bindings; should be a tuple or dictionary")

    def execute(self, sql: str, bindings: Optional[_PyBindings]=None) -> "Cursor":
        """
        Executes the SQL statements with the specified bindings

        @param sql SQL statements (string)
        @param bindings Bind variables
        @return Cursor to execution
        """
        sqlite_bindings = self._adapt_bindings(bindings) if bindings else None
        return Cursor(self._cursor.execute(sql, sqlite_bindings))

    def executemany(self, sql: str, binding_seq: Sequence[_PyBindings]) -> "Cursor":
        """
        Executes the SQL statements with a sequence of bindings

        @param sql SQL statements (string)
        @param binding_seq Sequence of bind variables
        @return Cursor to execution
        """
        sqlite_binding_seq = [self._adapt_bindings(v) for v in binding_seq]
        return Cursor(self._cursor.executemany(sql, sqlite_binding_seq))

    def fetchone(self) -> Optional[Tuple]:
        """
        Fetch the next row of data from the cursor

        @return Row of data (tuple; None on no more data)
        """
        try:
            return next(self)
        except StopIteration:
            return None

    def fetchall(self) -> List[Tuple]:
        """
        Fetch all the remaining rows of data from the cursor

        @return Rows of data (list)
        """
        return list(self)
class Connection(apsw.Connection):
    """
    Subtyped APSW connection that allows us to register adaptors and
    convertors and which returns a cursor that supports them, as well as
    a more convenient interface for registering aggregate UDFs
    """
    def __init__(self, *args, **kwargs) -> None:
        """ Constructor """
        super().__init__(*args, **kwargs)
        # Type -> adaptor callable; shared with every Cursor created here.
        self._adaptors: _Adaptors = {}
        # Declared-type string -> convertor callable.
        self._convertors: _Convertors = {}
    def cursor(self) -> Cursor:
        """
        Create a new cursor on this connection

        @return Cursor (wrapping the native APSW cursor)
        """
        return Cursor(super().cursor())
    def register_aggregate_function(self, name: str, udf: Type[AggregateUDF]) -> None:
        """
        Register an aggregation function using an AggregateUDF
        implementation

        @param name SQL function name to register (string)
        @param udf Aggregate function implementation (AggregateUDF)
        @raise TypeError when the UDF's step signature takes keyword args
        """
        # The first parameter is self, so we cut that off
        param_kinds = list(map(lambda x: x.kind,
                               list(OrderedDict(signature(udf.step).parameters).values())[1:]))
        # SQLite passes aggregate arguments positionally only.
        if any(p in [Parameter.KEYWORD_ONLY, Parameter.VAR_KEYWORD] for p in param_kinds):
            raise TypeError(f"Aggregate function {udf.__name__} has an invalid step signature")
        # -1 tells SQLite that the function is variadic.
        num_args = -1 if any(p == Parameter.VAR_POSITIONAL for p in param_kinds) else len(param_kinds)
        # NOTE(review): assert is stripped under `python -O`; raising
        # ValueError would be a sturdier guard for over-long names.
        assert len(name) < 255, f"\"{name}\" name is too long for aggregate function"
        self.createaggregatefunction(name, aggregate_udf_factory_factory(udf), num_args)
    def register_adaptor(self, t: Type, adaptor: Adaptor) -> None:
        """
        Register a type adaptor

        @param t Type
        @param adaptor Adaptor function (callable)
        """
        self._adaptors[t] = adaptor
    def register_convertor(self, decl: str, convertor: Convertor) -> None:
        """
        Register a type convertor

        @param decl Declared type (string)
        @param convertor Convertor function (callable)
        """
        self._convertors[decl] = convertor
|
cwebber314/pyqt_db | setup.py | Python | mit | 1,320 | 0.019697 | """
To build cx_Freeze executable:
python setup.py bdist_msi
"""
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need
# fine tuning.
# NOTE(review): buildOptions is never passed to setup() below -- it was
# presumably meant to go in options={'build_exe': buildOptions}; confirm.
buildOptions = dict(packages = [], excludes = [])
import PyQt5
from glob import glob
import sys, os
import os.path as osp
# Use the windowed base on Windows so the frozen app shows no console.
base = 'Win32GUI' if sys.platform=='win32' else None
executables = [
    Executable('edit_table.py', base=base)
]
# Python installation root, derived from the location of the os module.
PY_HOME = osp.abspath(osp.join(osp.split(os.__file__)[0], os.pardir))
# C:\Anaconda2\Library\plugins\platforms
# qwindows.dll is the Qt platform plugin the frozen app needs at runtime.
platforms_file = osp.join(PY_HOME, "Library", "plugins", 'platforms', 'qwindows.dll')
setup(name='pyqt_db',
      version = '1.0',
      description = 'Playing around with Qt5 and database widgets',
      data_files = [
        # Bundle the MSVC runtime DLLs and the sample database.
        ('', glob(r'C:\Windows\SYSTEM32\msvcp100.dll')),
        ('', glob(r'C:\Windows\SYSTEM32\msvcr100.dll')),
        ('', ['example.sqlite']),
        #('platforms', glob(osp.join(PY_HOME, 'Lib\site-packages\PyQt5\plugins\platforms\windows.dll'))),
        ('platforms', [platforms_file]),
        #('images', ['images\logo.png']),
        #('images', ['images\shannon.png']),
      ],
      # NOTE(review): 'py2exe' is not a cx_Freeze options key (cx_Freeze
      # expects 'build_exe'); this sub-dict is most likely ignored. Confirm
      # before relying on bundle_files/includes.
      options = {
          'py2exe': {
              'bundle_files': 1,
              'includes': ['sip', 'PyQt5.QtCore'],
          }
      },
      executables = executables)
|
leighpauls/k2cro4 | native_client/tests/gdb/print_symbol.py | Python | bsd-3-clause | 694 | 0.012968 | # -*- python -*-
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gdb_test import AssertEquals
import gdb_test
d | ef test(gdb):
gdb.Command('break set_global_var')
AssertEquals(gdb.ResumeCommand('continue')['reason'], 'breakpoint-hit')
AssertEquals(gdb.Eval('global_var'), '2')
AssertEquals(gdb.Eval('arg'), '1')
AssertEquals(gdb.ResumeCommand('finish')['reason'], 'func | tion-finished')
AssertEquals(gdb.Eval('global_var'), '1')
AssertEquals(gdb.Eval('local_var'), '3')
gdb.Quit()
if __name__ == '__main__':
gdb_test.RunTest(test, 'print_symbol')
|
plumgrid/plumgrid-nova | nova/ipv6/__init__.py | Python | apache-2.0 | 698 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundati | on
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may ob | tain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.ipv6.api import * # noqa
|
mozilla/popcorn_maker | popcorn_gallery/users/urls.py | Python | bsd-3-clause | 294 | 0 | from django.con | f.urls.defaults import patterns, url
urlpatterns | = patterns(
'popcorn_gallery.users.views',
url(r'^edit/$', 'edit', name='users_edit'),
url(r'^delete/$', 'delete_profile', name='users_delete'),
url(r'^(?P<username>[\w-]+)/$', 'profile', name='users_profile'),
)
|
jr-garcia/Engendro3D | e3d/gui/FontRendering/japanese_range.py | Python | mit | 6,858 | 0 | # coding=utf-8
jchars = u'。々ゝヽゞヾーぁァあアぃィいイぅゥうウヴぇェえエぉォおオヵかカがガきキぎギくクぐグヶけケげゲこコごゴさサざザしシじジすスず' \
u'ズせセぜゼそソぞゾたタだダちチぢヂっッつツづヅてテでデとトどドなナにニぬヌねネのノはハばバぱパひヒびビぴピふフぶブぷプへ' \
u'ヘべベぺペほホぼボぽポまマみミむムめメもモゃャやヤゅュゆユょョよヨらラりリるルれレろロゎヮわワゐヰゑヱをヲんン一丁七万-' \
u'下不与丑且世丘丙両並中丸丹主久乏乗乙九乱乳乾亀了予争事二互五井亜亡交亥亨享-亭人仁今介仏仕他付仙代-以仮仰仲件任企伊伏-休' \
u'会伝伯伴伸伺似但位-佐体何余作佳併使例侍供依価侮侯侵便係促俊俗保信修俳俵俸俺倉個倍倒候借倣値倫倹偉偏停健側-偶偽傍傑傘備催' \
u'債傷傾働像僕僚僧儀億儒償優元-兆先光克免兎児党入全八-六共兵具典兼内円冊再冒冗写冠冬冷准凍凝凡処凶凸-出刀刃分-刈刊刑列初判' \
u'別利到制-券刺刻則削前剖剛剣剤副剰割創劇力功加劣助努励労効劾勅勇勉動勘務勝募勢勤勧勲勺匁包化北匠匹-医匿十千升午半卑-協南単' \
u'博占卯-危即-卵卸厄厘厚原厳去参又及-収叔取受叙口-句叫召可台史右号司各合吉同-向君吟否含吸吹呈-告周味呼命和咲哀品員哲唆唇唐' \
u'唯唱商問啓善喚喜喝喪喫営嗣嘆嘉嘱器噴嚇囚四回因団困囲図固国圏園土圧在地坂均坊坑坪垂型垣埋城域執培基埼堀堂堅堕堤堪報場塀塁' \
u'塊塑塔塗塚塩塾境墓増墜墨墳墾壁壇壊壌士壬壮声-売変夏夕外多夜夢大天-夫央失奇-奉奏契奔奥奨奪奮女奴好如-妄妊妙妥妨妹妻姉始姓' \
u'委姫姻姿威娘娠娯婆婚婦婿媒嫁嫌嫡嬢子孔字存孝季孤学孫宅宇-安完宗-定宜宝実客-室宮宰害-家容宿寂寄-密富寒寛寝察寡寧審寮寸寺対' \
u'寿封専射将尉-尋導小少尚就尺尼-局居屈届屋展属層履屯山岐岡岩岬岳岸峠峡峰島崇崎崩川州巡巣工-巨差己巳巻市布帆希帝帥師席帯帰帳常' \
u'帽幅幕幣干-年幸幹幻-幾庁広床序底店庚府度座庫庭庶-庸廃廉廊延廷建弁弊式弐弓-引弘弟弦弧弱張強弾当形彩彫彰影役彼往征径待律後徐' \
u'徒従得御復循微徳徴徹心必忌忍志-忙応忠快念怒怖思怠急性怪恋恐恒恥恨恩恭息恵悔悟悠患悦悩悪悲悼情惑惜惨惰想愁愉意愚愛感慈態慌' \
u'慎慕慢慣慨慮慰慶憂憎憤憩憲憶憾懇懐懲懸戊戌成-戒戦戯戸戻房所扇扉手才打払扱扶批承技抄把抑投抗折抜択披抱抵抹押抽担拍拐拒拓拘' \
u'拙招拝拠拡括拷拾持指挑挙挟振挿捕捜捨据掃授掌排掘掛採探接控推措掲描提揚換握揮援揺損搬搭携搾摂摘摩撃撤撮撲擁操擦擬支改攻放政' \
u'故敏救敗教敢散敬数整敵敷文斉斎斗料斜斤 | 斥断新方施旅旋族旗既日旧-早旬昆昇昌明易昔星映春昨昭是昼時晩普景晴晶暁暇暑暖暗暦暫暮暴' \
u'曇曜曲更書曹替最月有服朕朗望朝期木未-札朱朴机朽杉材村束条来杯東松板析林枚果枝枠枢枯架柄某染柔柱柳査栄栓校株核根格栽桃案桑' \
u'桜桟梅 | 械棄棋棒棚棟森棺植検業極楼楽概構様槽標模権横樹橋機欄欠次欧欲欺款歌歓止正武歩歯歳歴死殉-残殖殴段殺殻殿母毎毒比毛氏民' \
u'気水氷永汁求汎汗汚江池決汽沈沖没沢河沸油治沼沿況泉泊泌法泡-泣泥注泰泳洋洗洞津洪活派流浄浅浜浦浪浮浴海浸消涙涯液涼淑淡深混' \
u'添清渇-渉渋渓減渡渦温測港湖湯湾-満源準溝溶滅滋滑滝滞滴漁漂漆漏演漠漢漫漬漸潔潜潟潤潮澄激濁濃濫濯瀬火灯灰災炉炊炎炭点為烈' \
u'無焦然焼煙照煩煮熟熱燃燥爆爵父片版牙牛牧物牲特犠犬犯状狂狩独狭猛猟猪猫献猶猿獄獣獲玄率玉王珍珠班現球理琴環璽瓶甘甚生産用田' \
u'-申男町画界畑畔留畜畝略番異畳疎疑疫疲疾病症痘痛痢痴療癒癖癸発登白百的皆皇皮皿盆益盗盛盟監盤目盲直相盾省看県真眠眺眼着睡督' \
u'瞬矛矢知短矯石砂研砕砲破硝硫硬碁碑確磁磨礁礎示礼社祈祉祖祚祝神祥票祭禁禄禅禍-福秀私秋科秒秘租秩称移程税稚種稲稼稿穀穂積穏' \
u'穫穴究空突窃窒窓窮窯立竜章童端競竹笑笛符第筆等筋筒答策箇算管箱節範築篤簡簿籍米粉粋粒粗粘粛粧精糖糧糸系糾紀約紅紋納純紙-' \
u'紛素-索紫累細紳紹紺終組経結絞絡給統絵絶絹継続維綱網綿緊総緑緒線締編緩緯練縁縄縛縦縫縮績繁繊織繕繭繰缶罪置罰署罷羅羊美群義' \
u'羽翁翌習翻翼老考者耐耕耗耳聖聞聴職肉肌肖肝肢肥肩肪肯育肺胃胆背胎胞胴胸能脂脅脈脚脱脳脹腐腕腰腸腹膚膜膨臓臣臨自臭至致興舌舎舗' \
u'舞舟航般舶船艇艦良色芋芝花芳芸芽苗若苦英茂茎茶草荒荘荷菊菌菓菜華落葉著葬蒸蓄蔵薄薦薪-薬藤藩藻虎虐虚虜虞虫蚊蚕蛇蛍蛮融血衆行' \
u'術街衛衝衡衣表衰衷袋被裁裂装裏裕補裸製複褐褒襟襲西要覆覇見規視覚覧親観角解触言訂計討訓託記訟訪設許訳訴診証詐詔評詞詠試詩' \
u'詰-詳誇誉誌認誓誕誘語誠誤説読誰課調談請論諭諮諸諾謀謁謄謙講謝謡謹識譜警議譲護谷豆豊豚象豪貝貞負-貢貧-販貫責貯貴買貸費貿賀' \
u'賃賄資賊賓賛賜賞賠賢賦質購贈赤赦走赴起超越趣足距跡路跳践踊踏躍身車軌軍軒軟転軸軽較載輝輩輪輸轄辛辞辰-農辺込迅迎近返迫迭述' \
u'迷追退送逃逆透逐逓途通逝速造連逮週進逸遂遅遇遊運遍過道-違遠遣適遭遮遵遷選遺避還邦邪邸郊郎郡部郭郵郷都酉酌配酒酔酢酪酬酵酷酸' \
u'醜醸釈里-量金針釣鈍鈴鉄鉛鉢鉱銀銃銅銑銘銭鋭鋳鋼錘錠錬錯録鍛鎖鎮鏡鐘鑑長門閉開閏閑間関閣閥閲闘阪防阻附降限陛院-陥陪陰陳陵' \
u'陶陸険陽隅隆隊階随隔際障隠隣隷隻雄-雇雉雌雑離難雨雪雰雲零雷電需震霊霜霧露青静非面革靴韓音韻響頂頃項順預-頒領頭頻頼題額顔顕' \
u'願類顧風飛食飢飯飲飼-飾養餓館首香馬駄-駆駐騎騒験騰驚骨髄高髪鬼魂魅魔魚鮮鯨鳥鳴鶏鹿麗麦麻黄黒黙鼓鼠鼻齢'
# Explode the character table into a list of single characters.
# NOTE(review): jchars contains literal '-' characters that look like they
# were intended as character-class range markers (e.g. for a regex set);
# list() keeps them as ordinary entries -- confirm the intended usage.
jrange = list(jchars)
|
enormandeau/ngs_genotyping | scripts/10_genotype_from_blast_results.py | Python | gpl-3.0 | 2,437 | 0.004514 | #!/usr/bi | n/python
"""Use files with frequency counts of blasts on possible alleles (see format
below) to genotype individuals.
Usage:
genotype_from_blast_results.py file_list threshold_file output_file
- file_list is a file containing the path to the individual summary file | s. the
file_list format is as follows:
individual_summary/MID100-02_cleaned_aligned.fasta.blasts_summary
individual_summary/MID100-03_cleaned_aligned.fasta.blasts_summary
individual_summary/MID100-04_cleaned_aligned.fasta.blasts_summary
...
individual_summary/MID102-03_cleaned_aligned.fasta.blasts_summary
- The individual summary files are in the following format (output format of
sort | uniq -c | sort -nr in Linux terminal):
316 A_27
197 A_15
13 A_1
4 A_8
4 A_19
3 A_28
1 A_9
1 A_16
- threshold_file is a file with, on each line, the name of an allele and the
minimal count to consider this allele real in an individual, separated by a
tabulation. Format:
A_1 50
A_2 50
A_7 60
A_10 20
- output_file is the name of the file that will contain the results
"""
# Importing modules
import sys
import re
# Main
if __name__ == '__main__':
try:
file_list = sys.argv[1]
threshold_file = sys.argv[2]
output_file = sys.argv[3]
except:
print __doc__
sys.exit(1)
to_write = []
individuals = [l.strip() for l in open(file_list) if l.strip() != ""]
allele_threshold ={}
thresholds = [l.strip().split("\t") for l in open(threshold_file) if l.strip() != ""]
for tr in thresholds:
t, a = int(tr[0]), tr[1]
allele_threshold[a] = t
for i in individuals:
itemp = i.replace("individual_summary/", "")
itemp = itemp.replace("A_", "")
itemp = re.sub('(MID[0-9]+-[0-9]+).*', '\\1', itemp)
data = [l.strip() for l in open(i) if l.strip() != ""]
allele_dict = {}
for d in data:
count, allele = d.split(" ")
allele_dict[allele] = int(count)
total = sum(allele_dict.values())
allele_number = 0
#temp_min_proportion = min_proportion
for a in allele_dict:
atemp = a.replace("_", "")
if allele_dict[a] >= allele_threshold[a]:
allele_number += 1
to_write.append("_".join([itemp, str(allele_number), atemp]))
open(output_file, "w").write("\n".join(to_write))
|
idan/oauthlib | oauthlib/openid/connect/core/endpoints/userinfo.py | Python | bsd-3-clause | 3,847 | 0.00052 | """
oauthlib.openid.connect.core.endpoints.userinfo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of userinfo endpoint.
"""
import json
import logging
from oauthlib.common import Request
from oauthlib.oauth2.rfc6749 import errors
| from oauthlib.oauth2.rfc6749.endpoints.base import (
BaseEndpoint, catch_errors_and_unavailability,
)
| from oauthlib.oauth2.rfc6749.tokens import BearerToken
log = logging.getLogger(__name__)
class UserInfoEndpoint(BaseEndpoint):
    """Authorizes access to userinfo resource.
    """
    def __init__(self, request_validator):
        # Bearer validator only; token creation callbacks are unused here,
        # hence the None arguments.
        self.bearer = BearerToken(request_validator, None, None, None)
        self.request_validator = request_validator
        BaseEndpoint.__init__(self)

    @catch_errors_and_unavailability
    def create_userinfo_response(self, uri, http_method='GET', body=None, headers=None):
        """Validate BearerToken and return userinfo from RequestValidator

        The UserInfo Endpoint MUST return a
        content-type header to indicate which format is being returned. The
        content-type of the HTTP response MUST be application/json if the
        response body is a text JSON object; the response body SHOULD be encoded
        using UTF-8.

        :return: (headers dict, response body, HTTP status) tuple.
        """
        request = Request(uri, http_method, body, headers)
        # Userinfo access always requires the "openid" scope.
        request.scopes = ["openid"]
        self.validate_userinfo_request(request)

        claims = self.request_validator.get_userinfo_claims(request)
        if claims is None:
            log.error('Userinfo MUST have claims for %r.', request)
            raise errors.ServerError(status_code=500)

        if isinstance(claims, dict):
            # Plain claims mapping -> JSON response; "sub" is mandatory.
            resp_headers = {
                'Content-Type': 'application/json'
            }
            if "sub" not in claims:
                log.error('Userinfo MUST have "sub" for %r.', request)
                raise errors.ServerError(status_code=500)
            body = json.dumps(claims)
        elif isinstance(claims, str):
            # A string is taken to be an already-serialized (signed) JWT.
            resp_headers = {
                'Content-Type': 'application/jwt'
            }
            body = claims
        else:
            log.error('Userinfo return unknown response for %r.', request)
            raise errors.ServerError(status_code=500)
        log.debug('Userinfo access valid for %r.', request)
        return resp_headers, body, 200

    def validate_userinfo_request(self, request):
        """Ensure the request is valid.

        5.3.1.  UserInfo Request
        The Client sends the UserInfo Request using either HTTP GET or HTTP
        POST.  The Access Token obtained from an OpenID Connect Authentication
        Request MUST be sent as a Bearer Token, per Section 2 of OAuth 2.0
        Bearer Token Usage [RFC6750].

        It is RECOMMENDED that the request use the HTTP GET method and the
        Access Token be sent using the Authorization header field.

        The following is a non-normative example of a UserInfo Request:

            GET /userinfo HTTP/1.1
            Host: server.example.com
            Authorization: Bearer SlAV32hkKG

        5.3.3. UserInfo Error Response
        When an error condition occurs, the UserInfo Endpoint returns an Error
        Response as defined in Section 3 of OAuth 2.0 Bearer Token Usage
        [RFC6750]. (HTTP errors unrelated to RFC 6750 are returned to the User
        Agent using the appropriate HTTP status code.)

        The following is a non-normative example of a UserInfo Error Response:

            HTTP/1.1 401 Unauthorized
            WWW-Authenticate: Bearer error="invalid_token",
            error_description="The Access Token expired"
        """
        # Reject invalid/expired tokens, then enforce the "openid" scope.
        if not self.bearer.validate_request(request):
            raise errors.InvalidTokenError()
        if "openid" not in request.scopes:
            raise errors.InsufficientScopeError()
|
GeoCat/QGIS | python/plugins/processing/algs/qgis/DensifyGeometriesInterval.py | Python | gpl-2.0 | 2,510 | 0.000797 | # -*- coding: utf-8 -*-
"""
***************************************************************************
DensifyGeometriesInterval.py by Anita Graser, Dec 2012
based on DensifyGeometries.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import range
__author__ = 'Anita Graser'
__date__ = 'Dec 2012'
__copyright__ = ' | (C) 2012, Anita Graser'
# This will get replaced with a git S | HA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessingParameterNumber)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class DensifyGeometriesInterval(QgisFeatureBasedAlgorithm):
    """Densify every feature's geometry by inserting extra vertices so that
    no two consecutive vertices are further apart than a given interval."""

    INTERVAL = 'INTERVAL'

    def group(self):
        return self.tr('Vector geometry')

    def __init__(self):
        super().__init__()
        # Vertex-insertion interval; populated from the parameters in
        # prepareAlgorithm().
        self.interval = None

    def initParameters(self, config=None):
        self.addParameter(QgsProcessingParameterNumber(self.INTERVAL,
                                                       self.tr('Interval between vertices to add'), QgsProcessingParameterNumber.Double,
                                                       1, False, 0, 10000000))

    def name(self):
        return 'densifygeometriesgivenaninterval'

    def displayName(self):
        return self.tr('Densify geometries given an interval')

    def outputName(self):
        return self.tr('Densified')

    def prepareAlgorithm(self, parameters, context, feedback):
        # Bug fix: the interval was assigned to a local variable, so
        # processFeature() raised NameError on the undefined name 'interval'.
        self.interval = self.parameterAsDouble(parameters, self.INTERVAL, context)
        return True

    def processFeature(self, feature, feedback):
        # Null geometries are passed through untouched.
        if feature.hasGeometry():
            new_geometry = feature.geometry().densifyByDistance(float(self.interval))
            feature.setGeometry(new_geometry)
        return feature
|
solent-eng/solent | solent/eng/engine.py | Python | lgpl-3.0 | 20,491 | 0.003416 | # // license
# Copyright 2016, Free Software Foundation.
#
# This file is part of Solent.
#
# Solent is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Solent is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Solent. If not, see <http://www.gnu.org/licenses/>.
#
# // overview
# Network engine. This is the core class of the eng system. It provides
# something that's similar to a reactor, but it's less ambitious than other
# reactor-based systems.
#
# Maybe you're here because you're trying to understand how to use this class.
# This would be a hairy read. A better starting point would be scenarios.py.
# * See if the thing you are trying to do is covered there. If it is, then
# learn by example, and spare yourself the anguish of reading this system.
# * Also, there's a FAQ at the top there.
#
# If you do decide to press on and delve into this code, I recommend you start
# as follows:
# - read the header for metasock.py. There's lots of edge-cases in the
# Berkeley sockets API, and there's an unusual amount of complexity to
# facading that. Much of what is here is explained by that priority.
# - read from event_loop below to see what an engine instance does once
# it has been started.
from .action_pool import ActionPool
from .activity import Activity
from .clock import Clock
from .metasock import MetasockCloseCondition
from .metasock import metasock_create_sub
from .metasock import metasock_create_pub
from .metasock import metasock_create_tcp_accept
from .metasock import metasock_create_tcp_client
from .metasock import metasock_create_tcp_server
from .orb import Orb
from solent import uniq
from solent import log
from solent import Mempool
from collections import OrderedDict as od
import platform
import select
import socket
import time
import traceback
PLATFORM_SYSTEM = platform.system()
def eloop_debug(msg):
    # Prefix event-loop diagnostics so they stand out in the log stream.
    log('(@) %s'%msg)
class CsMsClose:
    '''
    Callback-state object shared between Engine and Metasock while a socket
    is closing.

    The scenario: a socket gets closed partway through an event loop.  The
    loop runs in several stages, and the later stages must not keep acting
    on a socket that is already in closing.  This object lets the engine
    keep an ignore-list of sockets in that state.

    Unlike the other callback structures in this file, user code never sees
    this one: it is private to the Engine/Metasock relationship.  It is
    deliberately kept out of cs.py so that end-users coming to grips with
    engine/metasock are not confused by it.
    '''
    def __init__(self):
        self.ms = None
        self.sid = None
        self.message = None
    def __repr__(self):
        fields = '|'.join(str(field) for field in (self.ms, self.sid, self.message))
        return '(%s%s)' % (self.__class__.__name__, fields)
class CsEngCustomFdRead:
    # Callback-state for custom file-descriptor read events: the handle the
    # caller registered under (cfd_h) and the fd that became readable.
    def __init__(self):
        self.cfd_h = None
        self.fd = None
class Engine(object):
    def __init__(self, mtu):
        # mtu: maximum transmission unit used when sizing socket reads/writes.
        self.mtu = mtu
        #
        self.mempool = Mempool()
        self.clock = Clock()
        self.action_pool = ActionPool()
        # sid (socket id) -> metasock wrapper, one entry per live socket.
        self.sid_to_metasock = od()
        # spin_h -> spin/orb objects that get a turn on each event loop.
        self.spins = od()
        #
        self.activity = Activity()
        self.b_debug_eloop = False
        self.sid_counter = 0
        # select() timeout used when the previous turn saw no activity.
        self.default_timeout = 0.2
        self.b_nodelay = False
        #
        self.cb_ms_close = None
        self.cs_ms_close = CsMsClose()
        #
        # fd vs (cfd_h, cb_eng_custom_fd_read)
        self.d_eng_custom_read = {}
        self.cs_eng_custom_fd_read = CsEngCustomFdRead()
    def enable_nodelay(self):
        # Request TCP_NODELAY behaviour for subsequently created sockets.
        self.b_nodelay = True
    def disable_nodelay(self):
        self.b_nodelay = False
    def debug_eloop_on(self):
        # Enable per-turn event-loop diagnostics (see eloop_debug).
        self.b_debug_eloop = True
    def debug_eloop_off(self):
        self.b_debug_eloop = False
    def get_clock(self):
        # Expose the engine's clock so spins can schedule against it.
        return self.clock
    def get_mtu(self):
        return self.mtu
    def set_mtu(self, mtu):
        self.mtu = mtu
    def set_default_timeout(self, value):
        # Seconds that select() may block when the engine is idle.
        self.default_timeout = value
def create_sid(self):
next = self.sid_counter
self.sid_counter += 1
return next
    def close(self):
        # Tear down every socket, then every spin.  Teardown is deliberately
        # best-effort: failures are printed and the remaining items still
        # get closed, hence the broad excepts.
        items = [pair for pair in self.sid_to_metasock.items()]
        for (sid, ms) in items:
            try:
                self._close_metasock(
                    sid=sid,
                    reason='engine_closing')
            except:
                traceback.print_exc()
        for orb in self.spins.values():
            try:
                orb.eng_close()
            except:
                traceback.print_exc()
def _add_spin(self, spin_h, spin):
eng_methods = [m for m in dir(spin) if m.startswith('eng_')]
m = "Missing method. Need eng_turn(activity), eng_close()"
if 'eng_turn' not in eng_methods:
raise Exception(m)
if 'eng_close' not in eng_methods:
raise Exception(m)
if spin_h in self.spins:
raise Exception("Engine already has spin_h %s"%spin_h)
if spin in self.spins.values():
raise Exception("Orb is already in engine. Don't double-add.")
self.spins[spin_h] = spin
    def init_orb(self, i_nearcast):
        '''
        Orb is a special kind of spin that does nearcasting.

        i_nearcast: the nearcast schema the orb will enforce.
        Returns the registered Orb instance.
        '''
        # The orb has a method for changing this. It was a hassle and likely
        # confusing to new users to force the user to be constantly changing
        # this. Hence, we default it.
        spin_h = 'orb/%s'%(uniq())
        spin = Orb(
            spin_h=spin_h,
            engine=self,
            i_nearcast=i_nearcast)
        self._add_spin(
            spin_h=spin_h,
            spin=spin)
        return spin
    def init_spin(self, construct, **kwargs):
        # construct: a spin class; it is instantiated with a generated
        # spin_h, this engine, and any extra kwargs, then registered.
        spin_h = '%s/%s'%(str(construct), uniq())
        spin = construct(
            spin_h=spin_h,
            engine=self,
            **kwargs)
        self._add_spin(
            spin_h=spin_h,
            spin=spin)
        return spin
    def del_spin(self, spin_h):
        # Remove a previously-registered spin; raises KeyError if unknown.
        # xxx unsubscribe logic if it is an orb
        del self.spins[spin_h]
    def turn(self, timeout=0):
        """Run one pass of the event loop: give every spin a turn, then poll
        select.  Returns the timeout the next turn should use: 0 while there
        is activity, default_timeout once the engine goes idle."""
        b_any_activity_at_all = False
        # Copy the spin list so spins may register/deregister during the loop.
        spins_in_this_loop = list(self.spins.values())
        for spin in spins_in_this_loop:
            spin.eng_turn(
                activity=self.activity)
        # Determine if there was activity from the spins
        lst_orb_activity = self.activity.get()
        if lst_orb_activity:
            self.activity.clear()
            b_any_activity_at_all = True
            if self.b_debug_eloop:
                for s in lst_orb_activity:
                    eloop_debug('*ACTIVITY* %s'%(s))
        # Select
        activity_from_select = self._call_select(timeout)
        if activity_from_select:
            b_any_activity_at_all = True
            if self.b_debug_eloop:
                eloop_debug('select activity')
        # If we have activity, we don't want select jamming
        # things up with delays.
        if b_any_activity_at_all:
            # want no timeout in next loop
            timeout = 0
        else:
            # We are in a period of inactivity: let the next loop
            # select have some timeout.
            timeout = self.default_timeout
        return timeout
def cycle(self):
'''
This causes a sequence of turns to run until there is no more
activity. This can be useful for testing and troubleshooting.
'''
timeout = 0
while timeout == 0:
timeout = self.turn()
def event_loop(self):
'''
Lets the engine take ownershi |
taschini/morepath | morepath/tests/test_security.py | Python | bsd-3-clause | 16,352 | 0 | # -*- coding: utf-8 -*-
import morepath
from morepath.request import Response
from morepath.authentication import Identity, NO_IDENTITY
from .fixtures import identity_policy
import base64
import json
from webtest import TestApp as Client
try:
from cookielib import CookieJar
except ImportError:
from http.cookiejar import CookieJar
def test_no_permission():
    """With no permission rule registered, a permission-guarded view must
    deny access (403) by default."""
    class app(morepath.App):
        pass

    class Model(object):
        def __init__(self, id):
            self.id = id

    class Permission(object):
        pass

    @app.path(model=Model, path='{id}',
              variables=lambda model: {'id': model.id})
    def get_model(id):
        return Model(id)

    @app.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    c = Client(app())

    # No rule ever grants Permission, so the request is forbidden.
    c.get('/foo', status=403)
def test_permission_directive_identity():
    """A permission rule bound to an identity is consulted per model:
    access is granted for id 'foo' and denied (403) for anything else."""
    class app(morepath.App):
        pass

    class Model(object):
        def __init__(self, id):
            self.id = id

    class Permission(object):
        pass

    @app.verify_identity()
    def verify_identity(identity):
        # Accept whatever identity the policy produces.
        return True

    @app.path(model=Model, path='{id}',
              variables=lambda model: {'id': model.id})
    def get_model(id):
        return Model(id)

    @app.permission_rule(model=Model, permission=Permission)
    def get_permission(identity, model, permission):
        # Grant only for the model with id 'foo'.
        if model.id == 'foo':
            return True
        else:
            return False

    @app.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    @app.identity_policy()
    class IdentityPolicy(object):
        def identify(self, request):
            # Every request is identified as the same test identity.
            return Identity('testidentity')

        def remember(self, response, request, identity):
            pass

        def forget(self, response, request):
            pass

    c = Client(app())

    response = c.get('/foo')
    assert response.body == b'Model: foo'

    response = c.get('/bar', status=403)
def test_permission_directive_with_app_arg():
    """A permission rule may accept the app instance as first argument."""
    class App(morepath.App):
        pass

    class Permission(object):
        pass

    class Model(object):
        def __init__(self, id):
            self.id = id

    @App.verify_identity()
    def verify_identity(identity):
        return True

    @App.path(model=Model, path='{id}',
              variables=lambda model: {'id': model.id})
    def get_model(id):
        return Model(id)

    @App.permission_rule(model=Model, permission=Permission)
    def get_permission(app, identity, model, permission):
        # The framework hands the application instance in first.
        assert isinstance(app, App)
        return model.id == 'foo'

    @App.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    @App.identity_policy()
    class IdentityPolicy(object):
        def identify(self, request):
            return Identity('testidentity')

        def remember(self, response, request, identity):
            pass

        def forget(self, response, request):
            pass

    client = Client(App())
    reply = client.get('/foo')
    assert reply.body == b'Model: foo'
    client.get('/bar', status=403)
def test_permission_directive_no_identity():
    """Permission rules bound to ``identity=None`` govern anonymous access.

    No identity policy is installed, so every request is anonymous; the
    rule registered for ``identity=None`` is consulted instead.
    """
    class app(morepath.App):
        pass

    class Model(object):
        def __init__(self, id):
            self.id = id

    class Permission(object):
        pass

    @app.path(model=Model, path='{id}',
              variables=lambda model: {'id': model.id})
    def get_model(id):
        return Model(id)

    @app.permission_rule(model=Model, permission=Permission, identity=None)
    def get_permission(identity, model, permission):
        # Only the 'foo' model is readable anonymously.
        return model.id == 'foo'

    @app.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    c = Client(app())
    response = c.get('/foo')
    assert response.body == b'Model: foo'
    response = c.get('/bar', status=403)
def test_policy_action():
    """The fixture identity policy grants access to 'foo' only."""
    client = Client(identity_policy.app())
    reply = client.get('/foo')
    assert reply.body == b'Model: foo'
    client.get('/bar', status=403)
def test_no_identity_policy():
    """Logging in is a no-op when no identity policy is installed."""
    class App(morepath.App):
        pass

    @App.path(path='{id}')
    class Model(object):
        def __init__(self, id):
            self.id = id

    class Permission(object):
        pass

    @App.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    @App.view(model=Model, name='log_in')
    def log_in(self, request):
        resp = Response()
        request.app.remember_identity(
            resp, request, Identity(userid='user', payload='Amazing'))
        return resp

    @App.view(model=Model, name='log_out')
    def log_out(self, request):
        resp = Response()
        request.app.forget_identity(resp, request)
        return resp

    @App.verify_identity()
    def verify_identity(identity):
        return True

    client = Client(App())
    # With permission-protected views but no identity policy installed,
    # the log-in view changes nothing: access stays forbidden throughout.
    client.get('/foo', status=403)
    client.get('/foo/log_in')
    client.get('/foo', status=403)
    client.get('/foo/log_out')
    client.get('/foo', status=403)
class DumbCookieIdentityPolicy(object):
    """Cookie-backed identity policy with no integrity protection.

    Strictly a test helper -- never use something like this in practice!
    """

    def identify(self, request):
        # The identity attributes travel base64-encoded JSON in the
        # 'dumb_id' cookie; absence of the cookie means anonymous.
        raw = request.cookies.get('dumb_id', None)
        if raw is None:
            return NO_IDENTITY
        attrs = json.loads(base64.b64decode(raw).decode())
        return Identity(**attrs)

    def remember(self, response, request, identity):
        encoded = base64.b64encode(str.encode(json.dumps(identity.as_dict())))
        response.set_cookie('dumb_id', encoded)

    def forget(self, response, request):
        response.delete_cookie('dumb_id')
def test_cookie_identity_policy():
    """Full log-in/log-out round trip through a cookie identity policy."""
    class app(morepath.App):
        pass

    @app.path(path='{id}')
    class Model(object):
        def __init__(self, id):
            self.id = id

    class Permission(object):
        pass

    @app.permission_rule(model=Model, permission=Permission)
    def get_permission(identity, model, permission):
        return identity.userid == 'user'

    @app.view(model=Model, permission=Permission)
    def default(self, request):
        return "Model: %s" % self.id

    @app.view(model=Model, name='log_in')
    def log_in(self, request):
        resp = Response()
        request.app.remember_identity(
            resp, request, Identity(userid='user', payload='Amazing'))
        return resp

    @app.view(model=Model, name='log_out')
    def log_out(self, request):
        resp = Response()
        request.app.forget_identity(resp, request)
        return resp

    @app.identity_policy()
    def policy():
        return DumbCookieIdentityPolicy()

    @app.verify_identity()
    def verify_identity(identity):
        return True

    client = Client(app(), cookiejar=CookieJar())
    client.get('/foo', status=403)
    client.get('/foo/log_in')
    reply = client.get('/foo', status=200)
    assert reply.body == b'Model: foo'
    client.get('/foo/log_out')
    client.get('/foo', status=403)
def test_default_verify_identity():
    """By default no identity verifies, so every identity is rejected."""
    class app(morepath.App):
        pass

    ident = morepath.Identity('foo')
    assert not app()._verify_identity(ident)
def test_verify_identity_directive():
    """A registered ``verify_identity`` function decides validity."""
    class app(morepath.App):
        pass

    @app.verify_identity()
    def verify_identity(identity):
        return identity.password == 'right'

    assert not app()._verify_identity(
        morepath.Identity('foo', password='wrong'))
    assert app()._verify_identity(
        morepath.Identity('foo', password='right'))
def test_verify_identity_directive_app_arg():
class App(morepath.App):
pass
@App.verify_identity()
def verify_identity( |
TooAngel/sensorviewer | tests/tester_tests.py | Python | apache-2.0 | 588 | 0.001701 | i | mport tester
import unittest
from mock import Mock, patch, Magi | cMock
class WorkerTestCase(unittest.TestCase):
    """Verify that each ``tester`` helper issues the expected HTTP call."""

    @patch('tester.requests')
    def test_post_error(self, mock_requests):
        tester.post()
        self.assertTrue(mock_requests.post.called)

    @patch('tester.requests')
    def test_get_error(self, mock_requests):
        tester.get()
        self.assertTrue(mock_requests.get.called)

    @patch('tester.requests')
    def test_calibrate_error(self, mock_requests):
        # calibrate() is expected to POST as well.
        tester.calibrate()
        self.assertTrue(mock_requests.post.called)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
Jozhogg/iris | lib/iris/tests/unit/analysis/maths/test_add.py | Python | lgpl-3.0 | 1,586 | 0 | # (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the :func:`iris.analysis.maths.add` function."""
from __future__ import (absolute_import, division, print_function)
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import operator
from iris.analysis.maths import add
from iris.tests.unit.analysis.maths import \
CubeArithmeticBroadcastingTestMixin, CubeArithmeticMaskingTestMixin
class TestBroadcasting(tests.IrisTest, CubeArithmeticBroadcastingTestMixin):
    # Runs the shared broadcasting test mixin against the `add` function.
    @property
    def data_op(self):
        # Plain-array counterpart of the cube operation under test.
        return operator.add

    @property
    def cube_func(self):
        # Cube-level function exercised by the mixin.
        return add
class TestMasking(tests.IrisTest, CubeArithmeticMaskingTestMixin):
    # Runs the shared masking test mixin against the `add` function.
    @property
    def data_op(self):
        # Plain-array counterpart of the cube operation under test.
        return operator.add

    @property
    def cube_func(self):
        # Cube-level function exercised by the mixin.
        return add
# Allow running this test module directly.
if __name__ == "__main__":
    tests.main()
|
Alwnikrotikz/marinemap | lingcod/layers/migrations/0003_auto__add_field_privatelayerlist_name__add_field_privatelayerlist_prio.py | Python | bsd-3-clause | 5,965 | 0.008215 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add ``name`` and ``priority`` columns to
    ``PrivateLayerList``."""

    def forwards(self, orm):
        # Adding field 'PrivateLayerList.name'
        db.add_column('layers_privatelayerlist', 'name', self.gf('django.db.models.fields.CharField')(default='', max_length=50), keep_default=False)

        # Adding field 'PrivateLayerList.priority'
        db.add_column('layers_privatelayerlist', 'priority', self.gf('django.db.models.fields.FloatField')(default=0.0), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'PrivateLayerList.name'
        db.delete_column('layers_privatelayerlist', 'name')

        # Deleting field 'PrivateLayerList.priority'
        db.delete_column('layers_privatelayerlist', 'priority')

    # Frozen ORM snapshot used by South for this migration.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'layers.privatelayerlist': {
            'Meta': {'object_name': 'PrivateLayerList'},
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kml': ('django.db.models.fields.files.FileField', [], {'max_length': '510'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
            'priority': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'layers.publiclayerlist': {
            'Meta': {'object_name': 'PublicLayerList'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kml': ('django.db.models.fields.files.FileField', [], {'max_length': '510'})
        },
        'layers.userlayerlist': {
            'Meta': {'object_name': 'UserLayerList'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kml': ('django.db.models.fields.files.FileField', [], {'max_length': '510'}),
            'user': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['layers']
|
yeleman/snisi | snisi_reprohealth/models/__init__.py | Python | mit | 540 | 0.007407 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
divisio | n, print_function)
from snisi_reprohealth.models.PFActivities import (PFActiv | itiesR, AggPFActivitiesR)
# from snisi_reprohealth.models.ChildrenMortality import (ChildrenDeathR, AggChildrenDeathR)
# from snisi_reprohealth.models.MaternalMortality import (MaternalDeathR, AggMaternalDeathR)
# from snisi_reprohealth.models.Commodities import (RHProductsR, AggRHProductsR)
|
Auzzy/pyinq | setup.py | Python | isc | 915 | 0.002186 | from setuptools import setup
# Package metadata for PyPI; the two 'Programming Language'/'Topic'
# classifiers were corrupted in the source and are repaired here.
setup(
    name='PyInq',
    version='0.2.1',
    author='Austin Noto-Moniz',
    author_email='pyinq.test@gmail.com',
    url='http://auzzy.github.io/pyinq/',
    packages=['pyinq', 'pyinq.asserts', 'pyinq.tags', 'pyinq.printers',
              'pyinq.printers.html', 'pyinq.printers.cli',
              'pyinq.printers.cli.console', 'pyinq.printers.cli.bash',
              'pyinq.parsers'],
    description='Python unit test framework, an alternative to unittest.',
    long_description=open('README.txt').read(),
    license='ISCL',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Testing'
    ]
)
|
leviroth/praw | praw/models/inbox.py | Python | bsd-2-clause | 9,119 | 0 | """Provide the Front class."""
from ..const import API_PATH
from .listing.generator import ListingGenerator
from .base import PRAWBase
from .util import stream_generator
class Inbox(PRAWBase):
"""Inbox is a Listing class that represents the Inbox."""
def all(self, **generator_kwargs):
    """Return a :class:`.ListingGenerator` over all inbox comments and
    messages.

    Extra keyword arguments are forwarded to the
    :class:`.ListingGenerator` constructor.

    To print the type and ID of every available item:

    .. code:: python

       for item in reddit.inbox.all(limit=None):
           print(repr(item))

    """
    endpoint = API_PATH["inbox"]
    return ListingGenerator(self._reddit, endpoint, **generator_kwargs)
def collapse(self, items):
    """Mark each :class:`.Message` in ``items`` as collapsed.

    :param items: A list of :class:`.Message` instances.

    Reddit accepts at most 25 fullnames per request, so the list is
    submitted in batches of 25.

    For example, to collapse all unread Messages:

    .. code:: python

       from praw.models import Message
       unread_messages = [item for item in reddit.inbox.unread(limit=None)
                          if isinstance(item, Message)]
       reddit.inbox.collapse(unread_messages)

    .. seealso::

       :meth:`.Message.uncollapse`

    """
    remaining = list(items)
    while remaining:
        batch, remaining = remaining[:25], remaining[25:]
        payload = {"id": ",".join(message.fullname for message in batch)}
        self._reddit.post(API_PATH["collapse"], data=payload)
def comment_replies(self, **generator_kwargs):
    """Return a :class:`.ListingGenerator` of replies to the user's comments.

    Extra keyword arguments are forwarded to the
    :class:`.ListingGenerator` constructor.

    To print the author of one request's worth of comment replies:

    .. code:: python

       for reply in reddit.inbox.comment_replies():
           print(reply.author)

    """
    endpoint = API_PATH["comment_replies"]
    return ListingGenerator(self._reddit, endpoint, **generator_kwargs)
def mark_read(self, items):
    """Mark Comments or Messages in ``items`` as read.

    :param items: A list of :class:`.Comment` and/or :class:`.Message`
        instances to mark as read in the authorized user's inbox.

    Reddit accepts at most 25 fullnames per request, so the list is
    submitted in batches of 25.

    For example, to mark all unread Messages as read:

    .. code:: python

       from praw.models import Message
       unread_messages = [item for item in reddit.inbox.unread(limit=None)
                          if isinstance(item, Message)]
       reddit.inbox.mark_read(unread_messages)

    .. seealso::

       :meth:`.Comment.mark_read` and :meth:`.Message.mark_read`

    """
    remaining = list(items)
    while remaining:
        batch, remaining = remaining[:25], remaining[25:]
        payload = {"id": ",".join(thing.fullname for thing in batch)}
        self._reddit.post(API_PATH["read_message"], data=payload)
def mark_unread(self, items):
    """Mark Comments or Messages in ``items`` as unread.

    :param items: A list of :class:`.Comment` and/or :class:`.Message`
        instances to mark as unread in the authorized user's inbox.

    Reddit accepts at most 25 fullnames per request, so the list is
    submitted in batches of 25.

    For example, to mark the first 10 items as unread:

    .. code:: python

       to_unread = list(reddit.inbox.all(limit=10))
       reddit.inbox.mark_unread(to_unread)

    .. seealso::

       :meth:`.Comment.mark_unread` and :meth:`.Message.mark_unread`

    """
    remaining = list(items)
    while remaining:
        batch, remaining = remaining[:25], remaining[25:]
        payload = {"id": ",".join(thing.fullname for thing in batch)}
        self._reddit.post(API_PATH["unread_message"], data=payload)
def mentions(self, **generator_kwargs):
    r"""Return a :class:`.ListingGenerator` of username mentions.

    A mention is a :class:`.Comment` whose body names the authorized
    redditor like ``/u/redditor_name``.

    Extra keyword arguments are forwarded to the
    :class:`.ListingGenerator` constructor.

    For example, to output the author and body of the first 25 mentions:

    .. code:: python

       for mention in reddit.inbox.mentions(limit=25):
           print('{}\n{}\n'.format(mention.author, mention.body))

    """
    endpoint = API_PATH["mentions"]
    return ListingGenerator(self._reddit, endpoint, **generator_kwargs)
def message(self, message_id):
    """Return the Message whose base36 id is ``message_id``.

    :param message_id: The base36 id of a message.

    Example:

    .. code:: python

       message = reddit.inbox.message('7bnlgu')

    """
    listing = self._reddit.get(API_PATH["message"].format(id=message_id))
    root = listing[0]
    # The requested id may be either the thread root or one of its replies.
    for candidate in [root] + root.replies:
        if candidate.id == message_id:
            return candidate
def messages(self, **generator_kwargs):
    """Return a :class:`.ListingGenerator` of inbox messages.

    Extra keyword arguments are forwarded to the
    :class:`.ListingGenerator` constructor.

    For example, to output the subject of the most recent 5 messages:

    .. code:: python

       for message in reddit.inbox.messages(limit=5):
           print(message.subject)

    """
    endpoint = API_PATH["messages"]
    return ListingGenerator(self._reddit, endpoint, **generator_kwargs)
def sent(self, **generator_kwargs):
    """Return a :class:`.ListingGenerator` of sent messages.

    Extra keyword arguments are forwarded to the
    :class:`.ListingGenerator` constructor.

    For example, to output the recipient of the most recent 15 messages:

    .. code:: python

       for message in reddit.inbox.sent(limit=15):
           print(message.dest)

    """
    endpoint = API_PATH["sent"]
    return ListingGenerator(self._reddit, endpoint, **generator_kwargs)
def stream(self, **stream_options):
    """Yield new inbox items as they become available, oldest first.

    Up to 100 historical items are emitted initially.  Keyword
    arguments are passed through to :func:`.stream_generator`.

    For example, to retrieve all new inbox items:

    .. code:: python

       for item in reddit.inbox.stream():
           print(item)

    """
    return stream_generator(self.unread, **stream_options)
def submission_replies(self, **generator_kwargs):
    """Return a :class:`.ListingGenerator` of replies to the user's
    submissions.

    Extra keyword arguments are forwarded to the
    :class:`.ListingGenerator` constructor.

    To print the author of one request's worth of submission replies:

    .. code:: python

       for reply in reddit.inbox.submission_replies():
           print(reply.author)

    """
    endpoint = API_PATH["submission_replies"]
    return ListingGenerator(self._reddit, endpoint, **generator_kwargs)
def uncollapse(self, items):
"""Mark an inbo | x message as uncollapsed.
:param items: A list containing instances of :class:`.Message`.
Requests are batched at 25 items (reddit limit).
For example, to uncollapse all unread Messages, try:
.. code:: python
from praw.models import Message
unread_messages = []
for item in reddit.inbox.unread(limit=None):
if isins | tance(item, Message):
unread_messages.append(item)
reddit.inbox.uncollapse(unread_messages)
.. seealso::
:meth:`.Message.collapse`
"""
while items:
data = {"id": ",".join(x.fullname for x in items[:25])}
self._reddit.post(API_PATH["uncollapse"], data=data)
items |
ericmjl/bokeh | bokeh/io/__init__.py | Python | bsd-3-clause | 2,128 | 0.011748 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
| '''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from .doc import curdoc
from .export import export_png, export_svgs
from .notebook import install_jupyter_hooks, install_notebook_hook, push_notebook
from .output import output_file, output_notebook, reset_output
from .saving import save
from .showing import show
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Explicit public API of ``bokeh.io``; consumed by ``from bokeh.io import *``.
__all__ = (
    'curdoc',
    'export_png',
    'export_svgs',
    'install_notebook_hook',
    'push_notebook',
    'output_file',
    'output_notebook',
    'save',
    'show',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Install the Jupyter notebook integration hooks once at import time, then
# delete the name so it is not exposed on the module.
install_jupyter_hooks()
del install_jupyter_hooks
|
cosmicAsymmetry/zulip | analytics/tests/test_views.py | Python | apache-2.0 | 2,009 | 0.006471 | from django.utils.timezone import get_fixed_timezone
from zerver.lib.test_classes import ZulipTestCase
from analytics.lib.counts import CountStat
from analytics.views import time_range
from datetime import datetime, timedelta
class TestTimeRange(ZulipTestCase):
    """Unit tests for ``analytics.views.time_range``."""

    def test_time_range(self):
        # type: () -> None
        HOUR = timedelta(hours=1)
        DAY = timedelta(days=1)
        TZINFO = get_fixed_timezone(-100)  # 100 minutes west of UTC

        # Using 22:59 so that converting to UTC and applying
        # ceiling_to_{hour,day} do not commute.
        a_time = datetime(2016, 3, 14, 22, 59).replace(tzinfo=TZINFO)
        # Round up to hour and day
        ceiling_hour = datetime(2016, 3, 14, 23).replace(tzinfo=TZINFO)
        ceiling_day = datetime(2016, 3, 15).replace(tzinfo=TZINFO)

        # test start == end
        self.assertEqual(time_range(a_time, a_time, CountStat.HOUR, None), [ceiling_hour])
        self.assertEqual(time_range(a_time, a_time, CountStat.DAY, None), [ceiling_day])
        # test start == end == boundary, and min_length == 0
        self.assertEqual(time_range(ceiling_hour, ceiling_hour, CountStat.HOUR, 0), [ceiling_hour])
        self.assertEqual(time_range(ceiling_day, ceiling_day, CountStat.DAY, 0), [ceiling_day])
        # test start and end on different boundaries
        self.assertEqual(time_range(ceiling_hour, ceiling_hour+HOUR, CountStat.HOUR, None),
                         [ceiling_hour, ceiling_hour+HOUR])
        self.assertEqual(time_range(ceiling_day, ceiling_day+DAY, CountStat.DAY, None),
                         [ceiling_day, ceiling_day+DAY])
        # test min_length
        self.assertEqual(time_range(ceiling_hour, ceiling_hour+HOUR, CountStat.HOUR, 4),
                         [ceiling_hour-2*HOUR, ceiling_hour-HOUR, ceiling_hour, ceiling_hour+HOUR])
        self.assertEqual(time_range(ceiling_day, ceiling_day+DAY, CountStat.DAY, 4),
                         [ceiling_day-2*DAY, ceiling_day-DAY, ceiling_day, ceiling_day+DAY])
|
srslynow/legal-text-mining | bow_cnn.py | Python | gpl-3.0 | 2,108 | 0.005218 | from __future__ import print_function
import numpy as np
np.random.seed(1337)  # for reproducibility

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
import numpy as np  # NOTE(review): duplicate of the import above; harmless but removable

# Training hyper-parameters.
batch_size = 128
nb_classes = 7
nb_epoch = 12

# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)

# Pre-extracted bag-of-words features and labels saved as numpy arrays.
X_train = np.load("train_data.npy")
y_train = np.load("train_label.npy")
X_test = np.load("test_data.npy")
y_test = np.load("test_label.npy")

X_train = X_train[..., np.newaxis]
X_test = X_test[..., np.newaxis]

# Reshape each sample into a 100x50 "image", then add a single channel axis
# so the data matches the Convolution2D input_shape of (100, 50, 1).
samples = X_train.shape[0]
X_train = np.reshape(X_train, (samples, 100, 50))
samples = X_test.shape[0]
X_test = np.reshape(X_test, (samples, 100, 50))

X_train = X_train[..., np.newaxis]
X_test = X_test[..., np.newaxis]

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1], border_mode='valid', input_shape=(100, 50, 1)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

# NOTE(review): binary_crossentropy with a 7-way softmax looks like it was
# intended for multi-label targets; confirm whether categorical_crossentropy
# was meant here.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
score = model.evaluate(X_test, y_test, verbose=0)
model.save("cnn_model.h5")
print('Test score:', score[0])
print('Test accuracy:', score[1])
sergey-dryabzhinsky/dedupsqlfs | dedupsqlfs/app/mkfs.py | Python | mit | 11,069 | 0.004517 | # -*- coding: utf8 -*-
"""
@todo: Update argument parser options
"""
# Imports. {{{1
import sys
# Try to load the required modules from Python's standard library.
try:
import os
import argparse
from time import time
import hashlib
except ImportError as e:
msg = "Error: Failed to load one of the required Python modules! (%s)\n"
sys.stderr.write(msg % str(e))
sys.exit(1)
from dedupsqlfs.log import logging
from dedupsqlfs.lib import constants
from dedupsqlfs.db import check_engines
import dedupsqlfs
def mkfs(options, compression_methods=None, hash_functions=None):
    """Initialize a dedupsqlfs filesystem and shut it down again.

    Registers the requested compression methods, disables garbage
    collection (pointless while formatting), then runs the operations
    init/destroy cycle to create the on-disk structures.

    Returns 0 on success, -1 when any step raised an exception.
    """
    from dedupsqlfs.fuse.dedupfs import DedupFS
    from dedupsqlfs.fuse.operations import DedupOperations

    operations = None
    exit_code = 0
    try:
        operations = DedupOperations()
        fs = DedupFS(
            operations, None,
            options,
            fsname="dedupsqlfs", allow_root=True)
        if not fs.checkIfLocked():
            fs.saveCompressionMethods(compression_methods)
            for modname in compression_methods:
                fs.appendCompression(modname)
            fs.setOption("gc_umount_enabled", False)
            fs.setOption("gc_vacuum_enabled", False)
            fs.setOption("gc_enabled", False)
            fs.operations.init()
            fs.operations.destroy()
    except Exception:
        import traceback
        print(traceback.format_exc())
        exit_code = -1
    if operations:
        operations.getManager().close()
    return exit_code
def main(): # {{{1
"""
This function enables using mkfs.dedupsqlfs.py as a shell script that creates FUSE
mount points. Execute "mkfs.dedupsqlfs -h" for a list of valid command line options.
"""
logger = logging.getLogger("mkfs.dedupsqlfs/main")
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stderr))
parser = argparse.ArgumentParser(
prog="%s/%s mkfs/%s python/%s" % (dedupsqlfs.__name__, dedupsqlfs.__version__, dedupsqlfs.__fsversion__, sys.version.split()[0]),
conflict_handler="resolve")
# Register some custom command line options with the option parser.
option_stored_in_db = " (this option is only useful when creating a new database, because your choice is stored in the database and can't be changed after that)"
parser.add_argument('-h', '--help', action='help', help="show this help message followed by the command line options defined by the Python FUSE binding and exit")
parser.add_argument('-v', '--verbose', action='count', dest='verbosity', default=0, help="increase verbosity: 0 - error, 1 - warning, 2 - info, 3 - debug, 4 - verbose")
parser.add_argument('--log-file', dest='log_file', help="specify log file location")
parser.add_argument('--log-file-only', dest='log_file_only', action='store_true',
help="Don't send log messages to stderr.")
parser.add_argument('--data', dest='data', metavar='DIRECTORY', default="~/data", help="Specify the base location for the files in which metadata and blocks data is stored. Defaults to ~/data")
parser.add_argument('--name', dest='name', metavar='DATABASE', default="dedupsqlfs", help="Specify the name for the database directory in which metadata and blocks data is stored. Defaults to dedupsqlfs")
parser.add_argument('--temp', dest='temp', metavar='DIRECTORY', help="Specify the location for the files in which temporary data is stored. By default honour TMPDIR environment variable value.")
parser.add_argument('-b', '--block-size', dest='block_size', metavar='BYTES', default=1024*128, type=int, help="Specify the maximum block size in bytes" + option_stored_in_db + ". Defaults to 128kB.")
parser.add_argument('--memory-limit', dest='memory_limit', action='store_true', help="Use some lower values for less memory consumption.")
parser.add_argument('--cpu-limit', dest='cpu_limit', metavar='NUMBER', default=0, type=int, help="Specify the maximum CPU count to use in multiprocess compression. Defaults to 0 (auto).")
engines, msg = check_engines()
if not engines:
logger.error("No storage engines available! Please install sqlite or pymysql python module!")
return 1
parser.add_argument('--storage-engine', dest='storage_engine', metavar='ENGINE', choices=engines, default=engines[0],
help=msg)
if "mysql" in engines:
from dedupsqlfs.db.mysql import get_table_engines
table_engines = get_table_engines()
msg = "One of MySQL table engines: "+", ".join(table_engines)+". Default: %r. Aria and TokuDB engine can be used only with MariaDB or Percona server." % table_engines[0]
parser.add_argument('--table-engine', dest='table_engine', metavar='ENGINE',
choices=table_engines, default=table_engines[0],
help=msg)
parser.add_argument('--no-cache', dest='use_cache', action='store_false', help="Don't use cache in memory and delayed write to storage.")
parser.add_argument('--no-transactions', dest='use_transactions', action='store_false', help="Don't use transactions when making multiple related changes, this might make the file system faster or slower (?).")
parser.add_argument('--no-sync', dest='synchronous', action='store_false', help="Disable SQLite's normal synchronous behavior | which guarantees that data is written to disk immediately, because it slows down the file system too much (this means you might lose data when the mount point isn't cleanly unmounted).")
# Dynamically check for supported hashing algorithms.
msg = "Specify the hashing algorithm that will be used to recognize duplicate data blocks: one of %s. Choose wisely - it can't be cha | nged on the fly."
hash_functions = list({}.fromkeys([h.lower() for h in hashlib.algorithms_available]).keys())
hash_functions.sort()
work_hash_funcs = set(hash_functions) & constants.WANTED_HASH_FUCTIONS
msg %= ', '.join('%r' % fun for fun in work_hash_funcs)
defHash = 'md5' # Hope it will be there always. Stupid.
msg += ". Defaults to %r." % defHash
parser.add_argument('--hash', dest='hash_function', metavar='FUNCTION', choices=work_hash_funcs, default=defHash, help=msg)
# Dynamically check for supported compression methods.
compression_methods = [constants.COMPRESSION_TYPE_NONE]
compression_methods_cmd = [constants.COMPRESSION_TYPE_NONE]
for modname in constants.COMPRESSION_SUPPORTED:
try:
module = __import__(modname)
if hasattr(module, 'compress') and hasattr(module, 'decompress'):
compression_methods.append(modname)
if modname not in constants.COMPRESSION_READONLY:
compression_methods_cmd.append(modname)
except ImportError:
pass
if len(compression_methods) > 1:
compression_methods_cmd.append(constants.COMPRESSION_TYPE_BEST)
compression_methods_cmd.append(constants.COMPRESSION_TYPE_CUSTOM)
msg = "Enable compression of data blocks using one of the supported compression methods: one of %s"
msg %= ', '.join('%r' % mth for mth in compression_methods_cmd)
msg += ". Defaults to %r." % constants.COMPRESSION_TYPE_NONE
msg += " You can use <method>:<level> syntax, <level> can be integer or value from --compression-level."
if len(compression_methods_cmd) > 1:
msg += " %r will try all compression methods and choose one with smaller result data." % constants.COMPRESSION_TYPE_BEST
msg += " %r will try selected compression methods (--custom-compress) and choose one with smaller result data." % constants.COMPRESSION_TYPE_CUSTOM
msg += "\nDefaults to %r." % constants.COMPRESSION_TYPE_NONE
parser.add_argument('--compress', dest='compression', metavar='METHOD', action="append",
default=[constants.COMPRESSION_TYPE_NONE], help=msg)
msg = "Enable compression of data blocks using one or more of the supported compression methods: %s"
msg %= ', '.join('%r' % mth for mth in compression_methods_cmd[:-2])
msg += ". To use two or more methods select this optio |
matthewayne/evernote-sdk-python | sample/all_methods/findNoteCounts.py | Python | bsd-3-clause | 2,735 | 0.012066 | # Import the Evernote client
from evernote.api.client import EvernoteClient
# Import the Evernote note storetypes to get note datatypes
# to properly get note/tag counts (note filter)
import evernote.edam.notestore.ttypes as NoteStoreTypes
# Define access token either:
# Developer Tokens (https://dev.evernote.com/doc/articles/dev_tokens.php)
# or OAuth (https://dev.evernote.com/doc/articles/authentication.php)
access_token = "insert dev or oauth token here"
# Setup the client
client = EvernoteClient(token = access_token, sandbox = True)
# Get note store object
note_store = client.get_note_store()
# Create note filter object
note_filter = NoteStoreTypes.NoteFilter()
# Set note filter search grammer to get notes created in the last 2 days
note_filter.words = "created:day-2"
# Uncommend the following line to set note filter tag GUIDs
#note_filter.tagGuids = ["GUID of tag1", "GUID of tag 2", "...."]
# Set note filter order to descending
note_filter.ascending = False
# Set note filter inative attribute to False (will search only active notes)
# setting this value to True will only return search results that are in the trash
note_filter.inactive = False
# Uncomment the following line to set note time zone of the search to 'America/Los_Angeles'
#note_filter.timeZone = "America/Los_Angeles"
# Uncomment the following line to set note filter emphasized attribute to additional
# 'wish list' search grammer to be used in conjunction with the orinigal search query to
# highlight search results
#note_filter.emphasized = "any: tag:cool -tag:uncool"
# Uncomment the following line to set note filter includeAllReadableNotebooks attribute
# to include all readable business notebooks in a search
# search must be performed on a business note store with a business auth token
#note_filter.includeAllReadableNotebooks=True
# (Boolean) Include note/tags that are in the trash in your note counts
include_trash = True
# Returns an object which maps the number of notes captured by the filter to the corresponding
# notebook GUID
note_counts = note_store.findNoteCounts( note_filter, include_trash )
if note_counts.notebookCounts != None:
print "Found results fro | m %s noteboo | ks" % len(note_counts.notebookCounts)
for notebook in note_counts.notebookCounts:
print " Notebook with GUID %s has %s note(s) that match the filter" % (notebook, note_counts.notebookCounts[notebook])
if note_counts.tagCounts != None:
print "Found results from %s tags" % len(note_counts.notebookCounts)
for tag in note_counts.tagCounts:
print " Tag with GUID %s has %s note(s) that match the filter" % (tag, note_counts.tagCounts[tag])
if not note_counts.tagCounts and not note_counts.notebookCounts:
print "No results" |
nginx/unit | test/unit/check/chroot.py | Python | apache-2.0 | 748 | 0 | import json
from unit.http import TestHTTP
from unit.option import option
http = TestHTTP()


def check_chroot():
    """Probe the running unit instance for "chroot" support.

    Pushes a share route that uses the "chroot" option through the
    control socket; if the configuration is accepted, records the
    feature as available in the shared option state.
    """
    config = {
        "listeners": {"*:7080": {"pass": "routes"}},
        "routes": [
            {
                "action": {
                    "share": option.temp_dir,
                    "chroot": option.temp_dir,
                }
            }
        ],
    }

    resp = http.put(
        url='/config',
        sock_type='unix',
        addr=option.temp_dir + '/control.unit.sock',
        body=json.dumps(config),
    )

    if 'success' in resp['body']:
        option.available['features']['chroot'] = True
|
atizo/djangojames | djangojames/templatetags/truncate.py | Python | gpl-2.0 | 2,021 | 0.005443 | # -*- coding: utf-8 -*-
#
# ITerativ GmbH
# http://www.iterativ.ch/
#
# Copyright (c) 2012 ITerativ GmbH. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Created on Mar 2, 2012
# @author: github.com/maersu
from django.template import Library
from django.utils.encoding import force_unicode
from django.utils.functional import allow_lazy
from django.template.defaultfilters import stringfilter
import re
register = Library()
def truncate_string(s, num):
    """Truncate *s* to roughly *num* characters, appending ' ...'.

    Tries to respect word boundaries: when the cut lands in the middle
    of a word, the partial word is dropped (unless it is the only word).
    Newlines inside words are preserved.
    """
    s = force_unicode(s)
    newlength = int(num)
    if len(s) > newlength:
        # Reserve room for the ' ...' suffix.
        length = newlength - 3
        if s[length-1] == ' ' or s[length] == ' ':
            # Cut falls on (or right after) a space: just trim it.
            s = s[:length].strip()
        else:
            # ' +' instead of ' *': under Python 3.7+ re.split() splits on
            # empty matches, so ' *' would split between every character.
            # On Python 2 both patterns behave identically here.
            words = re.split(' +', s[:length])
            if len(words) > 1:
                # Drop the trailing partial word.
                del words[-1]
            s = u' '.join(words)
        s += ' ...'
    return s
truncate_chars = allow_lazy(truncate_string, unicode)
@register.filter
@stringfilter
def truncatestring(value, arg):
    """Truncate the string after a number of characters.

    Respects word boundaries and keeps newlines.
    Argument: Number of characters.
    """
    try:
        limit = int(arg)
    except ValueError:
        # Argument is not a valid integer: fail silently.
        return value
    return truncate_chars(value, limit)
truncatestring.is_safe = True
bzamecnik/sms-tools | lectures/01-Introduction/plots-code/even-odd.py | Python | agpl-3.0 | 599 | 0.003339 | # matplotlib without any blocking GUI
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as | plt
import numpy as np
N = 500
k = 3
plt.figure(1)
s = np.exp(1j * 2 * np.pi * k / N * np.arange(-N / 2, N / 2))
plt.subplot(1, 2, 1)
plt.plot(np.arange(-N / 2, N / 2), np.real(s), lw=2)
plt.axvline(0, color='g', lw=2)
plt.axis([-N / 2, N / 2, -1, 1])
plt. | title('cosine (even)')
plt.subplot(1, 2, 2)
plt.plot(np.arange(-N / 2, N / 2), np.imag(s), lw=2)
plt.axvline(0, color='g', lw=2)
plt.axis([-N / 2, N / 2, -1, 1])
plt.title('sine (odd)')
plt.tight_layout()
plt.savefig('even-odd.png')
|
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sympy/functions/special/tests/test_tensor_functions.py | Python | agpl-3.0 | 698 | 0.010029 | from sympy import symbols, Dij, LeviCivita
x, y = symbols('x,y')
def test_Dij():
    # Kronecker delta: 1 when both arguments are equal (numerically or
    # structurally), 0 otherwise.
    cases = [
        ((1, 1), 1),
        ((1, 2), 0),
        ((x, x), 1),
        ((x**2 - y**2, x**2 - y**2), 1),
    ]
    for args, expected in cases:
        assert Dij(*args) == expected
def test_levicivita():
    # Integer arguments: +1 for even permutations, -1 for odd ones,
    # 0 when an index repeats; also works for rank > 3.
    numeric_cases = [
        ((1, 2, 3), 1),
        ((1, 3, 2), -1),
        ((1, 2, 2), 0),
        ((1, 2, 3, 1), 0),
        ((4, 5, 1, 2, 3), 1),
        ((4, 5, 2, 1, 3), -1),
    ]
    for args, expected in numeric_cases:
        assert LeviCivita(*args) == expected
    # Symbolic arguments stay unevaluated until doit(), except when a
    # repeated index forces the result to 0.
    i, j, k = symbols('i j k')
    assert LeviCivita(i, j, k) == LeviCivita(i, j, k, evaluate=False)
    assert LeviCivita(i, j, i) == 0
    assert LeviCivita(1, i, i) == 0
    assert LeviCivita(i, j, k).doit() == (j - i)*(k - i)*(k - j)/2
|
devinbalkind/eden | tests/unit_tests/modules/s3/s3gis/GeoJSONLayer.py | Python | mit | 812 | 0.025862 |
s3gis_tests = load_module("tests.unit_tests.modules.s3.s3gis")
def test_GeoJSONLayer():
    # Layer record to insert into the gis_layer_geojson table.
    layer_record = dict(
        name = "Test GeoJSON",
        description = "Test GeoJSON layer",
        enabled = True,
        created_on = datetime.datetime.now(),
        modified_on = datetime.datetime.now(),
        url = "test://test_GeoJSON",
    )
    # JS layer configuration expected in S3.gis.layers_geojson.
    expected_layers = [
        {
            "marker_height": 34,
            "marker_image": u"gis_marker.image.marker_red.png",
            "marker_width": 20,
            "name": u"Test GeoJSON",
            "url": u"test://test_GeoJSON"
        }
    ]
    s3gis_tests.layer_test(
        db,
        db.gis_layer_geojson,
        layer_record,
        "S3.gis.layers_geojson",
        expected_layers,
        session = session,
        request = request,
    )
|
koery/win-sublime | Data/Packages/Package Control/package_control/package_cleanup.py | Python | mit | 15,806 | 0.003416 | import threading
import os
import sublime
from .show_error import show_error
from .console_write import console_write
from .unicode import unicode_from_os
from .clear_directory import clear_directory, delete_directory, clean_old_files
from .automatic_upgrader import AutomaticUpgrader
from .package_manager import PackageManager
from .open_compat import open_compat
from .package_io import package_file_exists
from .settings import preferences_filename, pc_settings_filename, load_list_setting, save_list_setting
from . import loader
from .providers.release_selector import is_compatible_version
class PackageCleanup(threading.Thread):
"""
Cleans up folders for packages that were removed, but that still have files
in use.
"""
def __init__(self):
self.manager = PackageManager()
settings = sublime.load_settings(pc_settings_filename())
self.original_installed_packages = load_list_setting(settings, 'installed_packages')
self.remove_orphaned = settings.get('remove_orphaned', True)
threading.Thread.__init__(self)
def run(self):
found_packages = []
installed_packages = list(self.original_installed_packages)
found_dependencies = []
installed_dependencies = self.manager.list_dependencies()
extra_dependencies = list(set(installed_dependencies) - set(self.manager.find_required_dependencies()))
# Clean up unneeded dependencies so that found_dependencies will only
# end up having required dependencies added to it
for dependency in extra_dependencies:
dependency_dir = os.path.join(sublime.packages_path(), dependency)
if delete_directory(dependency_dir):
console_write(u'Removed directory for unneeded dependency %s' % dependency, True)
else:
cleanup_file = os.path.join(dependency_dir, 'package-control.cleanup')
if not os.path.exists(cleanup_file):
open_compat(cleanup_file, 'w').close()
error_string = (u'Unable to remove directory for unneeded dependency ' +
u'%s - deferring until next start') % dependency
console_write(error_string, True)
# Make sure when cleaning up the dependency files that we remove the loader for it also
loader.remove(dependency)
for package_name in os.listdir(sublime.packages_path()):
found = True
package_dir = os.path.join(sublime.packages_path(), package_name)
if not os.path.isdir(package_dir):
continue
clean_old_files(package_dir)
# Cleanup packages/dependencies that could not be removed due to in-use files
cleanup_file = os.path.join(package_dir, 'package-control.cleanup')
if os.path.exists(cleanup_file):
if delete_directory(package_dir):
console_write(u'Removed old directory %s' % package_name, True)
found = False
else:
if not os.path.exists(cleanup_file):
open_compat(cleanup_file, 'w').close()
error_string = (u'Unable to remove old directory ' +
u'%s - deferring until next start') % package_name
console_write(error_string, True)
# Finish reinstalling packages that could not be upgraded due to
# in-use files
reinstall = os.path.join(package_dir, 'package-control.reinstall')
if os.path.exists(reinstall):
metadata_path = os.path.join(package_dir, 'package-metadata.json')
if not clear_directory(package_dir, [metadata_path]):
if not os.path.exists(reinstall):
open_compat(reinstall, 'w').close()
# Assigning this here prevents the callback from referencing the value
# of the "package_name" variable when it is executed
restart_message = (u'An error occurred while trying to ' +
u'finish the upgrade of %s. You will most likely need to ' +
u'restart your computer to complete the upgrade.') % package_name
def show_still_locked():
show_error(restart_message)
sublime.set_timeout(show_still_locked, 10)
else:
self.manager.install_package(package_name)
if package_file_exists(package_name, 'package-metadata.json'):
# This adds previously installed packages from old versions of
# PC. As of PC 3.0, this should basically never actually be used
# since installed_packages was added in late 2011.
if not self.original_installed_packages:
installed_packages.append(package_name)
params = {
'package': package_name,
'operation': 'install',
'version': \
self.manager.get_metadata(package_name).get('version')
}
self.manager.record_usage(params)
# Cleanup packages that were installed via Package Control, but
# we removed from the "installed_packages" list - usually by
# removing them from another computer and the settings file
# being synced.
elif self.remove_orphaned and package_name not in self.original_installed_packages:
self.manager.backup_package_dir(package_name)
if delete_directory(package_dir):
console_write(u'Removed directory for orphaned package %s' % package_name, True)
found = False
else:
if not os.path.exists(cleanup_file):
open_compat(cleanup_file, 'w').close()
error_string = (u'Unable to remove directory for orphaned package ' +
u'%s - deferring until next start') % package_name
console_write(error_string, True)
if package_name[-20:] == '.package-control-old':
console_write(u'Removed old directory %s' % package_name, True)
delete_directory(package_dir)
# Skip over dependencies since we handle them separately
if package_file_exists(package_name, 'dependency-metadata.json'):
found_dependencies.append(package_name)
continue
if found:
found_packages.append(package_name)
if int(sublime.version()) >= 3000:
installed_path = sublime.installed_packages_path()
for file in os.listdir(installed_path):
if file[-16:] != '.sublime-package':
continue
package_name = file.replace('.sublime-package', '')
if package_name == loader.loader_package_name:
found_dependencies.append(package_name)
continue
# Cleanup packages that were installed via Package Control, but
# we removed from the "installed_packages" list - usually by
# removing th | em from another computer and the settings file
# being synced.
| if self.remove_orphaned and package_name not in self.original_installed_packages and package_file_exists(package_name, 'package-metadata.json'):
# Since Windows locks the .sublime-package files, we must
# do a dance where we disable the package first, which has
# to be done in the main Sublime Text thread.
package_filename = os.path.join(installed_path, file)
# Invoke a function to build the callback since we are in a loop
# and the variable values will change by the time the callback is
# |
# Installer for the 't' task manager.  Prefer setuptools (provides the
# console-script entry point); fall back to distutils when unavailable.
try:
    from setuptools import setup
except ImportError:
    # Only a missing setuptools should trigger the fallback; any other
    # error must propagate (the original bare except hid real failures).
    from distutils.core import setup

setup(
    name='t',
    version='1.2.0',
    author='Steve Losh',
    author_email='steve@stevelosh.com',
    url='http://bitbucket.org/sjl/t',
    py_modules=['t'],
    entry_points={
        'console_scripts': [
            't = t:_main',
        ],
    },
)
|
eriksore/sdn | Old/OpenDaylight.py | Python | mit | 14,381 | 0.005285 | """
OpenDaylight REST API
Copyright 2013 The University of Wisconsin Board of Regents
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Written by: Dale W. Carder, dwcarder@wisc.edu
Network Services Group
Division of Information Technology
University of Wisconsin at Madison
This material is based upon work supported by the National Science Foundation
under Grant No. 1247322.
"""
from __future__ import print_function
import json
import requests
from requests.auth import HTTPBasicAuth
class OpenDaylight(object):
    """Connection details for the OpenDaylight controller REST API.

    Attributes:
        setup -- dictionary of connection parameters with the keys
            'hostname', 'port', 'username', 'password', 'path',
            'container' and 'http'.  Change these as required for your
            installation (the shipped defaults point at
            192.168.231.246:8080 with admin/admin credentials).
        url -- URL for the next REST query; normally built for you by
            prepare()/prepare_url().
        auth -- auth object handed to Requests for each REST query;
            normally built for you by prepare()/prepare_auth().
    """

    def __init__(self):
        """Populate the setup dictionary with default values."""
        self.setup = {
            'hostname': '192.168.231.246',
            'port': '8080',
            'username': 'admin',
            'password': 'admin',
            'path': '/controller/nb/v2/',
            'container': 'default',
            'http': 'http://',
        }
        self._base_url = None
        self.url = None
        self.auth = None

    def prepare(self, app, path):
        """Set up the REST connection details (URL and credentials).

        Arguments:
            app -- northbound API component (application) to talk to.
            path -- the specific REST query for that application.
        """
        self.prepare_url(app, path)
        self.prepare_auth()

    def prepare_auth(self):
        """Build the credentials object and store it in self.auth.

        The OpenDaylight controller currently uses HTTP basic auth; if
        that ever changes, update this method.
        """
        self.auth = HTTPBasicAuth(self.setup['username'],
                                  self.setup['password'])

    def prepare_url(self, app, path):
        """Build the URL for this REST call and store it in self.url.

        Other attributes, including 'container', are taken from the
        setup dictionary.

        Arguments:
            app -- northbound API component (application) to talk to.
            path -- the specific REST query for that application.
        """
        cfg = self.setup
        # Base URL shared by every query to this controller.
        self._base_url = '%s%s:%s%s' % (cfg['http'], cfg['hostname'],
                                        cfg['port'], cfg['path'])
        # Application-specific path for this particular query.
        self.url = '%s%s/%s%s' % (self._base_url, app,
                                  cfg['container'], path)
class OpenDaylightFlow(object):
"""OpenDaylightFlow is an object that talks to the OpenDaylight
Flow Programmer application REST API
OpenDaylight.odl holds an OpenDaylight object containing details
on how to communicate with the controller.
OpenDaylightFlow.request holds a Requests object for the REST
session. Take a look at the Requests documentation for all of
the methods available, but here are a few handy examples:
OpenDaylightFlow.request.status_code - returns the http code
OpenDaylightFlow.request.text - returns the response as text
OpenDaylightFlow.flows holds a dictionary that corresponds to
the flowConfig element in the OpenDaylight REST API. Note that
we don't statically define what those fields are here in this
object. This makes this library code more flexible as flowConfig
changes over time. After all, this is REST, not RPC.
"""
def __init__(self, odl):
"""Mandatory argument:
odl - an OpenDaylight object
"""
self.odl = odl
self.__app = 'flow'
self.request = None
self.flows = None
def get(self, node_id=None, flow_name=None):
"""Get Flows specified on the Controller and stuffs the results into
the OpenDaylightFlow.flows dictionary.
Optional Arguments:
node_id - returns flows just for that switch dpid
flow_name - returns the specifically named flow on that switch
"""
# clear out any remaining crud from previous calls
if hasattr(self, 'request'):
del self.request
if hasattr(self, 'flows'):
del self.flows
if node_id is None:
self.odl.prepare(self.__app, '/')
elif flow_name is None:
self.odl.prepare(self.__app, '/' + 'OF/' + node_id + '/')
else:
self.odl.prepare(self.__app, '/' + 'OF/' + node_id + '/'
+ flow_name + '/')
self.request = requests.get(url=self.odl.url, auth=self.odl.auth)
if self.request.status_code == 200:
self.flows = self.request.json()
if 'flowConfig' in self.flows:
self.flows = self.flows.get('flowConfig')
else:
raise OpenDaylightError({'url':self.odl.url,
'http_code':self.request.status_code,
'msg':self.request.text})
def add(self, flow):
"""Given a dictionary corresponding to a flowConfig, add this flow to
the Controller. Note that the switch dpid and the flow's name is
specified in the flowConfig passed in.
"""
if hasattr(self, 'request'):
del self.request
#print(flow)
self.odl.prepare(self.__app, '/' + flow['node']['@type'] + '/' +
flow['node']['@id'] + '/' + flow['name'] + '/')
headers = {'Content-type': 'application/json'}
body = json.dumps(flow)
self.request = requests.post(url=self.odl.url, auth=self.odl.auth,
data=body, headers=headers)
if self.request.status_code != 201:
raise OpenDaylightError({'url':self.odl.url,
'http_code':self.request.status_code, |
sandeepkrjha/pgmpy | pgmpy/tests/test_sampling/test_continuous_sampling.py | Python | mit | 8,100 | 0.005556 | import unittest
import numpy as np
from pgmpy.factors.continuous import JointGaussianDistribution as JGD
from pgmpy.sampling import (HamiltonianMC as HMC, HamiltonianMCDA as HMCda, GradLogPDFGaussian, NoUTurnSampler as NUTS,
NoUTurnSamplerDA as NUTSda)
class TestHMCInference(unittest.TestCase):
    """Tests for Hamiltonian Monte Carlo sampling on a 3-D Gaussian.

    NOTE(review): the sampling tests are seeded and compare the sample
    covariance against the model covariance with loose norm bounds, so
    they are sensitive to the exact sequence of RNG calls.
    """
    def setUp(self):
        # 3-D Gaussian target and a dual-averaging HMC sampler over it.
        mean = [-1, 1, -1]
        covariance = np.array([[3, 0.8, 0.2], [0.8, 2, 0.3], [0.2, 0.3, 1]])
        self.test_model = JGD(['x', 'y', 'z'], mean, covariance)
        self.hmc_sampler = HMCda(model=self.test_model, grad_log_pdf=GradLogPDFGaussian)
    def test_errors(self):
        # Invalid constructor and sampling arguments must raise
        # TypeError/ValueError as appropriate.
        with self.assertRaises(TypeError):
            HMCda(model=self.test_model, grad_log_pdf=1)
        with self.assertRaises(TypeError):
            HMCda(model=self.test_model, grad_log_pdf=GradLogPDFGaussian, simulate_dynamics=1)
        with self.assertRaises(ValueError):
            HMCda(model=self.test_model, delta=-1)
        with self.assertRaises(TypeError):
            self.hmc_sampler.sample(initial_pos=1, num_adapt=1, num_samples=1, trajectory_length=1)
        with self.assertRaises(TypeError):
            self.hmc_sampler.generate_sample(1, 1, 1, 1).send(None)
        with self.assertRaises(TypeError):
            HMC(model=self.test_model).sample(initial_pos=1, num_samples=1, trajectory_length=1)
        with self.assertRaises(TypeError):
            HMC(model=self.test_model).generate_sample(1, 1, 1).send(None)
    def test_acceptance_prob(self):
        # Metropolis acceptance probability for a fixed position/momentum pair.
        acceptance_probability = self.hmc_sampler._acceptance_prob(np.array([1, 2, 3]), np.array([2, 3, 4]),
                                                                   np.array([1, -1, 1]), np.array([0, 0, 0]))
        np.testing.assert_almost_equal(acceptance_probability, 0.0347363)
    def test_find_resonable_stepsize(self):
        # NOTE(review): method name keeps the upstream typo ("resonable").
        np.random.seed(987654321)
        stepsize = self.hmc_sampler._find_reasonable_stepsize(np.array([-1, 1, -1]))
        np.testing.assert_almost_equal(stepsize, 2.0)
    def test_adapt_params(self):
        # One dual-averaging adaptation step with known inputs.
        stepsize, stepsize_bar, h_bar = self.hmc_sampler._adapt_params(0.0025, 1, 1, np.log(0.025), 2, 1)
        np.testing.assert_almost_equal(stepsize, 3.13439452e-13)
        np.testing.assert_almost_equal(stepsize_bar, 3.6742481e-08)
        np.testing.assert_almost_equal(h_bar, 0.8875)
    def test_sample(self):
        # Seeding is done for _find_reasonable_stepsize method
        # Testing sample method simple HMC
        np.random.seed(3124141)
        samples = self.hmc_sampler.sample(initial_pos=[0.3, 0.4, 0.2], num_adapt=0,
                                          num_samples=10000, trajectory_length=4)
        covariance = np.cov(samples.values.T)
        self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 3)
        # Testing sample of method of HMCda
        np.random.seed(3124141)
        samples = self.hmc_sampler.sample(initial_pos=[0.6, 0.2, 0.8], num_adapt=10000,
                                          num_samples=10000, trajectory_length=4)
        covariance = np.cov(samples.values.T)
        self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 0.3)
        # Testing generate_sample method of simple HMC
        np.random.seed(3124141)
        gen_samples = self.hmc_sampler.generate_sample(initial_pos=[0.3, 0.4, 0.2], num_adapt=0,
                                                       num_samples=10000, trajectory_length=4)
        samples = np.array([sample for sample in gen_samples])
        covariance = np.cov(samples.T)
        self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 3)
        # Testing sample of method of HMCda
        np.random.seed(3124141)
        gen_samples = self.hmc_sampler.generate_sample(initial_pos=[0.6, 0.2, 0.8], num_adapt=10000,
                                                       num_samples=10000, trajectory_length=4)
        samples = np.array([sample for sample in gen_samples])
        covariance = np.cov(samples.T)
        self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 0.3)
    def tearDown(self):
        del self.hmc_sampler
        del self.test_model
class TestNUTSInference(unittest.TestCase):
    """Tests for No-U-Turn sampling on a 3-D Gaussian.

    The sampling tests are seeded and compare the sample covariance
    against the model covariance with loose norm bounds.
    """
    def setUp(self):
        # 3-D Gaussian target and a dual-averaging NUTS sampler over it.
        mean = np.array([-1, 1, 0])
        covariance = np.array([[6, 0.7, 0.2], [0.7, 3, 0.9], [0.2, 0.9, 1]])
        self.test_model = JGD(['x', 'y', 'z'], mean, covariance)
        self.nuts_sampler = NUTSda(model=self.test_model, grad_log_pdf=GradLogPDFGaussian)
    def test_errors(self):
        # Invalid constructor and sampling arguments must raise
        # TypeError/ValueError as appropriate.
        with self.assertRaises(TypeError):
            NUTS(model=self.test_model, grad_log_pdf=JGD)
        with self.assertRaises(TypeError):
            NUTS(model=self.test_model, grad_log_pdf=None, simulate_dynamics=GradLogPDFGaussian)
        with self.assertRaises(ValueError):
            NUTSda(model=self.test_model, delta=-0.2, grad_log_pdf=None)
        with self.assertRaises(ValueError):
            NUTSda(model=self.test_model, delta=1.1, grad_log_pdf=GradLogPDFGaussian)
        with self.assertRaises(TypeError):
            NUTS(self.test_model, GradLogPDFGaussian).sample(initial_pos={1, 1, 1}, num_samples=1)
        with self.assertRaises(ValueError):
            NUTS(self.test_model, GradLogPDFGaussian).sample(initial_pos=[1, 1], num_samples=1)
        with self.assertRaises(TypeError):
            NUTSda(self.test_model, GradLogPDFGaussian).sample(initial_pos=1, num_samples=1, num_adapt=1)
        with self.assertRaises(ValueError):
            NUTSda(self.test_model, GradLogPDFGaussian).sample(initial_pos=[1, 1, 1, 1], num_samples=1, num_adapt=1)
        with self.assertRaises(TypeError):
            NUTS(self.test_model, GradLogPDFGaussian).generate_sample(initial_pos=0.1, num_samples=1).send(None)
        with self.assertRaises(ValueError):
            NUTS(self.test_model, GradLogPDFGaussian).generate_sample(initial_pos=(0, 1, 1, 1),
                                                                      num_samples=1).send(None)
        with self.assertRaises(TypeError):
            NUTSda(self.test_model, GradLogPDFGaussian).generate_sample(initial_pos=[[1, 2, 3]], num_samples=1,
                                                                        num_adapt=1).send(None)
        with self.assertRaises(ValueError):
            NUTSda(self.test_model, GradLogPDFGaussian).generate_sample(initial_pos=[1], num_samples=1,
                                                                        num_adapt=1).send(None)
    def test_sampling(self):
        # No adaptation: loose bound on the recovered covariance.
        np.random.seed(1010101)
        samples = self.nuts_sampler.sample(initial_pos=[-0.4, 1, 3.6], num_adapt=0, num_samples=10000,
                                           return_type='recarray')
        sample_array = np.array([samples[var_name] for var_name in self.test_model.variables])
        sample_covariance = np.cov(sample_array)
        self.assertTrue(np.linalg.norm(sample_covariance - self.test_model.covariance) < 3)
        # FIX: seed literal was corrupted in the source ("12 | 10161")
        np.random.seed(1210161)
        samples = self.nuts_sampler.generate_sample(initial_pos=[-0.4, 1, 3.6], num_adapt=0, num_samples=10000)
        samples_array = np.array([sample for sample in samples])
        sample_covariance = np.cov(samples_array.T)
        self.assertTrue(np.linalg.norm(sample_covariance - self.test_model.covariance) < 3)
        # With adaptation: tighter bound on the recovered covariance.
        np.random.seed(12313131)
        samples = self.nuts_sampler.sample(initial_pos=[0.2, 0.4, 2.2], num_adapt=10000, num_samples=10000)
        sample_covariance = np.cov(samples.values.T)
        self.assertTrue(np.linalg.norm(sample_covariance - self.test_model.covariance) < 0.4)
        np.random.seed(921312312)
        samples = self.nuts_sampler.generate_sample(initial_pos=[0.2, 0.4, 2.2], num_adapt=10000, num_samples=10000)
        samples_array = np.array([sample for sample in samples])
        sample_covariance = np.cov(samples_array.T)
        self.assertTrue(np.linalg.norm(sample_covariance - self.test_model.covariance) < 0.4)
    def tearDown(self):
        del self.test_model
        del self.nuts_sampler
|
alcemirfernandes/irobotgame | lib/data.py | Python | gpl-3.0 | 788 | 0.006345 | # I Robot? - a dancing robot game for pyweek
#
# Copyright: 2008 Hugo Ruscitti
# License: GPL 3
# Web: http://www.losersjuegos.com.ar
'''Simple data loader module.
Loads data files from the "data" directory shipped with a game.
Enhancing this to handle caching etc. is left as an exercise for the reader.
'''
import os
#data_py = os.path.abspath(os.path.dirname(__file__))
#data_dir = os.path.normpath(os.path.join(data_py, '..', 'data'))
# Base directory, relative to the working directory, that holds game data.
data_dir = 'data'


def filepath(filename):
    """Determine the path to *filename* in the data directory."""
    return os.path.join(data_dir, filename)


def load(filename, mode='rb'):
    """Open a file in the data directory.

    *mode* is passed as the second argument to open().
    (FIX: this line was corrupted by an extraction artifact.)
    """
    return open(os.path.join(data_dir, filename), mode)
|
zentralopensource/zentral | zentral/contrib/inventory/compliance_checks.py | Python | apache-2.0 | 5,942 | 0.003366 | import logging
import threading
import time
from django.utils.functional import cached_property, SimpleLazyObject
import jmespath
from zentral.core.compliance_checks import register_compliance_check_class
from zentral.core.compliance_checks.compliance_checks import BaseComplianceCheck
from zentral.core.compliance_checks.models import Status
from zentral.core.compliance_checks.utils import update_machine_statuses
from .events import JMESPathCheckStatusUpdated
from .models import JMESPathCheck, MachineTag
logger = logging.getLogger("zentral.contrib.inventory.compliance_checks")
class InventoryJMESPathCheck(BaseComplianceCheck):
    """Compliance-check adapter for inventory JMESPath checks.

    Scoping: a check applies to a machine when the machine carries one of the
    check's tags (or the check has no tags), reports a snapshot from one of
    the check's sources, and runs on one of the check's platforms — all
    expressed in the raw SQL below.
    """
    model_display = "Inventory JMESPath check"
    required_view_permissions = ("inventory.view_jmespathcheck",)
    # Raw SQL used to list the compliance checks in scope for one machine.
    # Parameters: %(tag_ids)s (machine tag id array), %(serial_number)s.
    scoped_cc_query = (
        "select cc.model, cc.id, cc.name, cc.version "
        "from compliance_checks_compliancecheck as cc "
        "join inventory_jmespathcheck as jc on (jc.compliance_check_id = cc.id) "
        "left join inventory_jmespathcheck_tags as jct on (jct.jmespathcheck_id = jc.id) "
        "where (jct.tag_id is null or jct.tag_id = any (%(tag_ids)s)) "
        "and lower(jc.source_name) in ("
        "  select distinct lower(s.name)"
        "  from inventory_source as s"
        "  join inventory_currentmachinesnapshot as cms on (cms.source_id = s.id)"
        "  where cms.serial_number = %(serial_number)s"
        ") and jc.platforms && array("
        "  select platform from inventory_machinesnapshot as ms"
        "  join inventory_currentmachinesnapshot as cms on (cms.machine_snapshot_id = ms.id)"
        "  where cms.serial_number = %(serial_number)s"
        ")"
    )

    @cached_property
    def jmespath_check(self):
        # The linked JMESPathCheck, or None if it has been deleted.
        try:
            return self.compliance_check.jmespath_check
        except JMESPathCheck.DoesNotExist:
            return

    def get_redirect_url(self):
        # Where the UI sends users who click through from this check.
        return self.jmespath_check.get_absolute_url()


# Make this check type known to the compliance-checks registry at import time.
register_compliance_check_class(InventoryJMESPathCheck)
class JMESPathChecksCache:
    """TTL-based in-memory cache of all JMESPathCheck objects.

    Lookups are indexed by (source name, platform); the whole cache is
    reloaded from the database when it is older than ``ttl`` seconds.
    """
    # TODO: hard coded ttl
    ttl = 300  # cache ttl in seconds

    def __init__(self):
        # (source_name.lower(), platform) -> [(tag id frozenset, compiled expr, check), ...]
        self._source_platform_checks = {}
        # compliance check pk -> JMESPathCheck, used to build events later
        self._checks = {}
        self._last_fetched_time = None
        # Guards _load() against concurrent refreshes.
        self._lock = threading.Lock()

    def _load(self):
        """Reload all checks from the DB if the TTL has expired.

        Caller must hold ``self._lock``.
        """
        if self._last_fetched_time is not None and (time.monotonic() - self._last_fetched_time) < self.ttl:
            return
        self._source_platform_checks = {}
        self._checks = {}
        for jmespath_check in (JMESPathCheck.objects.select_related("compliance_check")
                                                    .prefetch_related("tags")
                                                    .all()):
            source_name = jmespath_check.source_name.lower()
            tags_set = frozenset(tag.id for tag in jmespath_check.tags.all())
            # Compile once here so per-tree evaluation is cheap.
            compiled_jmespath_expression = jmespath.compile(jmespath_check.jmespath_expression)
            for platform in jmespath_check.platforms:
                self._source_platform_checks.setdefault((source_name, platform), []).append(
                    (tags_set, compiled_jmespath_expression, jmespath_check)
                )
            self._checks[jmespath_check.compliance_check.pk] = jmespath_check
        self._last_fetched_time = time.monotonic()

    def _get_source_platform_checks(self, source_name, platform):
        """Return the cached checks scoped to this (source, platform) pair."""
        with self._lock:
            self._load()
        return self._source_platform_checks.get((source_name.lower(), platform), [])

    def process_tree(self, tree, last_seen):
        """Evaluate all matching checks against one machine snapshot tree.

        Generator: yields a JMESPathCheckStatusUpdated event for each check
        whose status actually changed.
        """
        machine_tag_set = None  # fetched lazily, only if some check is tag-scoped
        compliance_check_statuses = []
        serial_number = tree["serial_number"]
        source_name = tree["source"]["name"]
        platform = tree.get("platform")
        if not platform:
            # Platform is part of the scoping key; without it nothing can match.
            logger.warning("Cannot process %s %s tree: missing platform", source_name, serial_number)
            return
        for check_tag_set, jmespath_parsed_expr, jmespath_check in self._get_source_platform_checks(
            source_name,
            platform
        ):
            if check_tag_set:
                if machine_tag_set is None:
                    # TODO cache?
                    machine_tag_set = set(
                        MachineTag.objects.filter(serial_number=serial_number).values_list("tag_id", flat=True)
                    )
                if not check_tag_set.intersection(machine_tag_set):
                    # tags mismatch
                    continue
            # default to unknown status
            status = Status.UNKNOWN
            try:
                result = jmespath_parsed_expr.search(tree)
            except Exception:
                logger.exception("Could not evaluate JMESPath check %s", jmespath_check.pk)
            else:
                # Only strict booleans map to OK / FAILED; anything else stays UNKNOWN.
                if result is True:
                    status = Status.OK
                elif result is False:
                    status = Status.FAILED
                else:
                    logger.warning("JMESPath check %s result is not a boolean", jmespath_check.pk)
            compliance_check_statuses.append((jmespath_check.compliance_check, status, last_seen))
        if not compliance_check_statuses:
            # nothing to update, no events
            return
        status_updates = update_machine_statuses(serial_number, compliance_check_statuses)
        for compliance_check_pk, status_value, previous_status_value in status_updates:
            if status_value == previous_status_value:
                # status not updated, no event
                continue
            yield JMESPathCheckStatusUpdated.build_from_object_serial_number_and_statuses(
                self._checks[compliance_check_pk],
                serial_number,
                Status(status_value),
                Status(previous_status_value) if previous_status_value is not None else None
            )
jmespath_checks_cache = SimpleLazyObject(lambda: JMESPathChecksCache())
|
twitter/pants | tests/python/pants_test/jvm/jvm_task_test_base.py | Python | apache-2.0 | 2,419 | 0.004961 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.backend.jvm.subsystems.resolve_subsystem import JvmResolveSubsystem
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.util.dirutil import safe_file_dump, safe_mkdir, safe_mkdtemp
from pants_test.subsystem.subsystem_util import init_subsystem
from pants_test.task_test_base import TaskTestBase
class JvmTaskTestBase(TaskTestBase):
    """Base class for JVM task tests: forces the ivy resolver and provides
    helpers to fake the 'runtime_classpath' product on a context.

    :API: public
    """

    def setUp(self):
        """
        :API: public
        """
        super(JvmTaskTestBase, self).setUp()
        init_subsystem(JvmResolveSubsystem)
        # Tests resolve through ivy rather than coursier.
        self.set_options_for_scope('resolver', resolver='ivy')

    def populate_runtime_classpath(self, context, classpath=None):
        """
        Helps actual test cases to populate the 'runtime_classpath' products data mapping
        in the context, which holds the classpath value for targets.

        :API: public

        :param context: The execution context where the products data mapping lives.
        :param classpath: a list of classpath strings. If not specified, an empty
                          classpath is used.
        """
        classpath = classpath or []
        runtime_classpath = self.get_runtime_classpath(context)
        # Every entry goes into the 'default' classpath conf for all targets.
        runtime_classpath.add_for_targets(context.targets(),
                                          [('default', entry) for entry in classpath])

    def add_to_runtime_classpath(self, context, tgt, files_dict):
        """Creates and adds the given files to the classpath for the given target under a temp path.

        :API: public

        :param context: The execution context holding the products mapping.
        :param tgt: The target whose classpath receives the new entry.
        :param files_dict: mapping of relative path -> file content to dump.
        """
        runtime_classpath = self.get_runtime_classpath(context)
        # Create a temporary directory under the target id, then dump all files.
        target_dir = os.path.join(self.test_workdir, tgt.id)
        safe_mkdir(target_dir)
        classpath_dir = safe_mkdtemp(dir=target_dir)
        for rel_path, content in files_dict.items():
            safe_file_dump(os.path.join(classpath_dir, rel_path), content)
        # Add to the classpath.
        runtime_classpath.add_for_target(tgt, [('default', classpath_dir)])

    def get_runtime_classpath(self, context):
        """Return the 'runtime_classpath' product, creating it if needed.

        :API: public
        """
        return context.products.get_data('runtime_classpath', init_func=ClasspathProducts.init_func(self.pants_workdir))
|
sysbio-curie/NaviCell | bindings/python/setup.py | Python | lgpl-3.0 | 340 | 0.002941 | from distutils.core import setup
# Packaging metadata for the NaviCell Python binding.
setup(
    name='curie',
    version='0.1.1',
    author='Eric Viara',
    author_email='eric.viara@curie.fr',
    packages=['curie'],
    # NOTE(review): this URL points at the TowelStuff example project — it
    # looks like a leftover from a setup.py template; confirm the real URL.
    url='http://pypi.python.org/pypi/TowelStuff/',
    license='LICENSE.txt',
    description='NaviCell Python Binding',
    long_description=open('README.txt').read()
)
|
rdhyee/osf.io | website/addons/s3/tests/test_model.py | Python | apache-2.0 | 3,529 | 0.00255 | from nose.tools import * # noqa
import mock
from boto.s3.connection import * # noqa
from tests.base import OsfTestCase, get_default_metaschema
from tests.factories import ProjectFactory
from framework.auth import Auth
from website.addons.base.testing import models
from website.addons.s3.model import S3NodeSettings
from website.addons.s3.tests.factories import (
S3UserSettingsFactory,
S3NodeSettingsFactory,
S3AccountFactory
)
class TestUserSettings(models.OAuthAddonUserSettingTestSuiteMixin, OsfTestCase):
    """User-settings tests for the S3 addon; all behavior comes from the mixin."""
    short_name = 's3'
    full_name = 'Amazon S3'
    ExternalAccountFactory = S3AccountFactory
class TestNodeSettings(models.OAuthAddonNodeSettingsTestSuiteMixin, OsfTestCase):
    """Node-settings tests for the S3 addon, including registration behavior
    and the serialization overrides used by waterbutler."""
    short_name = 's3'
    full_name = 'Amazon S3'
    ExternalAccountFactory = S3AccountFactory
    NodeSettingsFactory = S3NodeSettingsFactory
    NodeSettingsClass = S3NodeSettings
    UserSettingsFactory = S3UserSettingsFactory

    def test_registration_settings(self):
        # S3 settings must not be cloned onto a registration.
        registration = ProjectFactory()
        clone, message = self.node_settings.after_register(
            self.node, registration, self.user,
        )
        assert_is_none(clone)

    def test_before_register_no_settings(self):
        # Without user settings there is nothing to warn about.
        self.node_settings.user_settings = None
        message = self.node_settings.before_register(self.node, self.user)
        assert_false(message)

    def test_before_register_no_auth(self):
        # Without a linked external account there is nothing to warn about.
        self.node_settings.external_account = None
        message = self.node_settings.before_register(self.node, self.user)
        assert_false(message)

    def test_before_register_settings_and_auth(self):
        # With both settings and auth, registering produces a warning message.
        message = self.node_settings.before_register(self.node, self.user)
        assert_true(message)

    @mock.patch('website.archiver.tasks.archive')
    def test_does_not_get_copied_to_registrations(self, mock_archive):
        # Registering the node must not carry the s3 addon over.
        registration = self.node.register_node(
            schema=get_default_metaschema(),
            auth=Auth(user=self.user),
            data='hodor',
        )
        assert_false(registration.has_addon('s3'))

    ## Overrides ##

    def test_serialize_credentials(self):
        # Credentials map the external account's key/secret pair.
        self.user_settings.external_accounts[0].oauth_key = 'key-11'
        self.user_settings.external_accounts[0].oauth_secret = 'secret-15'
        self.user_settings.save()
        credentials = self.node_settings.serialize_waterbutler_credentials()
        expected = {'access_key': self.node_settings.external_account.oauth_key,
                    'secret_key': self.node_settings.external_account.oauth_secret}
        assert_equal(credentials, expected)

    @mock.patch('website.addons.s3.model.bucket_exists')
    @mock.patch('website.addons.s3.model.get_bucket_location_or_error')
    def test_set_folder(self, mock_location, mock_exists):
        # set_folder() validates the bucket remotely; both checks are mocked.
        mock_exists.return_value = True
        mock_location.return_value = ''
        folder_id = '1234567890'
        self.node_settings.set_folder(folder_id, auth=Auth(self.user))
        self.node_settings.save()
        # Bucket was set
        assert_equal(self.node_settings.folder_id, folder_id)
        # Log was saved
        last_log = self.node.logs[-1]
        assert_equal(last_log.action, '{0}_bucket_linked'.format(self.short_name))

    def test_serialize_settings(self):
        # Settings expose the bucket name and the encryption flag.
        settings = self.node_settings.serialize_waterbutler_settings()
        expected = {'bucket': self.node_settings.folder_id,
                    'encrypt_uploads': self.node_settings.encrypt_uploads}
        assert_equal(settings, expected)
|
overplumbum/bugsnag-python | example/django/bugsnag_demo/wsgi.py | Python | mit | 399 | 0.002506 | """
WSGI config for bugsnag_demo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os

# Must be set before importing anything that touches Django settings.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bugsnag_demo.settings")

from django.core.wsgi import get_wsgi_application

# WSGI entry point used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
|
PolyJIT/benchbuild | tests/environments/domain/test_model.py | Python | mit | 2,780 | 0 | """
Describe usage of our default container/image domain.
"""
from typing import Hashable
from benchbuild.environments.domain import model
def describe_layers():
    """pytest-describe style spec: every layer type must be Hashable, because
    images compare/deduplicate their layers by hash."""

    def from_is_hashable():
        layer = model.FromLayer('a')
        assert isinstance(layer, Hashable)

    def add_is_hashable():
        layer = model.AddLayer(('a', 'b', 'c'), 'd')
        assert isinstance(layer, Hashable)

    def copy_is_hashable():
        layer = model.CopyLayer(('a', 'b', 'c'), 'd')
        assert isinstance(layer, Hashable)

    def run_is_hashable():
        layer = model.RunLayer(
            'cmd', ('a', 'b', 'c'), dict(a='a', b='b', c='c')
        )
        assert isinstance(layer, Hashable)

    def context_is_hashable():
        # ContextLayer wraps a callable; the callable itself is opaque here.
        layer = model.ContextLayer(lambda: None)
        assert isinstance(layer, Hashable)

    def env_is_hashable():
        layer = model.UpdateEnv(dict(a='a', b='b', c='c'))
        assert isinstance(layer, Hashable)

    def workdir_is_hashable():
        layer = model.WorkingDirectory('a')
        assert isinstance(layer, Hashable)

    def entrypoint_is_hashable():
        layer = model.EntryPoint(('a', 'b', 'c'))
        assert isinstance(layer, Hashable)

    def cmd_is_hashable():
        layer = model.SetCommand(('a', 'b', 'c'))
        assert isinstance(layer, Hashable)
def describe_image():
    """pytest-describe style spec for the Image aggregate: construction,
    layer append/prepend ordering, and hashability."""

    def image_requires_name_and_base():
        img = model.Image('name', model.FromLayer('base'), [])
        assert img.name == 'name'
        assert img.from_ == model.FromLayer('base')
        assert len(img.layers) == 0

    def can_append_layers_to_image():
        # append() adds at the end of the layer list.
        img = model.Image('-', model.FromLayer('-'), [model.FromLayer('base')])
        img.append(model.WorkingDirectory('abc'))
        assert img.layers == [
            model.FromLayer('base'),
            model.WorkingDirectory('abc')
        ]

    def can_prepend_layers_to_image():
        # prepend() adds at the front of the layer list.
        img = model.Image(
            '-', model.FromLayer('-'), [model.WorkingDirectory('abc')]
        )
        img.prepend(model.FromLayer('base'))
        assert img.layers == [
            model.FromLayer('base'),
            model.WorkingDirectory('abc')
        ]

    def is_hashable():
        # An image holding one of every layer kind must itself be hashable.
        layers = [
            model.FromLayer('a'),
            model.AddLayer(('a', 'b', 'c'), 'd'),
            model.CopyLayer(('a', 'b', 'c'), 'd'),
            model.RunLayer('cmd', ('a', 'b', 'c'), dict(a='a', b='b', c='c')),
            model.ContextLayer(lambda: None),
            model.UpdateEnv(dict(a='a', b='b', c='c')),
            model.WorkingDirectory('a'),
            model.EntryPoint(('a', 'b', 'c')),
            model.SetCommand(('a', 'b', 'c'))
        ]
        img = model.Image('-', model.FromLayer('-'), layers)
        assert isinstance(img, Hashable)
|
nachandr/cfme_tests | cfme/automate/dialogs/__init__.py | Python | gpl-2.0 | 4,517 | 0.000443 | from widgetastic.widget import Text
from widgetastic.widget import View
from widgetastic_patternfly import Button
from widgetastic_patternfly import Dropdown
from widgetastic_patternfly import Input
from cfme.common import BaseLoggedInPage
from widgetastic_manageiq import Accordion
from widgetastic_manageiq import DialogButton
from widgetastic_m | anageiq import DialogElement
from widgetastic_manageiq import DragandDropElements
from widgetastic_manageiq import ManageIQTree
class AutomateCustomizationView(BaseLoggedInPage):
    """Top-level view for Automation > Automate > Customization."""

    @property
    def in_customization(self):
        # True when the main navigation shows the Customization node selected.
        return (
            self.logged_in_as_current_user and
            self.navigation.currently_selected == ["Automation", "Automate", "Customization"]
        )

    @property
    def is_displayed(self):
        return self.in_customization and self.configuration.is_displayed

    @View.nested
    class service_dialogs(Accordion):  # noqa
        ACCORDION_NAME = 'Service Dialogs'
        tree = ManageIQTree()

    configuration = Dropdown('Configuration')
class DialogForm(AutomateCustomizationView):
    """Base form shared by the add/edit/copy service-dialog views."""
    title = Text('//div[@id= "main-content"]//h1')
    sub_title = Text('//div[@id= "main-content"]//h2')
    element = DialogElement()
    label = Input(id='name')
    description = Input(id="description")
    save = Button('Save')
    cancel = Button('Cancel')


class AddDialogView(DialogForm):
    """View shown when creating a new service dialog."""
    create_tab = Text(locator='.//li/a[contains(@class, "create-tab")]')

    @property
    def is_displayed(self):
        # Page title changed in 5.11.
        expected_title = (
            "Automate Customization"
            if self.browser.product_version < "5.11"
            else "Add a new Dialog"
        )
        return (
            self.in_customization
            and self.title.text == expected_title
            and self.sub_title.text == "General"
            and self.create_tab.is_displayed
        )


class EditDialogView(DialogForm):
    """View shown when editing an existing service dialog."""
    save_button = Button('Save')
    cancel_button = Button('Cancel')

    @property
    def is_displayed(self):
        obj = self.context["object"]
        expected_title = (
            "Automate Customization"
            if self.browser.product_version < "5.11"
            else f'Editing {obj.label} Service Dialog'
        )
        return (
            self.in_customization
            and self.title.text == expected_title
            and self.sub_title.text == "General"
            and self.label.read() == obj.label
        )


class CopyDialogView(DialogForm):
    """View shown when copying a service dialog.

    NOTE(review): on 5.11+ the expected title is the same
    'Editing ... Service Dialog' string as the edit view — confirm the copy
    page really shares that title.
    """
    save_button = Button('Save')
    cancel_button = Button('Cancel')

    @property
    def is_displayed(self):
        obj = self.context["object"]
        # A copy is pre-labelled 'Copy of <original label>'.
        expected_label = f'Copy of {obj.label}'
        expected_title = (
            "Automate Customization"
            if self.browser.product_version < "5.11"
            else f'Editing {obj.label} Service Dialog'
        )
        return (
            self.in_customization
            and self.title.text == expected_title
            and self.sub_title.text == "General"
            and self.label.read() == expected_label
        )
class TabForm(AddDialogView):
    """Form bits for a dialog tab (label/description + save/cancel)."""
    tab_label = Input(id='label')
    tab_desc = Input(name="description")
    save_button = DialogButton('Save')
    cancel_button = DialogButton('Cancel')


class AddTabView(TabForm):
    """View for adding a tab to a service dialog."""
    box = Text(locator='.//div[contains(@class, "panel-heading")]/strong')
    add_section = Text(locator='.//div/i[normalize-space(.)="fa-plus-circle"]')
    new_tab = Text(locator='.//a[normalize-space(.)="New tab"]')
    # NOTE(review): this targets the edit icon *inside* the "New tab" link —
    # confirm that is the intended anchor for editing an existing tab.
    edit_tab = Text(locator='.//a[normalize-space(.)="New tab"]'
                            '/i[contains(@class, "pficon-edit")]')

    @property
    def is_displayed(self):
        return self.in_customization and self.box.is_displayed


class BoxForm(AddTabView):
    """Form bits for a dialog section ("box")."""
    box_label = Input(id='label')
    box_desc = Input(name="description")
    save_button = DialogButton('Save')
    cancel_button = DialogButton('Cancel')


class AddBoxView(BoxForm):
    """AddBox View."""
    component = Text(
        locator='.//div[normalize-space(.)="Drag items here to add to the dialog. At '
                'least one item is required before saving"]')
    dd = DragandDropElements()
    new_box = Text(locator='.//div[normalize-space(.)="New section"]')
    # NOTE(review): same pattern as edit_tab — targets the edit icon inside
    # "New section"; confirm the intended target.
    edit_box = Text(locator='.//div[normalize-space(.)="New section"]'
                            '/i[contains(@class, "pficon-edit")]')

    @property
    def is_displayed(self):
        return self.in_customization and self.component.is_displayed
|
neilLasrado/frappe | frappe/patches/v7_1/set_backup_limit.py | Python | mit | 303 | 0.013201 | from __future__ import unicode_literals
from frappe.ut | ils import cint
import frappe
def execute():
    """Patch: give System Settings a default backup_limit of 3 when unset/zero."""
    current_limit = frappe.db.get_single_value('System Settings', 'backup_limit')
    if not cint(current_limit):
        frappe.db.set_value('System Settings', 'System Settings', 'backup_limit', 3)
|
HurricaneLabs/check_splunk | splunk.py | Python | mit | 13,554 | 0.003984 | import datetime
import requests
import time
import xml.etree.ElementTree as ET
class TimeoutError(Exception):
    """Raised when a search job yields no results within the timeout.

    NOTE(review): this shadows the builtin TimeoutError (Python 3.3+);
    callers catching the builtin will not catch this class.
    """
    pass
class ApiError(Exception):
    """Raised when a Splunk REST response carries an ERROR message."""
    pass
def parse_skey(skey):
    """Convert a Splunk REST <s:key> element into a (name, value) pair.

    A key with no children is a leaf and its text is the value; a single
    <s:dict> or <s:list> child is parsed recursively via parse_sdict /
    parse_slist.
    """
    if len(skey) == 0:
        # Just a value
        value = skey.text
    else:
        child = skey[0]  # Should only have one child
        if child.tag == "{http://dev.splunk.com/ns/rest}dict":
            value = parse_sdict(child)
        elif child.tag == "{http://dev.splunk.com/ns/rest}list":
            value = parse_slist(child)
        else:
            # Unknown container tag: fall back to the key's text instead of
            # crashing — the original left `value` unassigned here, which
            # raised UnboundLocalError at the return statement.
            value = skey.text
    return (skey.get("name"), value)
def parse_sdict(sdict):
    """Parse a Splunk REST <s:dict> element into a plain dict.

    Each <s:key> child contributes one entry; duplicate names keep the
    last occurrence, matching plain dict assignment.
    """
    pairs = (
        parse_skey(skey)
        for skey in sdict.iterfind("./{http://dev.splunk.com/ns/rest}key")
    )
    return dict(pairs)
def parse_slist(slist):
    """Collect the text of every child of a Splunk REST <s:list> element."""
    return [item.text for item in slist]
class SplunkServer(object):
def __init__(self, hostname, username, password, port=8089, use_ssl=True, timeout=30, **kwargs):
self.session = requests.Session()
self.server = "%s://%s:%d" % ("https" if use_ssl else "http", hostname, int(port))
self.session.auth = (username, password)
self.cache = dict()
self.timeout = int(timeout)
def _get_url(self, url, cache=True, **kwargs):
if "urls" not in self.cache:
self.cache["urls"] = dict()
url = url.format(**kwargs)
if url in self.cache["urls"]:
return self.cache["urls"][url]
url = "%s%s" % (self.server, url)
r = self.session.get(url, verify=False)
if cache:
self.cache["urls"][url] = ET.fromstring(r.text)
self.check_for_error(self.cache["urls"][url])
return self.cache["urls"][url]
def check_for_error(self, root):
msg = root.find("./messages/msg[@type='ERROR']")
if msg is not None:
raise ApiError(msg.text.strip())
    def _run_search(self, search, as_list=False, **kwargs):
        """Run a Splunk search job and return its results.

        Submits the job, then polls the results endpoint once per second
        until it answers 200 or ``self.timeout`` seconds elapse. Raises the
        module-local TimeoutError when the final poll still returns 204
        ("no content yet"). Extra **kwargs are passed as job parameters.

        Returns a generator of {field name: value} dicts, or a list when
        ``as_list`` is true.
        """
        url = "{0}{1}".format(self.server, "/services/search/jobs")
        data = dict()
        data.update(kwargs)
        data["search"] = "search {0}".format(search)
        # Get the search ID
        r = self.session.post(url, data=data, verify=False)
        xml = ET.fromstring(r.text)
        sid = xml.find("./sid").text
        # Wait for search to complete
        url = "{0}{1}".format(self.server, "/services/search/jobs/{0}/results".format(sid))
        timeout = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
        while datetime.datetime.now() < timeout:
            r = self.session.get(url)
            if r.status_code == 200:
                break
            time.sleep(1)
        # Have results
        if r.status_code == 204:
            raise TimeoutError
        xml = ET.fromstring(r.text)

        def generate():
            # Flatten each <result> into a dict of its <field> values.
            for result in xml.iterfind("./result"):
                r = dict()
                for field in result.iterfind("./field"):
                    r[field.get("k")] = field.find("./value/text").text
                yield r

        if as_list:
            return list(generate())
        else:
            return generate()
    @property
    def isTrial(self):
        # NOTE(review): bool(skey.text == "0") is True when the server reports
        # isTrial == "0", i.e. True for NON-trial installs — the name and the
        # comparison look inverted. Confirm intended semantics against the
        # Splunk REST API before changing, since callers may rely on it.
        root = self._get_url("/servicesNS/nobody/system/server/info")
        sdict = root.find("./{http://www.w3.org/2005/Atom}entry/{http://www.w3.org/2005/Atom}content/{http://dev.splunk.com/ns/rest}dict")
        skey = sdict.find("./{http://dev.splunk.com/ns/rest}key[@name='isTrial']")
        return bool(skey.text == "0")
    @property
    def isFree(self):
        # NOTE(review): same inverted-looking comparison as isTrial — returns
        # True when the server reports isFree == "0". Confirm before relying
        # on the name matching the behavior.
        root = self._get_url("/servicesNS/nobody/system/server/info")
        sdict = root.find("./{http://www.w3.org/2005/Atom}entry/{http://www.w3.org/2005/Atom}content/{http://dev.splunk.com/ns/rest}dict")
        skey = sdict.find("./{http://dev.splunk.com/ns/rest}key[@name='isFree']")
        return bool(skey.text == "0")
    @property
    def licenses(self):
        """Yield every installed license as a parsed dict."""
        root = self._get_url("/services/licenser/licenses")
        for entry in root.iterfind("./{http://www.w3.org/2005/Atom}entry"):
            sdict = entry.find("./{http://www.w3.org/2005/Atom}content/{http://dev.splunk.com/ns/rest}dict")
            yield parse_sdict(sdict)

    @property
    def valid_enterprise_licenses(self):
        """Yield only licenses whose status is VALID and type is 'enterprise'."""
        for license in self.licenses:
            if license["status"] == "VALID" and license["type"] == "enterprise":
                yield license

    @property
    def license_pools(self):
        """Yield every license pool as a parsed dict."""
        root = self._get_url("/services/licenser/pools")
        for entry in root.iterfind("./{http://www.w3.org/2005/Atom}entry"):
            sdict = entry.find("./{http://www.w3.org/2005/Atom}content/{http://dev.splunk.com/ns/rest}dict")
            yield parse_sdict(sdict)

    @property
    def license_slave_info(self):
        """Return this host's licenser-slave info (first entry only)."""
        root = self._get_url("/services/licenser/localslave")
        return parse_sdict(root.find("./{http://www.w3.org/2005/Atom}entry[1]/{http://www.w3.org/2005/Atom}content/{http://dev.splunk.com/ns/rest}dict"))
    @property
    def jobs(self):
        """Yield every search job as a parsed dict."""
        root = self._get_url("/services/search/jobs")
        for entry in root.iterfind("./{http://www.w3.org/2005/Atom}entry"):
            sdict = entry.find("./{http://www.w3.org/2005/Atom}content/{http://dev.splunk.com/ns/rest}dict")
            yield parse_sdict(sdict)

    @property
    def running_jobs(self):
        """Yield only jobs currently in the RUNNING dispatch state."""
        for job in self.jobs:
            if job["dispatchState"] == "RUNNING":
                yield job

    @property
    def search_peers(self):
        """Yield every distributed-search peer as a parsed dict."""
        root = self._get_url("/services/search/distributed/peers")
        for entry in root.iterfind("./{http://www.w3.org/2005/Atom}entry"):
            sdict = entry.find("./{http://www.w3.org/2005/Atom}content/{http://dev.splunk.com/ns/rest}dict")
            yield parse_sdict(sdict)

    @property
    def tcp_outputs(self):
        """Yield every configured TCP output (forwarding target) as a dict."""
        root = self._get_url("/services/data/outputs/tcp/server")
        for entry in root.iterfind("./{http://www.w3.org/2005/Atom}entry"):
            sdict = entry.find("./{http://www.w3.org/2005/Atom}content/{http://dev.splunk.com/ns/rest}dict")
            yield parse_sdict(sdict)
    @property
    def cluster_config(self):
        """Return the indexer-cluster configuration (first entry only)."""
        root = self._get_url("/services/cluster/config")
        return parse_sdict(root.find("./{http://www.w3.org/2005/Atom}entry[1]/{http://www.w3.org/2005/Atom}content/{http://dev.splunk.com/ns/rest}dict"))

    @property
    def cluster_buckets(self):
        """Yield every cluster-master bucket as a parsed dict."""
        root = self._get_url("/services/cluster/master/buckets")
        for entry in root.iterfind("./{http://www.w3.org/2005/Atom}entry"):
            sdict = entry.find("./{http://www.w3.org/2005/Atom}content/{http://dev.splunk.com/ns/rest}dict")
            yield parse_sdict(sdict)

    @property
    def cluster_peers(self):
        """Yield every cluster peer known to the master as a parsed dict."""
        root = self._get_url("/services/cluster/master/peers")
        for entry in root.iterfind("./{http://www.w3.org/2005/Atom}entry"):
            sdict = entry.find("./{http://www.w3.org/2005/Atom}content/{http://dev.splunk.com/ns/rest}dict")
            yield parse_sdict(sdict)

    @property
    def deployment_clients(self):
        """Yield deployment-server clients, trying the legacy endpoint first.

        When the legacy servicesNS endpoint answers with a <messages> payload,
        fall back to the newer /services/deployment/server/clients endpoint.
        """
        root = self._get_url("/servicesNS/nobody/system/admin/deploymentserver/default/default.Clients?count=-1")
        if root.find("messages") != None:
            root = self._get_url("/services/deployment/server/clients?count=0")
        for entry in root.iterfind("./{http://www.w3.org/2005/Atom}entry"):
            sdict = entry.find("./{http://www.w3.org/2005/Atom}content/{http://dev.splunk.com/ns/rest}dict")
            d = parse_sdict(sdict)
            if "lastPhoneHomeTime" in d:
                # Render the epoch phone-home time in the legacy text format.
                d["phoneHomeTime"] = datetime.datetime.fromtimestamp(int(d["lastPhoneHomeTime"])).strftime('%a %b %d %H:%M:%S %Y')
            yield d

    @property
    def cluster_slave_info(self):
        """Return this host's cluster-slave info (first entry only)."""
        root = self._get_url("/services/cluster/slave/info")
        return parse_sdict(root.find("./{http://www.w3.org/2005/Atom}entry[1]/{http://www.w3.org/2005/Atom}content/{http://dev.splunk.com/ns/rest}dict"))
@property
def distributed_search_peers(self):
root = self._get_url("/servicesNS/-/launcher/search |
honahursey/pyFDA | pyfda/filter_design/cheby1.py | Python | apache-2.0 | 9,053 | 0.016792 | # -*- coding: utf-8 -*-
"""
Design Chebychev 1 filters (LP, HP, BP, BS) with fixed or minimum order, return
the filter design in zpk (zeros, poles, gain) format
Attention:
This class is re-instantiated dynamically everytime the filter design method
is selected, calling the __init__ method.
Author: Christian Münker
"""
from __future__ import print_function, division, unicode_literals
import scipy.signal as sig
from scipy.signal import cheb1ord
import numpy as np
from pyfda.pyfda_lib import save_fil
__version__ = "1.0"
frmt = 'zpk' # output format of filter design routines 'zpk' / 'ba' / 'sos'
class cheby1(object):
    def __init__(self):
        # Mapping of internal id -> display name for the UI.
        self.name = {'cheby1':'Chebychev 1'}

        # common messages for all man. / min. filter order response types:
        msg_man = ("Enter the filter order <b><i>N</i></b> and the critical frequency "
                   " or frequencies <b><i>F<sub>C</sub></i></b> where the gain first drops below "
                   "the maximum ripple "
                   "<b><i>-A<sub>PB</sub></i></b> allowed below unity gain in the "
                   " passband.")
        msg_min = ("Enter maximum pass band ripple <b><i>A<sub>PB</sub></i></b>, "
                   "minimum stop band attenuation <b><i>A<sub>SB</sub> </i></b>"
                   " and the corresponding corner frequencies of pass and "
                   "stop band(s), <b><i>F<sub>PB</sub></i></b> and "
                   "<b><i>F<sub>SB</sub></i></b> .")

        # VISIBLE widgets for all man. / min. filter order response types:
        vis_man = ['fo','fspecs','tspecs'] # manual filter order
        vis_min = ['fo','fspecs','tspecs'] # minimum filter order
        # DISABLED widgets for all man. / min. filter order response types:
        dis_man = [] # manual filter order
        dis_min = ['fspecs'] # minimum filter order
        # common PARAMETERS for all man. / min. filter order response types:
        par_man = ['N', 'f_S', 'F_C', 'A_PB'] # manual filter order
        par_min = ['f_S', 'A_PB', 'A_SB'] # minimum filter order

        # Common data for all man. / min. filter order response types:
        # This data is merged with the entries for individual response types
        # (common data comes first):
        self.com = {"man":{"vis":vis_man, "dis":dis_man, "msg":msg_man, "par":par_man},
                    "min":{"vis":vis_min, "dis":dis_min, "msg":msg_min, "par":par_min}}

        self.ft = 'IIR'
        # Per response type (LP/HP/BP/BS): extra parameters needed on top of
        # the common ones, for manual ("man") and minimum ("min") order modes.
        self.rt = {
            "LP": {"man":{"par":[]},
                   "min":{"par":['F_PB','F_SB']}},
            "HP": {"man":{"par":[]},
                   "min":{"par":['F_SB','F_PB']}},
            "BP": {"man":{"par":['F_C2']},
                   "min":{"par":['F_SB','F_PB','F_PB2','F_SB2']}},
            "BS": {"man":{"par":['F_C2']},
                   "min":{"par":['F_PB','F_SB','F_SB2','F_PB2']}}
        }

        # Help text shown in the UI for this filter class.
        self.info = """
**Chebychev Type 1 filters**

have a constant ripple :math:`A_PB` in the passband(s) only, the stopband
drops monotonously. This is achieved by placing an `N`-fold zero at :math:`z=-1`.

For the filter design, the order :math:`N`, the passband ripple :math:`A_PB` and
the critical frequency / frequencies :math:`F_C` where the gain drops below
:math:`-A_PB` have to be specified.

The attenuation in the stop band can only be controlled by the filter order.

The ``cheb1ord()`` helper routine calculates the minimum order :math:`N` and the
critical passband frequency :math:`F_C` from passband / stopband specifications.

**Design routines:**

``scipy.signal.cheby1()``
``scipy.signal.cheb1ord()``
        """
        # Verbatim scipy docstrings, appended for the in-app documentation view.
        self.info_doc = []
        self.info_doc.append('cheby1()\n========')
        self.info_doc.append(sig.cheby1.__doc__)
        self.info_doc.append('cheb1ord()\n==========')
        self.info_doc.append(sig.cheb1ord.__doc__)

    def get_params(self, fil_dict):
        """
        Translate parameters from filter dictionary to instance
        parameters, scaling / transforming them if needed.
        """
        self.analog = False # set to True for analog filters
        self.N = fil_dict['N']
        # Frequencies are normalized to f_Nyq = f_S/2, hence the factor 2 !
        self.F_PB = fil_dict['F_PB'] * 2
        self.F_SB = fil_dict['F_SB'] * 2
        self.F_C = fil_dict['F_C'] * 2
        self.F_PB2 = fil_dict['F_PB2'] * 2
        self.F_SB2 = fil_dict['F_SB2'] * 2
        self.F_C2 = fil_dict['F_C2'] * 2
        # F_PBC is only computed by the *min design routines (via cheb1ord).
        self.F_PBC = None
        self.A_PB = fil_dict['A_PB']
        self.A_SB = fil_dict['A_SB']

    def save(self, fil_dict, arg):
        """
        Convert between poles / zeros / gain, filter coefficients (polynomes)
        and second-order sections and store all available formats in the global
        database.
        """
        save_fil(fil_dict, arg, frmt, __name__)

        if self.F_PBC is not None: # has corner frequency been calculated?
            fil_dict['N'] = self.N # yes, update filterbroker
            if np.isscalar(self.F_PBC): # HP or LP - a single corner frequency
                fil_dict['F_C'] = self.F_PBC / 2.
            else: # BP or BS - two corner frequencies
                fil_dict['F_C'] = self.F_PBC[0] / 2.
                fil_dict['F_C2'] = self.F_PBC[1] / 2.
    #------------------------------------------------------------------------------
    #
    #         DESIGN ROUTINES
    #
    #------------------------------------------------------------------------------
    # The same designs expressed as one-shot iirdesign() calls, for reference:
    # HP & LP
    #        self.save(fil_dict, iirdesign(self.F_PB, self.F_SB, self.A_PB,
    #               self.A_SB, analog=False, ftype='cheby1', output=frmt))
    # BP & BS:
    #        self.save(fil_dict, iirdesign([self.F_PB,self.F_PB2], [self.F_SB,self.F_SB2],
    #            self.A_PB, self.A_SB, analog=False, ftype='cheby1', output=frmt))

    # LP: F_PB < F_SB ---------------------------------------------------------
    def LPmin(self, fil_dict):
        """Minimum-order lowpass: order and corner frequency via cheb1ord()."""
        self.get_params(fil_dict)
        self.N, self.F_PBC = cheb1ord(self.F_PB,self.F_SB, self.A_PB,self.A_SB,
                                      analog = self.analog)
        self.save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_PBC,
                            btype='low', analog = self.analog, output = frmt))

    def LPman(self, fil_dict):
        """Lowpass with manually specified order N and corner F_C."""
        self.get_params(fil_dict)
        self.save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_C,
                            btype='low', analog = self.analog, output = frmt))

    # HP: F_SB < F_PB ---------------------------------------------------------
    def HPmin(self, fil_dict):
        """Minimum-order highpass: order and corner frequency via cheb1ord()."""
        self.get_params(fil_dict)
        self.N, self.F_PBC = cheb1ord(self.F_PB,self.F_SB, self.A_PB,self.A_SB,
                                      analog = self.analog)
        self.save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_PBC,
                            btype='highpass', analog = self.analog, output = frmt))

    def HPman(self, fil_dict):
        """Highpass with manually specified order N and corner F_C."""
        self.get_params(fil_dict)
        self.save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_C,
                            btype='highpass', analog = self.analog, output = frmt))

    # For BP and BS, A_PB, F_PB and F_stop have two elements each:

    # BP: F_SB[0] < F_PB[0], F_SB[1] > F_PB[1] --------------------------------
    def BPmin(self, fil_dict):
        """Minimum-order bandpass: order and corner frequencies via cheb1ord()."""
        self.get_params(fil_dict)
        self.N, self.F_PBC = cheb1ord([self.F_PB, self.F_PB2],
            [self.F_SB, self.F_SB2], self.A_PB,self.A_SB, analog = self.analog)
        self.save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_PBC,
                            btype='bandpass', analog = self.analog, output = frmt))

    def BPman(self, fil_dict):
        """Bandpass with manually specified order N and corners F_C, F_C2."""
        self.get_params(fil_dict)
        self.save(fil_dict, sig.cheby1(self.N, self.A_PB,[self.F_C,self.F_C2],
                            btype='bandpass', analog = self.analog, output = frmt))
def BSmin(self, fil_dict):
self.get_params(fil_dict)
self.N, self.F_PBC = cheb1o |
redsh/Tasks-In-A-Bottle | examples/dummy.py | Python | mit | 641 | 0.095164 | import ltask,json
from ltask import pset
class Task(ltask.Task):
    """Minimal demo task: dumps its parameters to params.json in the output dir."""

    def transform_params(self, p):
        # Called by ltask.Task.processed_params(); identity transform here.
        return p

    def out_dir(self):
        """Directory where this task writes its output."""
        return './examples/out/'

    def kill(self):
        # Nothing to terminate for this dummy task.
        pass

    def run(self):
        p = self.processed_params()
        # Use a context manager so the file handle is closed deterministically
        # (the original relied on the garbage collector to close it).
        with open(p['output_dir'] + '/params.json', 'w') as fh:
            fh.write(json.dumps(self.params, indent=5))
        print(p)
def scan():
    """Build the parameter grid for this task: the cartesian product of the
    'a' and 'b' sweeps, plus a separate sweep over 'x'."""
    P = pset('a', [1, 2, 3, 4, 5, 6, 7]) * pset('b', ['a', 'b', 'c', 'd', 'e']) \
        + pset('x', [1000, 10001])
    P.name = 'dummy1'
    return P
jiaphuan/models | research/object_detection/builders/hyperparams_builder.py | Python | apache-2.0 | 6,261 | 0.004951 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function to construct tf-slim arg_scope for convolution, fc ops."""
import tensorflow as tf
from object_detection.protos import hyperparams_pb2
slim = tf.contrib.slim
def build(hyperparams_config, is_training):
  """Builds tf-slim arg_scope for convolution, fc ops based on the config.

  Returns an arg_scope to use for convolution ops containing weights
  initializer, weights regularizer, activation function, batch norm function
  and batch norm parameters based on the configuration.

  Note that if the batch_norm parameteres are not specified in the config
  (i.e. left to default) then batch norm is excluded from the arg_scope.

  The batch norm parameters are set for updates based on `is_training` argument
  and conv_hyperparams_config.batch_norm.train parameter. During training, they
  are updated only if batch_norm.train parameter is true. However, during eval,
  no updates are made to the batch norm variables. In both cases, their current
  values are used during forward pass.

  Args:
    hyperparams_config: hyperparams.proto object containing
      hyperparameters.
    is_training: Whether the network is in training mode.

  Returns:
    arg_scope: tf-slim arg_scope containing hyperparameters for ops.

  Raises:
    ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
  """
  if not isinstance(hyperparams_config,
                    hyperparams_pb2.Hyperparams):
    raise ValueError('hyperparams_config not of type '
                     'hyperparams_pb.Hyperparams.')
  # Batch norm is only attached when explicitly present in the config.
  batch_norm = None
  batch_norm_params = None
  if hyperparams_config.HasField('batch_norm'):
    batch_norm = slim.batch_norm
    batch_norm_params = _build_batch_norm_params(
        hyperparams_config.batch_norm, is_training)
  # Scope applies to convolution ops by default, to fully-connected when
  # the config's op field is FC.
  affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose]
  if hyperparams_config.HasField('op') and (
      hyperparams_config.op == hyperparams_pb2.Hyperparams.FC):
    affected_ops = [slim.fully_connected]
  with slim.arg_scope(
      affected_ops,
      weights_regularizer=_build_regularizer(
          hyperparams_config.regularizer),
      weights_initializer=_build_initializer(
          hyperparams_config.initializer),
      activation_fn=_build_activation_fn(hyperparams_config.activation),
      normalizer_fn=batch_norm,
      normalizer_params=batch_norm_params) as sc:
    return sc
def _build_activation_fn(activation_fn):
  """Builds a callable activation from config.

  Args:
    activation_fn: hyperparams_pb2.Hyperparams.activation

  Returns:
    Callable activation function.

  Raises:
    ValueError: On unknown activation function.
  """
  # Table-driven dispatch instead of an if-chain; NONE maps to no activation.
  known_activations = {
      hyperparams_pb2.Hyperparams.NONE: None,
      hyperparams_pb2.Hyperparams.RELU: tf.nn.relu,
      hyperparams_pb2.Hyperparams.RELU_6: tf.nn.relu6,
  }
  if activation_fn in known_activations:
    return known_activations[activation_fn]
  raise ValueError('Unknown activation function: {}'.format(activation_fn))
def _build_regularizer(regularizer):
  """Builds a tf-slim regularizer from config.

  Args:
    regularizer: hyperparams_pb2.Hyperparams.regularizer proto.

  Returns:
    tf-slim regularizer.

  Raises:
    ValueError: On unknown regularizer.
  """
  chosen = regularizer.WhichOneof('regularizer_oneof')
  if chosen == 'l1_regularizer':
    weight = float(regularizer.l1_regularizer.weight)
    return slim.l1_regularizer(scale=weight)
  elif chosen == 'l2_regularizer':
    weight = float(regularizer.l2_regularizer.weight)
    return slim.l2_regularizer(scale=weight)
  raise ValueError('Unknown regularizer function: {}'.format(chosen))
def _build_initializer(initializer):
  """Build a tf initializer from config.

  Args:
    initializer: hyperparams_pb2.Hyperparams.regularizer proto.

  Returns:
    tf initializer.

  Raises:
    ValueError: On unknown initializer.
  """
  initializer_oneof = initializer.WhichOneof('initializer_oneof')
  if initializer_oneof == 'truncated_normal_initializer':
    return tf.truncated_normal_initializer(
        mean=initializer.truncated_normal_initializer.mean,
        stddev=initializer.truncated_normal_initializer.stddev)
  if initializer_oneof == 'random_normal_initializer':
    return tf.random_normal_initializer(
        mean=initializer.random_normal_initializer.mean,
        stddev=initializer.random_normal_initializer.stddev)
  if initializer_oneof == 'variance_scaling_initializer':
    # Reverse-map the proto enum number back to its symbolic name, because
    # slim.variance_scaling_initializer expects the mode as a string.
    enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer.
                       DESCRIPTOR.enum_types_by_name['Mode'])
    mode = enum_descriptor.values_by_number[initializer.
                                            variance_scaling_initializer.
                                            mode].name
    return slim.variance_scaling_initializer(
        factor=initializer.variance_scaling_initializer.factor,
        mode=mode,
        uniform=initializer.variance_scaling_initializer.uniform)
  raise ValueError('Unknown initializer function: {}'.format(
      initializer_oneof))
def _build_batch_norm_params(batch_norm, is_training):
"""Build a dictionary of batch_norm params from config.
Args:
batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
is_t | raining: Whether the models is in training mode.
Returns:
A dictionary containing batch_norm parameters.
"""
batch_norm_params = {
'decay': batch_norm.decay,
'center': batch_norm.center,
'scale': batch_norm.scale,
'epsilon': batch_ | norm.epsilon,
'is_training': is_training and batch_norm.train,
}
return batch_norm_params
|
cisba/cloudsignaturebot | cloudsignaturebot.py | Python | lgpl-3.0 | 20,001 | 0.00945 | """This is the Cloud Signature Bot based on Time4Mind and Telegram
It allow to sign documents using a Telegram chat and a Time4Mind account
"""
import sys
import os
import yaml
import logging
import time
import datetime
import uuid
import urllib.request
import shutil
import re
import magic
import json
from threading import Thread
from queue import Queue
from time4mind import Time4Mind
from telegram.ext import Updater, CommandHandler
from telegram.ext import MessageHandler, Filters
from telegram import Bot
from flask import Flask, jsonify, abort, make_response, request
from pkboxsoap import PkBoxSOAP
# methods for a "poor man" data persistence based on a yaml file
def acl_load():
    """Load the ACL mapping (user_id -> user info) from the YAML file
    configured in cfg['acl']; return an empty dict if it cannot be read."""
    try:
        with open(cfg['acl'], 'r') as yml_file:
            acl = yaml.load(yml_file)
    except (OSError, yaml.YAMLError):
        # Narrowed from a bare `except:` -- a missing or unparsable file is
        # expected on first run, but programming errors should surface.
        logging.warning("failed to read acl file: " + str(cfg['acl']))
        acl = dict()
    return acl
def acl_update(user_info):
    """Merge user_info into the persisted ACL, keyed by the user's id."""
    acl = acl_load()
    entry = acl.setdefault(user_info['id'], dict())
    entry.update(user_info)
    acl_dump(acl)
def acl_dump(acl):
    """Persist the ACL mapping to the YAML file configured in cfg['acl']."""
    try:
        with open(cfg['acl'], 'w+') as yml_file:
            yml_file.write(yaml.dump(acl))
    except (OSError, yaml.YAMLError):
        # Narrowed from a bare `except:` so programming errors surface.
        logging.critical("error writing acl file: " + str(cfg['acl']))
def acl_set_status(user_id, status):
    """Set the 'status' field of user_id in the persisted ACL.

    Unknown users are logged as an error and left untouched (returns None).
    """
    acl = acl_load()
    if user_id in acl:
        acl[user_id]['status'] = status
        acl_dump(acl)
        return
    logging.error('user_id ' + str(user_id) + 'not found in acl file:' \
                  + str(cfg['acl']))
    return None
def acl_get_user_info(user_id):
    """Return the stored info dict for user_id, or None if unknown."""
    acl = acl_load()
    if user_id not in acl:
        return None
    return acl[user_id]
# queue consumer
def process_queue(args):
    """Worker loop: consume Time4Mind callback messages and act via the bot.

    `args` is the tuple (queue, bot, acl_set_status). Two message types are
    handled: "authorization" marks the user authorized and confirms in chat;
    "signature" signs the stored documents with PkBox, sends the signed files
    back to the chat, then removes the per-operation work directory.
    """
    (queue, bot, acl_set_status) = args
    while True:
        q_msg = queue.get()
        logging.info('queue.get() : ' + repr(q_msg))
        # auth transaction
        if q_msg['type'] == "authorization":
            transaction = q_msg['content']
            acl_set_status(q_msg['chat_id'], "authorized")
            message = 'You have been authorized. Now send me a file to sign!'
            try:
                bot.sendMessage(chat_id=q_msg['chat_id'], text=message)
            except:
                logging.warning('error sending auth confirmation for transaction '\
                                + '\ncontent: ' + str(transaction) \
                                + '\nbot: ' + str(bot) \
                                + '\nchat_id: ' + str(q_msg['chat_id']) \
                                + '\nuser_id: ' + str(q_msg['user_id']) )
            else:
                logging.info('authorized user: ' + str(q_msg['user_id']))
        # sign transaction
        elif q_msg['type'] == "signature":
            # retrieve the document list saved when the operation was created
            operation_uuid4 = q_msg['operation_uuid4']
            yml_pathname = cfg['storage'] + '/' + operation_uuid4 + '.yml'
            try:
                with open(yml_pathname, 'r') as yml_file:
                    docs = yaml.load(yml_file)
            except:
                logging.warning('error retriving saved info for operation: '\
                                + operation_uuid4 \
                                + " from " + yml_pathname)
            else:
                logging.info("process_queue() operation " + operation_uuid4 \
                             + " retrived info from " + yml_pathname)
            # setup transaction signing otp
            transaction = q_msg['content']
            try:
                received_otp = json.loads(transaction['otp'])
            except Exception as inst:
                # NOTE(review): if this parse fails, received_otp is unbound
                # and the lines below raise NameError -- confirm intended.
                logging.debug(inst.args)
            sign_otp = dict()
            sign_otp['KeyPIN'] = received_otp['KeyPIN']
            sign_otp['SessionKey'] = received_otp['SessionKey']
            sign_otp['PIN'] = str(transaction['pin'])
            logging.debug("process_queue() sign_otp: " + str(json.dumps(sign_otp)) )
            # sign every stored file of the operation
            parent_dir = cfg['storage'] + '/' + str(q_msg['chat_id'])
            directory = parent_dir + '/' + operation_uuid4 + '/'
            for file_item in docs['list']:
                # retrive user certificate alias
                user_info = acl_get_user_info(q_msg['user_id'])
                signer = user_info['cred']['alias']
                if 'domain' in cfg['pkbox'] and cfg['pkbox']['domain'] == "open":
                    signer = '[' + user_info['cred']['domain'] + ']_' + signer
                # retrive file info; PDFs are signed as pdf, anything else as p7m
                pathname = directory + file_item['file_id']
                filetype = 'p7m'
                if re.match(r'PDF document.*', magic.from_file(pathname)):
                    filetype = 'pdf'
                # call pkbox for signing
                logging.info("process_queue() operation " + operation_uuid4 \
                             + " signing file: " + pathname)
                result = sign_service.envelope(pathname, filetype, signer,
                                               str(transaction['pin']),
                                               str(json.dumps(sign_otp)))
                # evaluate result
                index = docs['list'].index(file_item)
                if result == 'ok':
                    if filetype == "pdf":
                        docs['list'][index]['new_name'] = \
                            'SIGNED_' + docs['list'][index]['file_name']
                    else:
                        docs['list'][index]['new_name'] = \
                            docs['list'][index]['file_name'] + '.p7m'
                    logging.info('user ' + str(q_msg['user_id']) \
                                 + ' signed documents in operation: ' \
                                 + operation_uuid4 )
                else:
                    docs['list'][index]['result'] = str(result)
                    logging.warning("envelope() returned " + str(result)
                                    + ' signing document for operation:'\
                                    + operation_uuid4)
                # TODO:
                # if pdfsign fail because protected with a password)
                # it should return a msg to request sign it as p7m
            # send message and signed files
            for file_item in docs['list']:
                pathname = directory + file_item['file_id']
                if not 'new_name' in file_item:
                    message = 'Error signing file: ' + file_item['file_name'] \
                              + " with result " + file_item['result']
                    bot.sendMessage(chat_id=q_msg['chat_id'], text=message)
                elif not os.path.exists(pathname):
                    logging.warning("not found " + pathname)
                    message = 'Error reading signed file: ' + file_item['new_name']
                    bot.sendMessage(chat_id=q_msg['chat_id'], text=message)
                else:
                    bot.sendDocument( chat_id=q_msg['chat_id'],
                                      document=open(pathname, 'rb'),
                                      filename=file_item['new_name'])
                    os.remove(pathname)
            # remove yaml file and operation dir
            os.remove(yml_pathname)
            os.rmdir(directory)
            # try remove also chat_id dir if empty
            try:
                os.rmdir(parent_dir)
            except:
                pass
        # fix: mark the message done on the queue passed in via `args`
        # (the original called task_done() on a module-global `q`)
        queue.task_done()
# flask webserver to handle callback
app = Flask(__name__)
# function to start webserver as a thread
def flask_thread():
    """Run the Flask callback webserver (intended to be started in a thread)."""
    # Bind address comes from config; default to loopback only.
    if 'listenaddr' in cfg['webserver']:
        listenaddr = cfg['webserver']['listenaddr']
    else:
        listenaddr = '127.0.0.1'
    # NOTE(review): debug=True enables the Werkzeug debugger; confirm this
    # endpoint is never exposed in production with debug on.
    app.run(host=listenaddr,debug=True, use_reloader=False)
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of flask's default HTML error page."""
    payload = jsonify({'error': 'Not found'})
    return make_response(payload, 404)
@app.route('/api/v1.0/authorize/< |
tdyas/pants | src/python/pants/help/scope_info_iterator_test.py | Python | apache-2.0 | 2,564 | 0.00039 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import unittest
from pants.help.scope_info_iterator import ScopeInfoIterator
from pants.option.global_options import GlobalOptions
from pants.option.scope import GLOBAL_SCOPE, ScopeInfo
from pants.subsystem.subsystem import Subsystem
from pants.subsystem.subsystem_client_mixin import SubsystemDependency
from pants.task.task import Task
class ScopeInfoIteratorTest(unittest.TestCase):
    """Checks ScopeInfoIterator expands queried scopes to their subsystem
    dependencies and task children, in registration order."""
    def test_iteration(self):
        self.maxDiff = None
        class Subsys1(Subsystem):
            options_scope = "subsys1"
        class Subsys2(Subsystem):
            options_scope = "subsys2"
            @classmethod
            def subsystem_dependencies(cls):
                return (SubsystemDependency(Subsys1, "subsys2"),)
        class Goal1Task2(Task):
            options_scope = "goal1.task12"
            @classmethod
            def subsystem_dependencies(cls):
                return (SubsystemDependency(Subsys1, "goal1.task12"),)
        infos = [
            ScopeInfo(GLOBAL_SCOPE, ScopeInfo.GLOBAL, GlobalOptions),
            ScopeInfo("subsys2", ScopeInfo.SUBSYSTEM, Subsys2),
            ScopeInfo("subsys1.subsys2", ScopeInfo.SUBSYSTEM, Subsys1),
            ScopeInfo("goal1", ScopeInfo.INTERMEDIATE),
            ScopeInfo("goal1.task11", ScopeInfo.TASK),
            ScopeInfo("goal1.task12", ScopeInfo.TASK, Goal1Task2),
            ScopeInfo("subsys1.goal1.task12", ScopeInfo.SUBSYSTEM, Subsys1),
            ScopeInfo("goal2", ScopeInfo.INTERMEDIATE),
            ScopeInfo("goal2.task21", ScopeInfo.TASK),
            ScopeInfo("goal2.task22", ScopeInfo.TASK),
            ScopeInfo("goal3", ScopeInfo.INTERMEDIATE),
            ScopeInfo("goal3.task31", ScopeInfo.TASK),
            ScopeInfo("goal3.task32", ScopeInfo.TASK),
        ]
        scope_to_infos = dict((x.scope, x) for x in infos)
        it = ScopeInfoIterator(scope_to_infos)
        actual = list(it.iterate({GLOBAL_SCOPE, "goal1", "goal2.task21", "goal3"}))
        # goal1 expands to all its tasks (and their subsystems); goal2 was
        # queried only via goal2.task21, so goal2.task22 must not appear.
        expected_scopes = [
            GLOBAL_SCOPE,
            "subsys2",
            "subsys1.subsys2",
            "goal1",
            "goal1.task11",
            "goal1.task12",
            "subsys1.goal1.task12",
            "goal2.task21",
            "goal3",
            "goal3.task31",
            "goal3.task32",
        ]
        expected_scope_infos = [scope_to_infos[x] for x in expected_scopes]
        self.assertEqual(expected_scope_infos, actual)
|
disqus/pgshovel | src/main/python/pgshovel/replication/validation/__init__.py | Python | apache-2.0 | 1,442 | 0.000693 | from pgshovel.interfaces.replication_pb2 import (
State,
StreamState,
)
from pgshovel.replication.validation.bootstrap import validate_bootstrap_state
from pgshovel.replication.validation.consumers import validate_consumer_state
from pgshovel.replication.validation.transactions import validate_transaction_state
class MultipleStateValidator(object):
    """Validates several optional sub-states of a protobuf-style message.

    Each entry of `validators` maps a field name to a callable; on call,
    the callable receives the field's value (or None when the field is not
    set) and its non-None results are collected into a new `message`.
    """
    def __init__(self, message, validators):
        self.message = message
        self.validators = validators

    def __call__(self, state, *args, **kwargs):
        states = {}
        for name, validator in self.validators.items():
            if state is not None and state.HasField(name):
                value = getattr(state, name)
            else:
                value = None
            result = validator(value, *args, **kwargs)
            if result is not None:
                states[name] = result
        return self.message(**states)
# Top-level validator: checks the bootstrap phase, then (once streaming) the
# consumer and per-transaction invariants of a replication State message.
validate_state = MultipleStateValidator(State, {
    'bootstrap_state': validate_bootstrap_state,
    'stream_state': MultipleStateValidator(StreamState, {
        'consumer_state': validate_consumer_state,
        'transaction_state': validate_transaction_state,
    })
})
#: The expected types of event for a stream of transactions when there is no
#: existing ``TransactionState``.
TRANSACTION_START_EVENT_TYPES = validate_state.validators['stream_state'].validators['transaction_state'].receivers[None].keys() # noqa
|
diogocs1/comps | web/addons/account/wizard/account_validate_account_move.py | Python | apache-2.0 | 3,203 | 0.005932 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class validate_account_move(osv.osv_memory):
    _name = "validate.account.move"
    _description = "Validate Account Move"
    _columns = {
        'journal_ids': fields.many2many('account.journal', 'wizard_validate_account_move_journal', 'wizard_id', 'journal_id', 'Journal', required=True),
        'period_ids': fields.many2many('account.period', 'wizard_validate_account_move_period', 'wizard_id', 'period_id', 'Period', required=True, domain=[('state','<>','done')]),
    }
    def validate_move(self, cr, uid, ids, context=None):
        """Post every draft journal entry in the selected journals/periods.

        Raises a user-facing warning when no draft entries match; returns
        the standard action dict that closes the wizard window.
        """
        obj_move = self.pool.get('account.move')
        if context is None:
            context = {}
        data = self.read(cr, uid, ids[0], context=context)
        ids_move = obj_move.search(cr, uid, [('state','=','draft'),('journal_id','in',tuple(data['journal_ids'])),('period_id','in',tuple(data['period_ids']))], order='date')
        if not ids_move:
            raise osv.except_osv(_('Warning!'), _('Specified journals do not have any account move entries in draft state for the specified periods.'))
        obj_move.button_validate(cr, uid, ids_move, context=context)
        return {'type': 'ir.actions.act_window_close'}
class validate_account_move_lines(osv.osv_memory):
    _name = "validate.account.move.lines"
    _description = "Validate Account Move Lines"
    def validate_move_lines(self, cr, uid, ids, context=None):
        """Post the moves owning the selected (draft) journal entry lines.

        Collects the distinct parent moves still in draft state from
        context['active_ids']; warns if none qualify.
        """
        obj_move_line = self.pool.get('account.move.line')
        obj_move = self.pool.get('account.move')
        move_ids = []
        if context is None:
            context = {}
        data_line = obj_move_line.browse(cr, uid, context['active_ids'], context)
        for line in data_line:
            if line.move_id.state=='draft':
                move_ids.append(line.move_id.id)
        # de-duplicate parent moves shared by several selected lines
        move_ids = list(set(move_ids))
        if not move_ids:
            raise osv.except_osv(_('Warning!'), _('Selected Entry Lines does not have any account move entries in draft state.'))
        obj_move.button_validate(cr, uid, move_ids, context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
phobson/bokeh | examples/howto/interactive_bubble/gapminder.py | Python | bsd-3-clause | 4,269 | 0.002577 | import pandas as pd
from jinja2 import Template
from bokeh.embed import file_html
from bokeh.layouts import column
from bokeh.models import (ColumnDataSource, Plot, Circle, Range1d, LinearAxis,
HoverTool, Text, SingleIntervalTicker, CustomJS, Slider)
from bokeh.models.annotations import Title
from bokeh.palettes import Spectral6
from bokeh.resources import JSResources
from bokeh.util.browser import view
from data import process_data
fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions = process_data()

sources = {}

region_color = regions_df['region_color']
region_color.name = 'region_color'

# Build one ColumnDataSource per year, keyed '_<year>', holding that year's
# fertility / life-expectancy / population joined with the region colors.
for year in years:
    fertility = fertility_df[year]
    fertility.name = 'fertility'
    life = life_expectancy_df[year]
    life.name = 'life'
    population = population_df_size[year]
    population.name = 'population'
    new_df = pd.concat([fertility, life, population, region_color], axis=1)
    sources['_' + str(year)] = ColumnDataSource(new_df)

# The mapping is embedded verbatim in the CustomJS callback below; stripping
# the quotes turns the values into JS variable references instead of strings.
dictionary_of_sources = dict(zip([x for x in years], ['_%s' % x for x in years]))
js_source_array = str(dictionary_of_sources).replace("'", "")
xdr = Range1d(1, 9)
ydr = Range1d(20, 100)

plot = Plot(
    x_range=xdr,
    y_range=ydr,
    title=Title(text=''),
    plot_width=800,
    plot_height=400,
    outline_line_color=None,
    toolbar_location=None,
    min_border=20,
)

# Shared styling applied to both axes below.
AXIS_FORMATS = dict(
    minor_tick_in=None,
    minor_tick_out=None,
    major_tick_in=None,
    major_label_text_font_size="10pt",
    major_label_text_font_style="normal",
    axis_label_text_font_size="10pt",
    axis_line_color='#AAAAAA',
    major_tick_line_color='#AAAAAA',
    major_label_text_color='#666666',
    major_tick_line_cap="round",
    axis_line_cap="round",
    axis_line_width=1,
    major_tick_line_width=1,
)

xaxis = LinearAxis(ticker=SingleIntervalTicker(interval=1), axis_label="Children per woman (total fertility)", **AXIS_FORMATS)
yaxis = LinearAxis(ticker=SingleIntervalTicker(interval=20), axis_label="Life expectancy at birth (years)", **AXIS_FORMATS)
plot.add_layout(xaxis, 'below')
plot.add_layout(yaxis, 'left')
# ### Add the background year text
# We add this first so it is below all the other glyphs
text_source = ColumnDataSource({'year': ['%s' % years[0]]})
text = Text(x=2, y=35, text='year', text_font_size='150pt', text_color='#EEEEEE')
plot.add_glyph(text_source, text)
# Add the circle
renderer_source = sources['_%s' % years[0]]
circle_glyph = Circle(
    x='fertility', y='life', size='population',
    fill_color='region_color', fill_alpha=0.8,
    line_color='#7c7e71', line_width=0.5, line_alpha=0.5)
circle_renderer = plot.add_glyph(renderer_source, circle_glyph)
# Add the hover (only against the circle and not other plot elements)
tooltips = "@index"
plot.add_tools(HoverTool(tooltips=tooltips, renderers=[circle_renderer]))
# Add the legend
# Manually placed text + color-swatch pairs (no Legend object): one row per
# region, stepping down 5 data-units per entry.
text_x = 7
text_y = 95
for i, region in enumerate(regions):
    plot.add_glyph(Text(x=text_x, y=text_y, text=[region], text_font_size='10pt', text_color='#666666'))
    plot.add_glyph(Circle(x=text_x - 0.1, y=text_y + 2, fill_color=Spectral6[i], size=10, line_color=None, fill_alpha=0.8))
    text_y = text_y - 5
# Add the slider
# The CustomJS swaps the circle renderer's data source for the selected
# year's pre-built ColumnDataSource and updates the background year label.
code = """
var year = slider.get('value'),
    sources = %s,
    new_source_data = sources[year].get('data');
renderer_source.set('data', new_source_data);
text_source.set('data', {'year': [String(year)]});
""" % js_source_array
callback = CustomJS(args=sources, code=code)
slider = Slider(start=years[0], end=years[-1], value=1, step=1, title="Year", callback=callback, name='testy')
callback.args["renderer_source"] = renderer_source
callback.args["slider"] = slider
callback.args["text_source"] = text_source
# Stick the plot and the slider together
layout = column(plot, slider)
# Open our custom template
with open('gapminder_template.jinja', 'r') as f:
    template = Template(f.read())
# Use inline resources, render the html and open
js_resources = JSResources(mode='inline')
title = "Bokeh - Gapminder Bubble Plot"
html = file_html(layout, resources=(js_resources, None), title=title, template=template)
output_file = 'gapminder.html'
with open(output_file, 'w') as f:
    f.write(html)
view(output_file)
|
fedebell/Laboratorio3 | relazione2/scriptVecchi/bode.py | Python | gpl-3.0 | 1,939 | 0.016503 | import uncertainties
from uncertainties import ufloat
import math
import numpy
import numpy
import pylab
from scipy.optimize import curve_fit
import math
import scipy.stats
#Misuro a mano con il tester i valori che poi vado a mettere nel file, posso anche lasciare lo sfasamento vuoto
def linear(x, a, b):
    """Straight line b + a*x, used as the Bode-plot fit model."""
    return b + a * x
def fitPassaBasso(x, f_0):
    """First-order low-pass magnitude response |H| = 1/sqrt(1 + (x/f_0)**2).

    Fixes two bugs in the original: `^` (bitwise XOR, a TypeError on floats)
    was used instead of `**`, and the fraction was inverted, which would have
    produced sqrt(1 + (x/f_0)**2) rather than its reciprocal.
    """
    return 1.0 / numpy.sqrt(1.0 + (x / f_0) ** 2)
Vout_o, dVout_o, f_o, df_o = pylab.loadtxt('/home/federico/Laboratorio3/relazione2/datiPassaBasso.txt', unpack=True)

# We neglect the output resistance of the function generator so that V_in is
# approximately constant.
Vin = 5.0   # maximum amplitude, measured once for all points
dVin = 0.15

# Gain (linear and in dB) with propagated uncertainties.
A_o = Vout_o/Vin
dA_o = A_o *pow(((dVout_o/Vout_o)**2 + (dVin/Vin)**2), 0.5)
B_o = 20 * pylab.log10(A_o)
dB_o = 8.7*dA_o/A_o
logf_o = pylab.log10(f_o)
dlogf_o = (1/pylab.log(10))*df_o/f_o
print(dlogf_o)
print(dB_o)

pylab.figure(1)
pylab.title('Bode diagram of low-pass RC filter')
pylab.xlabel('frequency [kHz]')
pylab.ylabel('gain [dB]')
pylab.ylim(-50, 2)
pylab.xlim(1, 7)
pylab.grid(color = "gray")
pylab.grid(color = "gray")
pylab.errorbar(logf_o, B_o, dB_o, dlogf_o, "o", color="black")

init = numpy.array([0.0, 0.0])
# NOTE(review): the effective sigma combines y-errors with slope-scaled
# x-errors; the 20.0 factor looks like an approximation of slope**2
# (a -20 dB/decade roll-off would suggest 400) -- confirm.
par_o, cov_o = curve_fit(linear, logf_o, B_o, init, pylab.sqrt(dB_o*dB_o+20.0*dlogf_o*dlogf_o))
print(par_o, cov_o)
# fix: the chi-square residual must use the data B_o, not its uncertainty
# dB_o as in the original.
chisq = (((B_o - linear(logf_o, par_o[0], par_o[1]))/(pylab.sqrt(dB_o*dB_o+20.0*dlogf_o*dlogf_o)))**2).sum()
ndof = len(logf_o) - 2  # two parameters estimated by the fit
p=1.0-scipy.stats.chi2.cdf(chisq, ndof)
print("Chisquare/ndof = %f/%d" % (chisq, ndof))
print("p = ", p)

# Routine to draw the fitted line:
div = 1000
bucket = numpy.array([0.0 for i in range(div)])
retta = numpy.array([0.0 for i in range(div)])
inc = 6/div
for i in range(len(bucket)):
    bucket[i]=float(i)*inc
    retta[i] = linear(bucket[i], par_o[0], par_o[1])
pylab.plot(bucket, retta, color = "red")
pylab.show()
|
fusion809/python-scripts | SLE/Airy_root_finder.py | Python | gpl-3.0 | 292 | 0.034247 | #!/usr/bin/env python3
from scipy.special import airy
from numpy import abs
def f(xinput):
    """Newton's method for a zero of the Airy function Ai(-x).

    Starting from xinput, iterate x <- x + Ai(-x)/Ai'(-x) -- the Newton
    update for g(x) = Ai(-x), since g'(x) = -Ai'(-x) -- until
    |Ai(-x)| < 1e-12. The original wrapped both terms in abs(), which
    destroys the sign of the step once the iterate overshoots the root.

    Returns (residual, root): the final |Ai(-x)| and the converged abscissa.
    """
    xoutput = xinput
    Ai = abs(airy(-xoutput)[0])
    iterations = 0
    while Ai > 1e-12:
        ai = airy(-xoutput)
        # signed Newton step: x + Ai(-x)/Ai'(-x)
        xoutput = xoutput + ai[0] / ai[1]
        Ai = abs(airy(-xoutput)[0])
        iterations += 1
        if iterations > 100:
            # safety cap: avoid an infinite loop if the iteration diverges
            break
    return Ai, xoutput
rtts/connect-forever | streaming_api/socket_client.py | Python | gpl-3.0 | 337 | 0.008902 | #!/usr/bin/env python
import sys
from socket import socket
BUFSIZE = 1024
HOST, PORT = 'l | ocalhost', 8888
socket = socke | t()
socket.connect((HOST, PORT))
print "[Connected to server]"
try:
while True:
print socket.recv(BUFSIZE),
socket.send(sys.stdin.readline())
except KeyboardInterrupt:
socket.close()
print "\nGoodbye"
|
hedin/paraboard-back | paraboard/boards/migrations/0004_auto_20150415_1513.py | Python | apache-2.0 | 594 | 0.001684 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Orders posts chronologically and adds a hex color field to boards."""

    dependencies = [
        ('boards', '0003_board_channel'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='post',
            options={'ordering': ('date_created',)},
        ),
        migrations.AddField(
            model_name='board',
            name='color',
            field=models.CharField(max_length=7, default='#FFFFFF', verbose_name='color'),
            preserve_default=True,
        ),
    ]
|
naparuba/opsbro | data/core-configuration/packs/core-functions/module/node.py | Python | mit | 2,773 | 0.002524 | from opsbro.evaluater import export_evaluater_function
from opsbro.gossip import gossiper
FUNCTION_GROUP = 'gossip'
@export_evaluater_function(function_group=FUNCTION_GROUP)
def is_in_group(group):
    """**is_in_group(group)** -> return True if the node have the group, False otherwise.
* group: (string) group to check.
<code>
    Example:
        is_in_group('linux')
    Returns:
        True
</code>
    """
    # Thin wrapper: delegate group membership to the live gossip manager.
    return gossiper.is_in_group(group)
@export_evaluater_function(function_group=FUNCTION_GROUP)
def is_in_static_group(group):
    """**is_in_static_group(group)** -> return True if the node have the group but was set in the configuration, not from discovery False otherwise.
* group: (string) group to check.
<code>
    Example:
        is_in_static_group('linux')
    Returns:
        True
</code>
    """
    # NOTE(review): despite the docstring, this delegates to the same
    # gossiper.is_in_group() call as is_in_group(), so discovered
    # (non-static) groups also match. Confirm whether gossiper exposes a
    # static-only lookup that should be used here instead.
    return gossiper.is_in_group(group)
@export_evaluater_function(function_group=FUNCTION_GROUP)
def gossip_get_zone(node_uuid=''):
    """**gossip_get_zone(node_uuid='')** -> return the zone (as string) of the node with the uuid node_uuid. If uset, get the current node.
* node_uuid: (string) uuid of the element to get zone.
<code>
    Example:
        gossip_get_zone()
    Returns:
        'internet'
</code>
    """
    # Empty uuid means "the local node" (resolved inside the gossip manager).
    return gossiper.get_zone_from_node(node_uuid)
@export_evaluater_function(function_group=FUNCTION_GROUP)
def gossip_count_nodes(group='', state=''):
    """**gossip_count_nodes(group='', state='')** -> return the number of known nodes that match group and state
* group: (string) if set, count only the members of this group.
* state: (string) if set, count only the members with this state.
<code>
    Example:
        gossip_count_nodes(group='linux', state='ALIVE')
    Returns:
        3
</code>
    """
    # Both filters are optional; empty string means "no filter".
    return gossiper.count(group=group, state=state)
@export_evaluater_function(function_group=FUNCTION_GROUP)
def gossip_have_event_type(event_type):
    """**gossip_have_event_type(event_type)** -> return True if an event of event_type is present in the node
* event_type: (string) type of event to detect.
<code>
    Example:
        gossip_have_event_type('shinken-restart')
    Returns:
        False
</code>
    """
    return gossiper.have_event_type(event_type)
@export_evaluater_function(function_group=FUNCTION_GROUP)
def compliance_get_state_of_rule(rule_name):
    """**compliance_get_state_of_rule(rule_name)** -> return the state of the rule with the name rule_name
* rule_name: (string) name of the rule to get. If wrong, state will be UNKNOWN.
<code>
    Example:
        compliance_get_state_of_rule('Install mongodb')
    Returns:
        'COMPLIANT'
</code>
    """
    # Local import: presumably avoids a circular import at module load
    # time -- TODO confirm.
    from opsbro.compliancemgr import compliancemgr
    return compliancemgr.get_rule_state(rule_name)
|
hholzgra/maposmatic | www/maposmatic/migrations/0005_auto_20170521_0103.py | Python | agpl-3.0 | 453 | 0.002208 | # -*- coding: utf-8 -*-
from __future__ impor | t unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Makes the GPX track upload optional on map rendering jobs."""

    dependencies = [
        ('maposmatic', '0004_maprenderingjob_track'),
    ]

    operations = [
        migrations.AlterField(
            model_name='maprenderingjob',
            name='track',
            field=models.FileField(null=True, upload_to=b'upload/tracks/', blank=True),
        ),
    ]
|
awsdocs/aws-doc-sdk-examples | python/example_code/iot/thing_performance.py | Python | apache-2.0 | 5,845 | 0.006501 | # Copyright 2010-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[thing_performance.py demonstrates how to push CPU and memory usage data to a thing's device shadow in AWS IoT.]
# snippet-service:[iot]
# snippet-sourcesyntax:[python]
# snippet-keyword:[Python]
# snippet-keyword:[AWS IoT]
# snippet-keyword:[Code Sample]
# snippet-keyword:[AWSIoTSDK]
# snippet-keyword:[AWSIoTMQTTShadowClient]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2020-01-23]
# snippet-sourceauthor:[FThompsonAWS]
# snippet-start:[iot.python.thing_performance.complete]
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
import json
import psutil
import argparse
import logging
import time
# Configures the argument parser for this program.
def configureParser():
    """Create the command-line parser for this program.

    Required: endpoint, root CA, certificate, private key and thing name.
    Optional: port (default 8883), request delay (default 1 s) and SDK
    debug logging.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add("-e", "--endpoint", action="store", required=True, dest="host",
        help="Your AWS IoT custom endpoint")
    add("-r", "--rootCA", action="store", required=True, dest="rootCAPath",
        help="Root CA file path")
    add("-c", "--cert", action="store", required=True, dest="certificatePath",
        help="Certificate file path")
    add("-k", "--key", action="store", required=True, dest="privateKeyPath",
        help="Private key file path")
    add("-p", "--port", action="store", dest="port", type=int, default=8883,
        help="Port number override")
    add("-n", "--thingName", action="store", required=True, dest="thingName",
        help="Targeted thing name")
    add("-d", "--requestDelay", action="store", dest="requestDelay", type=float,
        default=1, help="Time between requests (in seconds)")
    add("-v", "--enableLogging", action="store_true", dest="enableLogging",
        help="Enable logging for the AWS IoT Device SDK for Python")
    return parser
# An MQTT shadow client that uploads device performance data to AWS IoT at a regular interval.
class PerformanceShadowClient:
    """An MQTT shadow client that uploads device performance data to AWS IoT
    at a regular interval."""
    def __init__(self, thingName, host, port, rootCAPath, privateKeyPath, certificatePath, requestDelay):
        self.thingName = thingName
        self.host = host
        self.port = port
        self.rootCAPath = rootCAPath
        self.privateKeyPath = privateKeyPath
        self.certificatePath = certificatePath
        self.requestDelay = requestDelay

    # Updates this thing's shadow with system performance data at a regular interval.
    def run(self):
        """Connect, then loop forever reporting CPU/memory to the shadow."""
        print("Connecting MQTT client for {}...".format(self.thingName))
        mqttClient = self.configureMQTTClient()
        mqttClient.connect()
        print("MQTT client for {} connected".format(self.thingName))
        deviceShadowHandler = mqttClient.createShadowHandlerWithName(self.thingName, True)
        print("Running performance shadow client for {}...\n".format(self.thingName))
        while True:
            performance = self.readPerformance()
            print("[{}]".format(self.thingName))
            print("CPU:\t{}%".format(performance["cpu"]))
            print("Memory:\t{}%\n".format(performance["memory"]))
            payload = { "state": { "reported": performance } }
            deviceShadowHandler.shadowUpdate(json.dumps(payload), self.shadowUpdateCallback, 5)
            # fix: use the delay stored on this instance instead of reaching
            # for the module-global `args` parsed in the script entry point.
            time.sleep(self.requestDelay)

    # Configures the MQTT shadow client for this thing.
    def configureMQTTClient(self):
        """Build an AWSIoTMQTTShadowClient from this instance's credentials."""
        mqttClient = AWSIoTMQTTShadowClient(self.thingName)
        mqttClient.configureEndpoint(self.host, self.port)
        mqttClient.configureCredentials(self.rootCAPath, self.privateKeyPath, self.certificatePath)
        mqttClient.configureAutoReconnectBackoffTime(1, 32, 20)
        mqttClient.configureConnectDisconnectTimeout(10)
        mqttClient.configureMQTTOperationTimeout(5)
        return mqttClient

    # Returns the local device's CPU usage, memory usage, and timestamp.
    def readPerformance(self):
        """Sample current CPU %, memory % and a Unix timestamp."""
        cpu = psutil.cpu_percent()
        memory = psutil.virtual_memory().percent
        timestamp = time.time()
        return { "cpu": cpu, "memory": memory, "timestamp": timestamp }

    # Prints the result of a shadow update call.
    def shadowUpdateCallback(self, payload, responseStatus, token):
        """Log the outcome of an asynchronous shadowUpdate request."""
        print("[{}]".format(self.thingName))
        print("Update request {} {}\n".format(token, responseStatus))
# Configures debug logging for the AWS IoT Device SDK for Python.
def configureLogging():
    """Attach a DEBUG-level console handler to the SDK's core logger."""
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    sdkLogger = logging.getLogger("AWSIoTPythonSDK.core")
    sdkLogger.setLevel(logging.DEBUG)
    sdkLogger.addHandler(handler)
# Runs the performance shadow client with user arguments.
if __name__ == "__main__":
    # Parse the endpoint/credential/timing options from the command line.
    parser = configureParser()
    args = parser.parse_args()
    if (args.enableLogging):
        configureLogging()
    # Positional order must match PerformanceShadowClient.__init__.
    thingClient = PerformanceShadowClient(args.thingName, args.host, args.port, args.rootCAPath, args.privateKeyPath,
                                          args.certificatePath, args.requestDelay)
    # Loops forever, publishing one shadow update per request delay.
    thingClient.run()
# snippet-end:[iot.python.thing_performance.complete] |
fusionbox/django-widgy-blog | widgy_blog/admin.py | Python | bsd-2-clause | 5,267 | 0.001709 | from functools import partial
from django.contrib import admin
from django.core.exceptions import ObjectDoesNotExist
from django.forms.models import modelform_factory
from django.contrib.admin.views.main import ChangeList
from django.forms.models import model_to_dict
from django.contrib.auth import get_user_model
from widgy.admin import WidgyAdmin
from widgy.forms import WidgyForm
from widgy.models import Node
from .models import Blog, BlogLayout, Tag
User = get_user_model()
class IsPublishedListFilter(admin.SimpleListFilter):
    """Admin filter splitting blogs by whether a published layout commit exists."""
    title = 'Published'
    parameter_name = 'is_published'
    model = BlogLayout

    def lookups(self, request, model_admin):
        return (
            ('0', 'No'),
            ('1', 'Yes'),
        )

    def queryset(self, request, queryset):
        choice = self.value()
        if choice not in ('0', '1'):
            return None
        # Blogs whose commits point at a published layout.
        published_lookup = {
            'content__commits__root_node__content_id__in':
                self.model.objects.published(),
        }
        if choice == '0':
            return queryset.exclude(**published_lookup).distinct()
        return queryset.filter(**published_lookup).distinct()
class AuthorListFilter(admin.SimpleListFilter):
    """Admin filter on the author of each blog's current working-copy layout."""
    title = 'Current author'
    parameter_name = 'author'

    def lookups(self, request, model_admin):
        # Only offer users who have authored at least one layout.
        authors = User.objects.filter(blog_bloglayout_set__isnull=False).distinct()
        return ((str(author.pk), str(author)) for author in authors)

    def queryset(self, request, queryset):
        author_pk = self.value()
        if not author_pk:
            return None
        layouts_by_author = BlogLayout.objects.filter(author__pk=author_pk)
        return queryset.filter(
            content__working_copy__content_id__in=layouts_by_author
        ).distinct()
class BlogForm(WidgyForm):
    """Widgy form that seeds its initial data from the blog's working-copy layout."""

    def __init__(self, *args, **kwargs):
        instance = kwargs.get('instance')
        if instance:
            try:
                layout = instance.content.working_copy.content
            except ObjectDoesNotExist:
                # No working copy yet -- nothing to pre-fill.
                pass
            else:
                opts = self._meta
                initial = model_to_dict(layout, opts.fields, opts.exclude)
                # Explicitly-passed initial values win over the layout's data.
                initial.update(kwargs.get('initial', {}))
                kwargs['initial'] = initial
        super(BlogForm, self).__init__(*args, **kwargs)
class BlogChangeList(ChangeList):
    """ChangeList that bulk-attaches widgy content for every row in the result list."""

    def get_results(self, request):
        super(BlogChangeList, self).get_results(request)
        # Works like prefetch_related, but for our GenericForeignKey.
        working_copies = (blog.content.working_copy for blog in self.result_list)
        Node.attach_content_instances(working_copies)
class BlogAdmin(WidgyAdmin):
    """Admin for Blog that edits fields of the widgy BlogLayout through a proxy form."""
    form = BlogForm
    layout_model = BlogLayout

    # These are the fields that are actually stored in widgy, not the
    # owner. We copy them back and forth to make the editing interface
    # nicer.
    layout_proxy_fields = [
        'title',
        'slug',
        'date',
        'author',
        'image',
        'summary',
        'description',
        'keywords',
        'page_title',
        'tags',
    ]

    list_filter = [IsPublishedListFilter, AuthorListFilter]
    list_display = ['title', 'author']

    fieldsets = [
        (None, {
            'fields': [
                'title', 'date', 'author', 'image', 'summary', 'content', 'tags',
            ],
        }),
        ('Meta', {
            'fields': ['description', 'keywords', 'slug', 'page_title'],
            'classes': ['collapse', 'grp-collapse', 'collapse-closed',
                        'collapsed'],
        }),
    ]

    def get_queryset(self, request):
        # Avoid an extra query per row when the changelist touches working_copy.
        return self.model.objects.select_related('content__working_copy')
    # NOTE(review): presumably a compatibility alias for older Django, which
    # called this hook ``queryset`` -- confirm before removing.
    queryset = get_queryset

    def get_changelist(self, *args, **kwargs):
        return BlogChangeList

    def get_form(self, request, obj=None, **kwargs):
        # We need to get the fields for BlogLayout
        defaults = {
            'formfield_callback': partial(self.formfield_for_dbfield, request=request),
            'form': self.form,
            'fields': self.layout_proxy_fields,
        }
        defaults.update(kwargs)
        # Build a ModelForm for the layout, then rebase its generated fields
        # onto self.form so the widgy form behavior is preserved.
        LayoutModelForm = modelform_factory(self.layout_model, **defaults)
        LayoutForm = type('BlogLayoutForm', (self.form,), LayoutModelForm.base_fields)
        LayoutForm.layout_proxy_fields = self.layout_proxy_fields
        kwargs['form'] = LayoutForm
        return super(BlogAdmin, self).get_form(request, obj, **kwargs)

    def save_model(self, request, obj, form, change):
        # Split out the values that live on the layout rather than on Blog.
        layout_data = dict(
            (k, v) for k, v in form.cleaned_data.items() if k in self.layout_proxy_fields
        )
        if not change:
            # adding
            tags = layout_data.pop('tags', [])
            field = self.model._meta.get_field('content')
            obj.content = field.add_root(obj, layout_data)
            obj.content.working_copy.content.tags = tags
        else:
            # editing
            content = obj.content.working_copy.content
            for field_name, value in layout_data.items():
                setattr(content, field_name, value)
            content.save()
        return super(BlogAdmin, self).save_model(request, obj, form, change)
# Expose the blog models in the Django admin.
admin.site.register(Blog, BlogAdmin)
admin.site.register(Tag, admin.ModelAdmin)
|
schleichdi2/OpenNfr_E2_Gui-6.0 | lib/python/Plugins/Extensions/MediaPortal/additions/mediatheken/atv.py | Python | gpl-2.0 | 8,745 | 0.029052 | # -*- coding: utf-8 -*-
###############################################################################################
#
# MediaPortal for Dreambox OS
#
# Coded by MediaPortal Team (c) 2013-2017
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH. This includes commercial distribution.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# This applies to the source code as a whole as well as to parts of it, unless
# explicitely stated otherwise.
#
# If you want to use or modify the code or parts of it,
# you have to keep OUR license and inform us about the modifications, but it may NOT be
# commercially distributed other than under the conditions noted above.
#
# As an exception regarding execution on hardware, you are permitted to execute this plugin on VU+ hardware
# which is licensed by satco europe GmbH, if the VTi image is used on that hardware.
#
# As an exception regarding modifcations, you are NOT permitted to remove
# any copy protections implemented in this plugin or change them for means of disabling
# or working around the copy protections, unless the change has been explicitly permitted
# by the original authors. Also decompiling and modification of the closed source
# parts is NOT permitted.
#
# Advertising with this plugin is NOT allowed.
# For other uses, permission from the authors is necessary.
#
###############################################################################################
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
default_cover = "file://%s/atv.png" % (config.mediaportal.iconcachepath.value + "logos")
class atvGenreScreen(MPScreen, ThumbsHelper):
    """Top-level screen listing the programme genres of the ATV Mediathek."""

    def __init__(self, session):
        MPScreen.__init__(self, session, skin='MP_Plugin')
        ThumbsHelper.__init__(self)
        self["actions"] = ActionMap(["MP_Actions"], {
            "0" : self.closeAll,
            "ok" : self.keyOK,
            "cancel": self.keyCancel,
            "5" : self.keyShowThumb,
            "up" : self.keyUp,
            "down" : self.keyDown,
            "right" : self.keyRight,
            "left" : self.keyLeft
        }, -1)
        self['title'] = Label("ATV Mediathek")
        self['ContentTitle'] = Label("Genre:")
        self['name'] = Label(_("Please wait..."))
        self.keyLocked = True
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml
        self.onLayoutFinish.append(self.loadPage)

    def loadPage(self):
        # Asynchronously download the Mediathek start page (Twisted deferred).
        CoverHelper(self['coverArt']).getCover(default_cover)
        self.filmliste = []
        url = "http://atv.at/mediathek"
        getPage(url).addCallback(self.parseData).addErrback(self.dataError)

    def parseData(self, data):
        # Scrape (url, cover image, title) triples for each programme.
        parse = re.search('class="mod_programs">(.*?)/mod_programs', data, re.S)
        if parse:
            raw = re.findall('href="(.*?)">.*?src="(.*?)"\salt="(.*?)"', parse.group(), re.S)
            if raw:
                for (Url, Image, Title) in raw:
                    # NOTE(review): this replace is a no-op as written; it
                    # presumably was meant to unescape '&amp;' -- confirm
                    # against upstream before changing.
                    Image = Image.replace('&','&')
                    self.filmliste.append((decodeHtml(Title), Url, Image))
                self.ml.setList(map(self._defaultlistcenter, self.filmliste))
                self.keyLocked = False
                self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, 1, 1, mode=1)
                self.showInfos()

    def showInfos(self):
        # Update the title label and cover for the current selection.
        name = self['liste'].getCurrent()[0][0]
        coverUrl = self['liste'].getCurrent()[0][2]
        self['name'].setText(decodeHtml(name))
        CoverHelper(self['coverArt']).getCover(coverUrl)

    def keyOK(self):
        Name = self['liste'].getCurrent()[0][0]
        Link = self['liste'].getCurrent()[0][1]
        self.session.open(atvListScreen, Link, Name)
class atvListScreen(MPScreen, ThumbsHelper):
    """Episode list for one genre; follows pagination and resolves stream URLs."""

    def __init__(self, session, Link, Name):
        self.Link = Link
        self.Name = Name
        MPScreen.__init__(self, session, skin='MP_PluginDescr')
        ThumbsHelper.__init__(self)
        self["actions"] = ActionMap(["MP_Actions"], {
            "0" : self.closeAll,
            "ok" : self.keyOK,
            "cancel": self.keyCancel,
            "5" : self.keyShowThumb,
            "up" : self.keyUp,
            "down" : self.keyDown,
            "right" : self.keyRight,
            "left" : self.keyLeft
        }, -1)
        self['title'] = Label("ATV Mediathek")
        self['ContentTitle'] = Label("Genre: %s" % self.Name)
        self['name'] = Label(_("Please wait..."))
        self['Page'] = Label(_("Page:"))
        self.filmliste = []
        self.handlung = ''
        self.keyLocked = True
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml
        self.onLayoutFinish.append(self.loadPage)

    def loadPage(self):
        # Lock input while the page download is in flight.
        self.keyLocked = True
        getPage(self.Link).addCallback(self.parseData).addErrback(self.dataError)

    def parseData(self, data):
        self['name'].setText(_("Please wait..."))
        parse = re.search('<!--\smod_teasers\s-->(.*?)<!--\s/mod_teasers\s-->', data, re.S)
        if not re.match('http://atv.at/uri/', self.Link):
            # Plot summary only exists on the first (non-AJAX) page.
            handlung = re.search('<meta\sname="description"\scontent="(.*?)"', data, re.S)
            if handlung:
                self.handlung = handlung.group(1)
        if parse:
            raw = re.findall('<li class="teaser">.*?href="(.*?)".*?teaser_image_file(?:/|\%252F)(.*?jpg).*?class="title">(.*?)<', parse.group(), re.S)
            if raw:
                for (Url, Image, Title) in raw:
                    Image = 'http://atv.at/static/assets/cms/media_items/teaser_image_file/%s' % Image
                    self.filmliste.append((decodeHtml(Title), Url, Image))
        # Follow "Weitere Folgen" pagination by re-queuing loadPage.
        nextpage = re.search('data-jsb="url=(.*?)" style=.*?Weitere Folgen', data, re.S)
        if nextpage:
            self.Link = urllib.unquote_plus(nextpage.group(1))
            self.loadPage()
        if len(self.filmliste) == 0:
            self.filmliste.append(("No channels found.", "",""))
        self.ml.setList(map(self._defaultlistleft, self.filmliste))
        self.ml.moveToIndex(0)
        self.keyLocked = False
        self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None ,1 ,1, mode=1)
        self.showInfos()

    def showInfos(self):
        title = self['liste'].getCurrent()[0][0]
        coverUrl = self['liste'].getCurrent()[0][2]
        self['name'].setText(title)
        self['handlung'].setText(decodeHtml(self.handlung))
        CoverHelper(self['coverArt']).getCover(coverUrl)

    def keyOK(self):
        if self.keyLocked:
            return
        Link = self['liste'].getCurrent()[0][1]
        self.keyLocked = True
        getPage(Link).addCallback(self.getStreamLink).addErrback(self.dataError)
        self['name'].setText(_("Please wait..."))

    def getStreamLink(self, data):
        # Try RTSP first, then the progressive MP4 CDN, then HLS playlists.
        Name = self['liste'].getCurrent()[0][0]
        Linkliste = []
        part = re.search('jsb_video/VideoPlaylist(.*?)/detail_content', data, re.S)
        if part:
            raw = re.findall('quot;(rtsp:\\\/\\\/109.68.230.208:1935\\\/vod\\\/_definst_\\\/.*?.mp4)', part.group(1), re.S)
            if not raw:
                raw = re.findall('quot;(http[s]?:\\\/\\\/(?:blocked.|)multiscreen.atv.cdn.tvnext.tv\\\/\d+\\\/\d+\\\/(?:HD|SD)\\\/hbbtv\\\/\d+(?:_\d|).mp4)', part.group(1), re.S)
            if not raw:
                raw = re.findall('quot;(http[s]?:\\\/\\\/(?:blocked.|)(?:multiscreen.atv.cdn.tvnext.tv|atv.at)\\\/\d+\\\/\d+\\\/(?:HD|SD)\\\/(?:\d+)(?:\\\/index)(?:_\d|).m3u8)', part.group(1), re.S)
            if raw:
                for Link in raw:
                    # Unescape JSON slashes and strip the geo-block marker.
                    Link = Link.replace('\/','/').replace('blocked.','').replace('blocked-','')
                    Streampart = "Teil %s" % str(len(Linkliste)+1)
                    Linkliste.append((Streampart, Link))
        self.keyLocked = False
        if len(Linkliste) == 1:
            self.session.open(SimplePlayer, [(Name, Linkliste[0][1])], showPlaylist=False, ltype='atv')
        elif len(Linkliste) >= 1:
            # Multi-part episodes go through the part-selection screen.
            self.session.open(atvPartScreen, Name, Linkliste)
        self['name'].setText(Name)
class atvPartScreen(MPScreen):
de | f __init__(self, session, Name, Linkliste):
self.Linkliste = Linkliste
self.Name = Name
MPScreen.__init__(self, session, skin='MP_PluginDescr')
self["actions"] = ActionMap(["MP_Actions"], {
"0" : self.closeAll,
"ok" : self.keyOK,
"cancel": self.keyCancel,
"up" : self.keyUp,
"down" : self.keyDown,
}, -1)
self['title'] = Label("ATV Mediathek")
self['ContentTitle'] = Label("Genre: %s" % self.Name)
self['name'] = Label(_("Please wait..."))
s |
drakeloud/louderdev | louderdev/bin/explode.py | Python | mit | 2,470 | 0.000405 | #!/Users/Drake/dev/LouderDev/louderdev/bin/python3
#
# The Python Imaging Library
# $Id$
#
# split an animation into a number of frame files
#
from __future__ import print_function
from PIL import Image
import os
import sys
class Interval(object):
    """Set of frame numbers parsed from a spec like ``"1-10,15,-20"``.

    A bare number selects that frame, ``lo-hi`` a range, and ``-N`` the
    range 0..N.  An empty spec selects every frame.
    """

    def __init__(self, interval="0"):
        self.setinterval(interval)

    def setinterval(self, interval):
        self.hilo = []
        for token in interval.split(","):
            token = token.strip()
            if not token:
                continue
            try:
                value = int(token)
            except ValueError:
                # A range like "lo-hi": split on the dash between the numbers.
                dash = token.find("-")
                lo, hi = int(token[:dash]), int(token[dash + 1:])
            else:
                # "-N" means 0..N; a plain number is a single frame.
                lo, hi = (0, -value) if value < 0 else (value, value)
            self.hilo.append((hi, lo))
        if not self.hilo:
            # No usable tokens: accept everything.
            self.hilo = [(sys.maxsize, 0)]

    def __getitem__(self, index):
        for hi, lo in self.hilo:
            if hi >= index >= lo:
                return 1
        return 0
# --------------------------------------------------------------------
# main program

# "html" starts as a flag; if -h is given it is later rebound to the open
# report file object (both the int 0 and a file test truthily as expected).
html = 0
if sys.argv[1:2] == ["-h"]:
    html = 1
    del sys.argv[1]

if not sys.argv[2:]:
    print()
    print("Syntax: python explode.py infile template [range]")
    print()
    print("The template argument is used to construct the names of the")
    print("individual frame files. The frames are numbered file001.ext,")
    print("file002.ext, etc. You can insert %d to control the placement")
    print("and syntax of the frame number.")
    print()
    print("The optional range argument specifies which frames to extract.")
    print("You can give one or more ranges like 1-10, 5, -15 etc. If")
    print("omitted, all frames are extracted.")
    sys.exit(1)

infile = sys.argv[1]
outfile = sys.argv[2]
frames = Interval(",".join(sys.argv[3:]))

try:
    # check if outfile contains a placeholder
    outfile % 1
except TypeError:
    # No %d in the template: insert a zero-padded frame counter.
    file, ext = os.path.splitext(outfile)
    outfile = file + "%03d" + ext

ix = 1

im = Image.open(infile)

if html:
    file, ext = os.path.splitext(outfile)
    html = open(file+".html", "w")
    html.write("<html>\n<body>\n")

while True:
    if frames[ix]:
        im.save(outfile % ix)
        print(outfile % ix)
        if html:
            # Left-associative %%: first embeds the template, then the index.
            html.write("<img src='%s'><br>\n" % outfile % ix)
    try:
        im.seek(ix)
    except EOFError:
        # Past the last frame of the animation.
        break
    ix += 1

if html:
    html.write("</body>\n</html>\n")
|
provoke-vagueness/reststore | setup.py | Python | mit | 2,099 | 0.002382 | #!/usr/bin/env python
from setuptools import setup
import re
import platform
import os
import sys
# Third-party packages required at runtime.
install_requires = ["bottle>=0.11",
                    "requests>=1.1.0",
                    "pyyaml>=0.0",
                    "czipfile>=1.0.0",
                    "prometheus-client"]
def load_version(filename='./reststore/version.py'):
    """Parse a __version__ number from a source file.

    :param filename: path to a Python file containing a line of the form
        ``__version__ = '<version>'``.
    :returns: the version string.
    :raises RuntimeError: if no ``__version__`` assignment is found.
    """
    with open(filename) as source:
        text = source.read()
    # re.MULTILINE lets ``^`` anchor each line, so the assignment no longer
    # has to sit on the very first line of the file.
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", text, re.MULTILINE)
    if not match:
        msg = "Unable to find version number in {}".format(filename)
        raise RuntimeError(msg)
    version = match.group(1)
    return version
# Package metadata and entry points; README.rst supplies the long description.
setup(
    name="reststore",
    version=load_version(),
    packages=["reststore"],
    zip_safe=False,
    author="Michael Dorman",
    author_email="mjdorma+mjdorma@gmail.com",
    url="https://github.com/provoke-vagueness/reststore",
    description="RESTful datastore. A simple way to store large amounts of average sized files.",
    long_description=open('README.rst').read(),
    license="Apache Software Licence",
    install_requires = install_requires,
    entry_points={
        'console_scripts': [
            'reststore = reststore.cli:entry',
        ]
    },
    platforms=['cygwin', 'win', 'linux'],
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Other Audience',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Security',
        'Topic :: System :: Monitoring'
    ],
    test_suite="tests",
    tests_require=[]
)
|
alex/invoke | tests/loader.py | Python | bsd-2-clause | 2,196 | 0.000911 | import os
import sys
from spec import Spec, skip, eq_, raises
from invoke.loader import Loader
from invoke.collection import Collection
from invoke.exceptions import CollectionNotFound
from _utils import support
class Loader_(Spec):
    """Spec tests for ``invoke.loader.Loader``."""

    def exposes_discovery_root(self):
        root = '/tmp/'
        eq_(Loader(root=root).root, root)

    def has_a_default_discovery_root(self):
        eq_(Loader().root, os.getcwd())

    class load_collection:
        def returns_collection_object_if_name_found(self):
            result = Loader(root=support).load_collection('foo')
            eq_(type(result), Collection)

        @raises(CollectionNotFound)
        def raises_CollectionNotFound_if_not_found(self):
            Loader(root=support).load_collection('nope')

        @raises(ImportError)
        def raises_ImportError_if_found_collection_cannot_be_imported(self):
            # Instead of masking with a CollectionNotFound
            Loader(root=support).load_collection('oops')

        def honors_discovery_root_option(self):
            skip()

        def searches_towards_root_of_filesystem(self):
            skip()

        def defaults_to_tasks_collection(self):
            "defaults to 'tasks' collection"
            result = Loader(root=support + '/implicit/').load_collection()
            eq_(type(result), Collection)

    class find_collection:
        @raises(CollectionNotFound)
        def raises_CollectionNotFound_for_missing_collections(self):
            result = Loader(root=support).find_collection('nope')

    class update_path:
        def setup(self):
            self.l = Loader(root=support)

        def does_not_modify_argument(self):
            # update_path must return a new list, not mutate its input.
            path = []
            new_path = self.l.update_path(path)
            eq_(path, [])
            assert len(new_path) > 0

        def inserts_self_root_parent_at_front_of_path(self):
            "Inserts self.root at front of path"
            eq_(self.l.update_path([])[0], self.l.root)

        def does_not_insert_if_exists(self):
            "Doesn't insert self.root if it's already in the path"
            new_path = self.l.update_path([self.l.root])
            eq_(len(new_path), 1)  # not 2
|
dreamibor/Algorithms-and-Data-Structures-Using-Python | practice/implementation/stack_and_queue/reverse_substrings_between_each_pair_of_parentheses.py | Python | gpl-3.0 | 1,483 | 0.005394 | """
Stack - Reverse Substrings Between Each Pair of Parentheses (medium)
You are given a string `s` that consists of lower case English letters and
brackets.
Reverse the strings in each pair of matching parentheses, starting from the
innermost one.
Your result should not contain any brackets.
Example 3:
Input: s = "(ed(et(oc))el)"
Output: "leetcode"
Explanation: First, we reverse the substring "oc", then "etco", and finally,
the whole string.
LeetCode: https://leetcode-cn.com/problems/reverse-substrings-between-each-pair-of-parentheses
"""
def reverse_substring(s: str) -> str:
    """Reverse the substrings inside each pair of matching parentheses.

    Single stack-based pass: characters are pushed until a ``)`` is seen,
    then everything back to the matching ``(`` is popped (which reverses
    that segment) and pushed again without the brackets.

    Time Complexity - O(N^2) worst case: deeply nested parentheses re-copy
    their contents once per nesting level (not O(N) as previously claimed).
    Space Complexity - O(N) - For the stack.
    """
    stack = []
    # The character index is never needed, so iterate directly (the old
    # ``enumerate`` left an unused loop variable).
    for char in s:
        # When we encounter a right parenthesis, pop out all characters
        # before the matching left parenthesis; popping reverses them.
        if char == ")":
            temp_str = []
            while stack and stack[-1] != "(":
                temp_str.append(stack.pop())
            # Pop out the left parenthesis "(".
            stack.pop()
            # Add the reversed segment back onto the stack.
            stack += temp_str
        else:
            stack.append(char)
    return "".join(stack)
if __name__ == "__main__":
    # Manual smoke checks; expected: "leetcode" and "apmnolkjihgfedcbq".
    s = "(ed(et(oc))el)"
    print(reverse_substring(s))
    s = "a(bcdefghijkl(mno)p)q"
    print(reverse_substring(s))
jamesthechamp/zamboni | mkt/webapps/indexers.py | Python | bsd-3-clause | 19,607 | 0 | from operator import attrgetter
from django.core.urlresolvers import reverse
from django.db.models import Min
import commonware.log
from elasticsearch_dsl import F
from elasticsearch_dsl.filter import Bool
import mkt
from mkt.constants import APP_FEATURES
from mkt.constants.applications import DEVICE_GAIA
from mkt.prices.models import AddonPremium
from mkt.search.indexers import BaseIndexer
from mkt.search.utils import Search
from mkt.tags.models import attach_tags
from mkt.translations.models import attach_trans_dict
log = commonware.log.getLogger('z.addons')
class WebappIndexer(BaseIndexer):
"""Fields we don't need to expose in the results, only used for filtering
or sorting."""
hidden_fields = (
'*.raw',
'*_sort',
'popularity_*',
'trending_*',
'boost',
'owners',
'features',
# 'name' and 'description', as well as the locale variants, are only
# used for filtering. The fields that are used by the API are
# 'name_translations' and 'description_translations'.
'name',
'description',
'name_l10n_*',
'description_l10n_*',
)
"""
Bunch of ES stuff for Webapp include mappings, indexing, search.
"""
    @classmethod
    def search(cls, using=None):
        """
        Returns a `Search` object.

        We override this to use our patched version which adds statsd timing.
        """
        # Hidden (filter/sort-only) fields are excluded from _source so the
        # responses stay small.
        return (Search(
            using=using or cls.get_es(), index=cls.get_index(),
            doc_type=cls.get_mapping_type_name())
            .extra(_source={'exclude': cls.hidden_fields}))
@classmethod
def get_mapping_type_name(cls):
"""
Returns mapping type name which is used as the key in ES_INDEXES to
determine which index to use.
We override this because Webapp is a proxy model to Addon.
"""
return 'webapp'
@classmethod
def get_model(cls):
from mkt.webapps.models import Webapp
return Webapp
@classmethod
def get_mapping(cls):
doc_type = cls.get_mapping_type_name()
mapping = {
doc_type: {
# Disable _all field to reduce index size.
'_all': {'enabled': False},
'properties': {
# App fields.
'id': {'type': 'long'},
'app_slug': {'type': 'string'},
'app_type': {'type': 'byte'},
'author': {
'type': 'string',
'analyzer': 'default_icu',
'fields': {
# For exact matches. The simple analyzer allows
# for case-insensitive matching.
'raw': {'type': 'string',
'analyzer': 'exact_lowercase'},
},
},
'banner_regions': cls.string_not_indexed(),
'bayesian_rating': {'type': 'float', 'doc_values': True},
'category': cls.string_not_analyzed(),
'content_descriptors': cls.string_not_indexed(),
'content_ratings': {
'type': 'object',
'dynamic': 'true',
},
'created': {'format': 'dateOptionalTime', 'type': 'date',
'doc_values': True},
'current_version': cls.string_not_indexed(),
'default_locale': cls.string_not_indexed(),
'description': {'type': 'string',
'analyzer': 'default_icu',
'position_offset_gap': 100},
'device': {'type': 'byte'},
# The date this app was added to the escalation queue.
'escalation_date': {'format': 'dateOptionalTime',
'type': 'date', 'doc_values': True},
'features': {
'type': 'object',
'properties': dict(
('has_%s' % f.lower(), {'type': 'boolean'})
for f in APP_FEATURES)
},
'file_size': {'type': 'long'},
'guid': cls.string_not_analyzed(),
'has_public_stats': {'type': 'boolean'},
'hosted_url': cls.string_not_analyzed(),
'icon_hash': cls.string_not_indexed(),
'interactive_elements': cls.string_not_indexed(),
'installs_allowed_from': cls.string_not_analyzed(),
'is_disabled': {'type': 'boolean'},
'is_escalated': {'type': 'boolean'},
'is_offline': {'type': 'boolean'},
'is_priority': {'type': 'boolean'},
'is_rereviewed': {'type': 'boolean'},
'last_updated': {'format': 'dateOptionalTime',
'type': 'date'},
'latest_version': {
'type': 'object',
'properties': {
'status': {'type': 'byte'},
'is_privileged': {'type': 'boolean'},
'has_editor_comment': {'type': 'boolean'},
'has_info_request': {'type': 'boolean'},
'nomination_date': {'type': 'date',
'format': 'dateOptionalTime'},
'created_date': {'type': 'date',
'format': 'dateOptionalTime'},
},
},
'manifest_url': cls.string_not_analyzed(),
'modified': {'format': 'dateOptionalTime',
'type': 'date'},
# Name for searching. This is a list of all the localized
# names for the app. We add "position_offset_gap" to work
# around the fact that ES stores the same list of tokens as
# if this were a single string. The offset gap adds 100
# positions between each name and ensures one string from
# one name and one string from another name won't both
# match with a phrase match query.
'name': {
'type': 'string',
'analyzer': 'default_icu',
'position_offset_gap': 100,
# For exact matches. Referenced as `name.raw`.
'fields': {
'raw': cls.string_not_analyzed(
position_offset_gap=100)
},
                        },
# Name for sorting.
'name_sort': cls.string_not_analyzed(doc_values=T | rue),
# Name for suggestions.
'name_suggest': {'type': 'completion', 'payloads': True},
'owners': {'type': 'long'},
'package_path': cls.string_not_indexed(),
'premium_type': {'type': 'byte'},
'previews': {
'type': 'object',
'dynamic': 'true',
},
'price_tier': cls.string_not_indexed(),
'promo_img_hash': cls.string_not_indexed(),
'ratings': {
'type': 'object',
'properties': {
'average': {'type': 'float'},
'count': {'type': 'short'},
}
},
'region_exclusions': {'type': 'short'},
'reviewed': {'format': 'dateOptionalTime', 'type': 'date',
'doc_values': True |
uccser/cs4hs | generate.py | Python | gpl-3.0 | 3,361 | 0.002678 |
""" CS4HS Website Generator
AUTHOR: Jack Morgan
REQUIRES: Python >= 3.4.1
"""
CURRENT_DIRECTORY = '.'
OUTPUT_DIRECTORY = './output/'
TEXT_FOLDER = './text/'
FOLDERS_TO_COPY = ['css', 'files', 'img', 'js']
"""Check and install dependencies"""
import pip
# Update pip if needed and install dependencies
pip.main(['install', 'pip>=7.0.3'])
pip.main(['in | stall', 'jinja2>=2.7.3'])
import os
import os.path
import shutil
import argparse
from jinja2 import Environment, FileSystemLoader
class WebsiteGenerator:
    """Object for generating CS4HS website"""

    def __init__(self):
        # Templates are looked up in the working directory first, then templates/.
        search_path = [CURRENT_DIRECTORY, 'templates/']
        self.env = Environment(loader=FileSystemLoader(search_path))

    def render_html(self, template):
        """Return a rendered template"""
        template_object = self.env.get_template(template)
        return template_object.render()
def write_html(html, file):
    """Write rendered HTML into the output folder and make it world-readable.

    Failures are reported to stdout rather than raised, so one bad file does
    not abort the whole build.
    """
    file_name = os.path.join(OUTPUT_DIRECTORY, file)
    try:
        with open(file_name, 'w', encoding='utf8') as output_file:
            output_file.write(html)
        print('Created {}'.format(file))
        os.chmod(file_name, 0o644)
    # BUG FIX: narrowed from a bare ``except``, which also swallowed
    # KeyboardInterrupt/SystemExit and hid programming errors.  Only
    # filesystem/permission problems are expected here.
    except OSError:
        print("Cannot write {0}".format(file))
def copy_files():
    """Copy every required asset folder into the output directory."""
    for folder in FOLDERS_TO_COPY:
        source = os.path.join(CURRENT_DIRECTORY, folder)
        destination = os.path.join(OUTPUT_DIRECTORY, folder)
        # Start from a clean slate so stale assets do not linger.
        if os.path.exists(destination):
            shutil.rmtree(destination)
        shutil.copytree(source, destination)
        os.chmod(destination, 0o2775)
        apply_file_permissions_to_folder(destination)
        print("Copied {} folder".format(folder))
def apply_file_permissions_to_folder(folder_name):
    """Recursively chmod the folder's contents: 0o2775 (setgid) for
    subfolders, 0o644 for files.  The top-level folder itself is untouched."""
    for root, folders, files in os.walk(folder_name):
        for entry in folders:
            os.chmod(os.path.join(root, entry), 0o2775)
        for entry in files:
            os.chmod(os.path.join(root, entry), 0o644)
def command_line_args():
    """Setup arg parser, and add required argument handling. Return
    namespace generated by parser arguments
    """
    parser = argparse.ArgumentParser(description='CS4HS Generator Argument')
    parser.add_argument('--pre-conference', '-p',
                        dest='pre_conference',
                        action='store_true',
                        help='Creates only index page for pre-conference')
    return parser.parse_args()
def main():
    """Create template engine and process all HTML files
    in the top directory"""
    cmd_args = command_line_args()
    website_generator = WebsiteGenerator()
    if cmd_args.pre_conference:
        # Pre-conference mode builds only the placeholder index page.
        files = ['pre-index.html']
    else:
        files = os.listdir(TEXT_FOLDER)
        files.remove('pre-index.html')
    # Render all HTML files in top directory
    for file in files:
        if file.endswith('.html'):
            file_path = os.path.join(TEXT_FOLDER, file)
            html = website_generator.render_html(file_path)
            if cmd_args.pre_conference:
                # The placeholder page is published as the site index.
                write_html(html, 'index.html')
            else:
                write_html(html, file)
    copy_files()

if __name__ == "__main__":
    main()
|
agdsn/sipa | sipa/model/pycroft/user.py | Python | mit | 13,407 | 0.001492 | # -*- coding: utf-8 -*-
import logging
from sipa.model.user import BaseUser
from sipa.model.finance import BaseFinanceInformation
from sipa.model.fancy_property import active_prop, connection_dependent, \
unsupported_prop, ActiveProperty, UnsupportedProperty, Capabilities
from sipa.model.misc import PaymentDetails
from sipa.model.exceptions import UserNotFound, PasswordInvalid, \
MacAlreadyExists, NetworkAccessAlreadyActive, TerminationNotPossible, UnknownError, \
ContinuationNotPossible, SubnetFull, UserNotContactableError, TokenNotFound, LoginNotAllowed
from .api import PycroftApi
from .exc import PycroftBackendError
from .schema import UserData, UserStatus
from .unserialize import UnserializationError
from .userdb import UserDB
from flask_login import AnonymousUserMixin
from flask.globals import current_app
from flask_babel import gettext
from werkzeug.local import LocalProxy
from werkzeug.http import parse_date
logger = logging.getLogger(__name__)
api: PycroftApi = LocalProxy(lambda: current_app.extensions['pycroft_api'])
class User(BaseUser):
user_data: UserData
    def __init__(self, user_data: dict):
        """Wrap the raw API payload.

        Raises PycroftBackendError (chained) when the payload does not match
        the expected schema.
        """
        try:
            self.user_data: UserData = UserData(user_data)
            self._userdb: UserDB = UserDB(self)
        except UnserializationError as e:
            raise PycroftBackendError("Error when parsing user lookup response") from e
        super().__init__(uid=str(self.user_data.id))
    @classmethod
    def get(cls, username):
        # Any non-200 API response is treated as "user not found".
        status, user_data = api.get_user(username)
        if status != 200:
            raise UserNotFound
        return cls(user_data)

    @classmethod
    def from_ip(cls, ip):
        # Fall back to an anonymous user when no user owns this IP, so
        # callers never have to special-case a lookup failure.
        status, user_data = api.get_user_from_ip(ip)
        if status != 200:
            return AnonymousUserMixin()
        return cls(user_data)
    def re_authenticate(self, password):
        # Re-check the logged-in user's password (e.g. before sensitive edits).
        self.authenticate(self.user_data.login, password)
    @classmethod
    def authenticate(cls, username, password):
        """Verify credentials and return the User.

        Raises PasswordInvalid on bad credentials, LoginNotAllowed when the
        account lacks the 'sipa_login' property.
        """
        status, result = api.authenticate(username, password)
        if status != 200:
            raise PasswordInvalid
        user = cls.get(result['id'])
        if not user.has_property('sipa_login'):
            raise LoginNotAllowed
        return user
can_change_password = True
    def change_password(self, old, new):
        """Change the account password; PasswordInvalid if `old` is rejected."""
        status, result = api.change_password(self.user_data.id, old, new)
        if status != 200:
            raise PasswordInvalid
    @property
    def traffic_history(self):
        # One entry per traffic record: weekday index plus in/out/total volumes.
        # NOTE(review): `to_kib` is not visible in this chunk — presumably a
        # module-level helper converting bytes to KiB; confirm.
        return [{
            'day': parse_date(entry.timestamp).weekday(),
            'input': to_kib(entry.ingress),
            'output': to_kib(entry.egress),
            'throughput': to_kib(entry.ingress) + to_kib(entry.egress),
        } for entry in self.user_data.traffic_history]
    @active_prop
    def realname(self):
        # Full name as reported by pycroft.
        return self.user_data.name
    @active_prop
    def birthdate(self):
        return self.user_data.birthdate
    @active_prop
    def login(self):
        return self.user_data.login
    @active_prop
    @connection_dependent
    def ips(self):
        # All IPs of all interfaces, sorted and comma-joined for display.
        ips = sorted(ip for i in self.user_data.interfaces for ip in i.ips)
        return ", ".join(ips)
    @active_prop
    @connection_dependent
    def mac(self):
        # Read-only while the user has several interfaces or no network access.
        return {'value': ", ".join(i.mac for i in self.user_data.interfaces),
                'tmp_readonly': len(self.user_data.interfaces) > 1 or not self.has_property('network_access')}
    # Empty setter for "edit" capability
    @mac.setter
    def mac(self, new_mac):
        pass
def change_mac_address(self, new_mac, host_name):
# if this has been reached despite `tmp_readonly`, this is a bug.
assert len(self.user_data.int | erfaces) == 1
status, result = api.change_mac(self.user_data.id, self._tmp_password,
self.user_data.interfaces[0].id,
new_mac, host_name)
if status == 401:
raise PasswordInvalid
elif status == 400:
raise MacAlreadyExists
@active_prop
@connection_dependent
def network_access_active(s | elf):
return {'value': len(self.user_data.interfaces) > 0,
'tmp_readonly': len(self.user_data.interfaces) > 0
or not self.has_property('network_access')
or self.user_data.room is None}
@network_access_active.setter
def network_access_active(self, value):
pass
    def activate_network_access(self, password, mac, birthdate, host_name):
        """Create the user's first interface via the API.

        Raises PasswordInvalid (401), MacAlreadyExists (400),
        NetworkAccessAlreadyActive (412) or SubnetFull (422).
        """
        status, result = api.activate_network_access(self.user_data.id, password, mac,
                                                     birthdate, host_name)
        if status == 401:
            raise PasswordInvalid
        elif status == 400:
            raise MacAlreadyExists
        elif status == 412:
            raise NetworkAccessAlreadyActive
        elif status == 422:
            raise SubnetFull
    def terminate_membership(self, end_date):
        """Schedule membership termination at `end_date`."""
        status, result = api.terminate_membership(self.user_data.id, end_date)
        if status == 400:
            raise TerminationNotPossible
        elif status != 200:
            raise UnknownError
    def estimate_balance(self, end_date):
        """Return the estimated account balance at the end of membership."""
        status, result = api.estimate_balance_at_end_of_membership(self.user_data.id, end_date)
        if status == 200:
            return result['estimated_balance']
        else:
            raise UnknownError
    def continue_membership(self):
        # Continue the membership (presumably cancels a scheduled
        # termination — confirm against the pycroft API).
        status, result = api.continue_membership(self.user_data.id)
        if status == 400:
            raise ContinuationNotPossible
        elif status != 200:
            raise UnknownError
    @active_prop
    def mail(self):
        # Contact mail address; editable only with the 'mail' property.
        return {'value': self.user_data.mail,
                'tmp_readonly': not self.has_property('mail')}
    @mail.setter
    def mail(self, new_mail):
        # Sends the (possibly setter-updated) mail_forwarded flag along with
        # the new address in one API call.
        status, result = api.change_mail(self.user_data.id, self._tmp_password, new_mail,
                                         self.user_data.mail_forwarded)
        if status == 401:
            raise PasswordInvalid
        elif status == 404:
            raise UserNotFound
    @active_prop
    def mail_forwarded(self):
        # Localized display string; the raw boolean is kept for the setter/API.
        value = self.user_data.mail_forwarded
        return {'raw_value': value,
                'value': gettext('Aktiviert') if value else gettext('Nicht aktiviert'),
                'tmp_readonly': not self.has_property('mail')}
    @mail_forwarded.setter
    def mail_forwarded(self, value):
        # Only updates the local model; the API call happens in the `mail`
        # setter, which passes mail_forwarded along with the address.
        self.user_data.mail_forwarded = value
    @property
    def mail_confirmed(self):
        # Hand-built ActiveProperty; "edit" is only offered while an address
        # exists and is not yet confirmed.
        confirmed = self.user_data.mail_confirmed
        editable = self.has_property('mail') and self.user_data.mail and not confirmed
        return ActiveProperty(
            name='mail_confirmed',
            value=gettext('Bestätigt') if confirmed else gettext('Nicht bestätigt'),
            style='success' if confirmed else 'danger',
            capabilities=Capabilities(edit=editable, delete=False))
    def resend_confirm_mail(self) -> bool:
        """Trigger a new confirmation mail; returns the API success flag."""
        return api.resend_confirm_email(self.user_data.id)
    @active_prop
    def address(self):
        return self.user_data.room
    @active_prop
    def status(self):
        # `evaluate_status` maps a UserStatus to (display text, style);
        # presumably inherited from BaseUser — not visible in this chunk.
        value, style = self.evaluate_status(self.user_data.status)
        return {'value': value, 'style': style}
    @active_prop
    def id(self):
        # Display id (`user_id`) — distinct from the API primary key `id`
        # used as `uid` in __init__.
        return {'value': self.user_data.user_id}
    @unsupported_prop
    def hostname(self):
        # Not offered by the pycroft backend.
        raise NotImplementedError
    @unsupported_prop
    def hostalias(self):
        raise NotImplementedError
@property
def userdb_status(self):
status = self.userdb.has_db
capabilities = Capabilities(edit=True, delete=True)
if not self.has_property("userdb"):
return UnsupportedProperty("userdb_status")
if status is None:
return ActiveProperty(name="userdb_status",
value=gettext("Datenbank nicht erreichbar"),
style='danger',
empty=True) |
KaGeN101/mantl | roles/calico/files/neutron_port_update.py | Python | apache-2.0 | 4,467 | 0.003134 | #!/usr/bin/env python
# This script updates the allowed address pairs in Neutron with the
# 'neutron port-update' command. This is required by Calico in OpenStack,
# otherwise BGP will not be working. We query OpenStack API directly to prevent
# installing any dependencies such as python-neutronclient.
#
# USAGE: script_name arg1 arg2...argN
# arg1 - Calico network, i.e. 192.168.0.0/24
# arg2...argN - VMs MAC addresses
#
# Script exit codes (for Ansible)
# 0 - port has been updated
# 1 - error
# 2 - no update to port [default]
import json
import os
import requests
import sys
def credentials():
    """Read OpenStack credentials from the OS_* environment variables.

    Writes an error and exits with status 1 if any variable is missing.
    """
    creds = {
        'username': os.environ.get('OS_USERNAME'),
        'password': os.environ.get('OS_PASSWORD'),
        'tenant_name': os.environ.get('OS_TENANT_NAME'),
        'auth_url': os.environ.get('OS_AUTH_URL'),
    }
    if not all(creds.values()):
        sys.stderr.write("ERROR: Unable to get Keystone credentials\n")
        exit(1)
    return creds
def get_catalog():
    """Get service catalog from Keystone with token and all endpoints"""
    creds = credentials()
    headers = {'Content-Type': 'application/json'}
    # Keystone v2.0 password-credentials token request body
    payload = {
        "auth":
        {
            "tenantName": creds['tenant_name'],
            "passwordCredentials": {
                "username": creds['username'],
                "password": creds['password']
            }
        }
    }
    auth_url = creds['auth_url'] + "/tokens"
    r = requests.post(auth_url, headers=headers, data=json.dumps(payload))
    parsed_json = json.loads(r.text)
    # Any 'error' key (or empty body) means authentication failed
    if not parsed_json or 'error' in parsed_json:
        sys.stderr.write("ERROR: Unable to get authentication token\n")
        exit(1)
    return parsed_json
def get_token(catalog):
    """Extract the Keystone authentication token id from a service catalog."""
    token_info = catalog['access']['token']
    return token_info['id']
def neutron_public_url(catalog):
    """Return the publicURL of the first endpoint of the 'network' service.

    Returns None when the catalog contains no network service.
    """
    for service in catalog['access']['serviceCatalog']:
        if service['type'] != 'network':
            continue
        for endpoint in service['endpoints']:
            return endpoint['publicURL']
def list_ports(token, public_url):
    """List Neutron ports visible to the token's tenant.

    Fixes a corrupted parameter token in the original (`token, | public_url`).
    Exits with status 1 when the API returns an empty body.
    """
    headers = {'X-Auth-Token': token}
    auth_url = public_url + "v2.0/ports"
    r = requests.get(auth_url, headers=headers)
    if r.text:
        parsed_json = json.loads(r.text)
        return parsed_json['ports']
    else:
        sys.stderr.write("ERROR: Unable to retrieve Neutron ports list\n")
        exit(1)
def update_port(token, public_url, port_id, mac_address, calico_network):
    """Update a Neutron port with the allowed address pairs.

    Returns the HTTP status code (200) on success; exits with status 1 on
    failure. Fixes a corrupted parameter token in the original
    (`pub | lic_url`) and avoids a KeyError when the error response carries
    no 'NeutronError' key.
    """
    headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
    payload = {
        "port": {
            "allowed_address_pairs": [
                {
                    "ip_address": calico_network,
                    "mac_address": mac_address
                }
            ]
        }
    }
    auth_url = public_url + "v2.0/ports/" + port_id
    r = requests.put(auth_url, headers=headers, data=json.dumps(payload))
    parsed_json = json.loads(r.text)
    if r.status_code != 200 or 'NeutronError' in parsed_json:
        # Fall back to the whole body when no NeutronError key is present
        error = parsed_json.get('NeutronError', parsed_json)
        sys.stderr.write("ERROR: Unable to update port: %s\n" % error)
        exit(1)
    return r.status_code
if __name__ == "__main__":
    # USAGE: script_name <calico_network> <mac1> [<mac2> ...]
    if len(sys.argv) < 3:
        sys.stderr.write("ERROR: Please run script with the correct arguments\n")
        exit(1)
    calico_network = sys.argv[1]
    vms_mac_addresses = sys.argv[2:]

    catalog = get_catalog()
    token = get_token(catalog)
    public_url = neutron_public_url(catalog)
    ports = list_ports(token, public_url)

    # Exit codes documented in the file header (consumed by Ansible):
    #   0 - port has been updated, 1 - error, 2 - no update to port [default]
    # The previous implementation had 0 and 2 swapped relative to that
    # contract; the values below follow the documented meanings.
    exit_code = 2  # no update to port [default]
    for port in ports:
        port_id = port['id']
        mac_address = port['mac_address']
        # Only touch requested MACs whose ports have no address pairs yet
        if mac_address in vms_mac_addresses and not port['allowed_address_pairs']:
            status_code = update_port(token, public_url, port_id, mac_address, calico_network)
            if status_code == 200:
                exit_code = 0  # port has been updated
    exit(exit_code)
|
DoubleNegativeVisualEffects/gaffer | python/GafferUI/VectorDataPlugValueWidget.py | Python | bsd-3-clause | 3,631 | 0.031121 | ##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import Gaffer
import GafferUI
class VectorDataPlugValueWidget( GafferUI.PlugValueWidget ) :
    """A PlugValueWidget which edits vector-data plugs with a
    spreadsheet-style VectorDataWidget.

    Fixes a corrupted token in __dataChanged in the original
    (`staticTypeId() | ) )`).
    """
    def __init__( self, plug, **kw ) :
        self.__dataWidget = GafferUI.VectorDataWidget()
        GafferUI.PlugValueWidget.__init__( self, self.__dataWidget, plug, **kw )
        # propagate user edits in the widget back to the plug
        self.__dataChangedConnection = self.__dataWidget.dataChangedSignal().connect( Gaffer.WeakMethod( self.__dataChanged ) )
        self._updateFromPlug()
    def vectorDataWidget( self ) :
        """Access to the underlying VectorDataWidget."""
        return self.__dataWidget
    def _updateFromPlug( self ) :
        # refresh the widget contents from the plug's current value
        plug = self.getPlug()
        if plug is not None :
            with self.getContext() :
                plugValue = plug.getValue()
                if plugValue is None :
                    # the VectorDataWidget isn't so keen on not having data to work with,
                    # so we'll make an empty data of the right type.
                    plugValue = plug.ValueType()
                self.__dataWidget.setData( plugValue )
        self.__dataWidget.setEditable( self._editable() )
    def __dataChanged( self, widget ) :
        assert( widget is self.__dataWidget )
        # wrap the plug edit in an undo context on the owning script
        with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode.staticTypeId() ) ) :
            self.getPlug().setValue( self.__dataWidget.getData()[0] )
# Register this widget as the UI for all vector-data plug types.
# (Fixes a corrupted token in the original: `staticTyp | eId`.)
GafferUI.PlugValueWidget.registerType( Gaffer.BoolVectorDataPlug.staticTypeId(), VectorDataPlugValueWidget )
GafferUI.PlugValueWidget.registerType( Gaffer.IntVectorDataPlug.staticTypeId(), VectorDataPlugValueWidget )
GafferUI.PlugValueWidget.registerType( Gaffer.FloatVectorDataPlug.staticTypeId(), VectorDataPlugValueWidget )
GafferUI.PlugValueWidget.registerType( Gaffer.StringVectorDataPlug.staticTypeId(), VectorDataPlugValueWidget )
GafferUI.PlugValueWidget.registerType( Gaffer.V3fVectorDataPlug.staticTypeId(), VectorDataPlugValueWidget )
|
madssj/django-longer-username-and-email | longerusernameandemail/__init__.py | Python | bsd-3-clause | 288 | 0 | from django.conf import settings
def MAX_USERNAME_LENGTH():
    """Maximum username length; settings.MAX_USERNAME_LENGTH, default 255.

    Fixes a corrupted token in the original signature (`( | )`).
    """
    return getattr(settings, "MAX_USERNAME_LENGTH", 255)
def MAX_EMAIL_LENGTH():
    """Maximum email length; settings.MAX_EMAIL_LENGTH, default 255.

    Fixes a corrupted token in the original body (`se | ttings`).
    """
    return getattr(settings, "MAX_EMAIL_LENGTH", 255)
def REQUIRE_UNIQUE_EMAIL():
    """Whether emails must be unique; settings.REQUIRE_UNIQUE_EMAIL, default True."""
    return getattr(settings, "REQUIRE_UNIQUE_EMAIL", True)
|
ruibarreira/linuxtrail | usr/lib/python2.7/dist-packages/numpy/core/tests/test_numeric.py | Python | gpl-3.0 | 65,826 | 0.004922 | from __future__ import division, absolute_import, print_function
import sys
import platform
from decimal import Decimal
import warnings
import itertools
import numpy as np
from numpy.core import *
from numpy.random import rand, randint, randn
from numpy.testing import *
from numpy.core.multiarray import dot as dot_
class Vec(object):
    """Minimal vector wrapper around a numpy array supporting +, - and
    scalar multiplication; used by TestDot.test_vecobject to exercise
    dot() on object-dtype arrays."""
    def __init__(self, sequence=None):
        self.array = array([] if sequence is None else sequence)
    def __add__(self, other):
        result = Vec()
        result.array = self.array + other.array
        return result
    def __sub__(self, other):
        result = Vec()
        result.array = self.array - other.array
        return result
    def __mul__(self, scalar):  # with scalar
        result = Vec(self.array.copy())
        result.array *= scalar
        return result
    def __rmul__(self, scalar):
        return self.__mul__(scalar)
class TestDot(TestCase):
    """Checks np.dot against the reference multiarray dot (dot_) for all
    combinations of matrix/vector/row/column/scalar operand shapes."""
    def setUp(self):
        self.A = rand(10, 8)
        self.b1 = rand(8, 1)  # column vector
        self.b2 = rand(8)  # 1-d vector
        self.b3 = rand(1, 8)  # row vector
        self.b4 = rand(10)
        self.N = 14  # decimal places required to match
    def test_matmat(self):
        A = self.A
        c1 = dot(A.transpose(), A)
        c2 = dot_(A.transpose(), A)
        assert_almost_equal(c1, c2, decimal=self.N)
    def test_matvec(self):
        A, b1 = self.A, self.b1
        c1 = dot(A, b1)
        c2 = dot_(A, b1)
        assert_almost_equal(c1, c2, decimal=self.N)
    def test_matvec2(self):
        A, b2 = self.A, self.b2
        c1 = dot(A, b2)
        c2 = dot_(A, b2)
        assert_almost_equal(c1, c2, decimal=self.N)
    def test_vecmat(self):
        A, b4 = self.A, self.b4
        c1 = dot(b4, A)
        c2 = dot_(b4, A)
        assert_almost_equal(c1, c2, decimal=self.N)
    def test_vecmat2(self):
        b3, A = self.b3, self.A
        c1 = dot(b3, A.transpose())
        c2 = dot_(b3, A.transpose())
        assert_almost_equal(c1, c2, decimal=self.N)
    def test_vecmat3(self):
        A, b4 = self.A, self.b4
        c1 = dot(A.transpose(), b4)
        c2 = dot_(A.transpose(), b4)
        assert_almost_equal(c1, c2, decimal=self.N)
    def test_vecvecouter(self):
        b1, b3 = self.b1, self.b3
        c1 = dot(b1, b3)
        c2 = dot_(b1, b3)
        assert_almost_equal(c1, c2, decimal=self.N)
    def test_vecvecinner(self):
        b1, b3 = self.b1, self.b3
        c1 = dot(b3, b1)
        c2 = dot_(b3, b1)
        assert_almost_equal(c1, c2, decimal=self.N)
    def test_columnvect1(self):
        b1 = ones((3, 1))
        b2 = [5.3]
        c1 = dot(b1, b2)
        c2 = dot_(b1, b2)
        assert_almost_equal(c1, c2, decimal=self.N)
    def test_columnvect2(self):
        b1 = ones((3, 1)).transpose()
        b2 = [6.2]
        c1 = dot(b2, b1)
        c2 = dot_(b2, b1)
        assert_almost_equal(c1, c2, decimal=self.N)
    def test_vecscalar(self):
        b1 = rand(1, 1)
        b2 = rand(1, 8)
        c1 = dot(b1, b2)
        c2 = dot_(b1, b2)
        assert_almost_equal(c1, c2, decimal=self.N)
    def test_vecscalar2(self):
        b1 = rand(8, 1)
        b2 = rand(1, 1)
        c1 = dot(b1, b2)
        c2 = dot_(b1, b2)
        assert_almost_equal(c1, c2, decimal=self.N)
    def test_all(self):
        # every combination of ()/(1,)/(1, 1) shaped operands
        dims = [(), (1,), (1, 1)]
        for dim1 in dims:
            for dim2 in dims:
                arg1 = rand(*dim1)
                arg2 = rand(*dim2)
                c1 = dot(arg1, arg2)
                c2 = dot_(arg1, arg2)
                assert_(c1.shape == c2.shape)
                assert_almost_equal(c1, c2, decimal=self.N)
    def test_vecobject(self):
        # contiguous and non-contiguous object arrays must give equal results
        U_non_cont = transpose([[1., 1.], [1., 2.]])
        U_cont = ascontiguousarray(U_non_cont)
        x = array([Vec([1., 0.]), Vec([0., 1.])])
        zeros = array([Vec([0., 0.]), Vec([0., 0.])])
        zeros_test = dot(U_cont, x) - dot(U_non_cont, x)
        assert_equal(zeros[0].array, zeros_test[0].array)
        assert_equal(zeros[1].array, zeros_test[1].array)
class TestResize(TestCase):
    """np.resize repeats the flattened input to fill the requested shape."""
    def test_copies(self):
        A = array([[1, 2], [3, 4]])
        Ar1 = array([[1, 2, 3, 4], [1, 2, 3, 4]])
        assert_equal(resize(A, (2, 4)), Ar1)
        Ar2 = array([[1, 2], [3, 4], [1, 2], [3, 4]])
        assert_equal(resize(A, (4, 2)), Ar2)
        Ar3 = array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
        assert_equal(resize(A, (4, 3)), Ar3)
    def test_zeroresize(self):
        # resizing to length 0 yields an empty array
        A = array([[1, 2], [3, 4]])
        Ar = resize(A, (0,))
        assert_equal(Ar, array([]))
class TestNonarrayArgs(TestCase):
    """Non-array (list) arguments must be wrapped in arrays by the
    free functions. Fixes two corrupted lines in test_std in the original
    (stray ` | ` artifacts before the assert_ calls)."""
    def test_squeeze(self):
        A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
        assert_(squeeze(A).shape == (3, 3))
    def test_cumproduct(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_(all(cumproduct(A) == array([1, 2, 6, 24, 120, 720])))
    def test_size(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_(size(A) == 6)
        assert_(size(A, 0) == 2)
        assert_(size(A, 1) == 3)
    def test_mean(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_(mean(A) == 3.5)
        assert_(all(mean(A, 0) == array([2.5, 3.5, 4.5])))
        assert_(all(mean(A, 1) == array([2., 5.])))
        # an empty input warns and yields nan
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_(isnan(mean([])))
            assert_(w[0].category is RuntimeWarning)
    def test_std(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_almost_equal(std(A), 1.707825127659933)
        assert_almost_equal(std(A, 0), array([1.5, 1.5, 1.5]))
        assert_almost_equal(std(A, 1), array([0.81649658, 0.81649658]))
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_(isnan(std([])))
            assert_(w[0].category is RuntimeWarning)
    def test_var(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_almost_equal(var(A), 2.9166666666666665)
        assert_almost_equal(var(A, 0), array([2.25, 2.25, 2.25]))
        assert_almost_equal(var(A, 1), array([0.66666667, 0.66666667]))
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_(isnan(var([])))
            assert_(w[0].category is RuntimeWarning)
class TestBoolScalar(TestCase):
    """np.True_ / np.False_ are singletons; logical and bitwise operations
    on them must return the singleton objects themselves (checked with `is`)."""
    def test_logical(self):
        f = False_
        t = True_
        s = "xyz"
        self.assertTrue((t and s) is s)
        self.assertTrue((f and s) is f)
    def test_bitwise_or(self):
        f = False_
        t = True_
        self.assertTrue((t | t) is t)
        self.assertTrue((f | t) is t)
        self.assertTrue((t | f) is t)
        self.assertTrue((f | f) is f)
    def test_bitwise_and(self):
        f = False_
        t = True_
        self.assertTrue((t & t) is t)
        self.assertTrue((f & t) is f)
        self.assertTrue((t & f) is f)
        self.assertTrue((f & f) is f)
    def test_bitwise_xor(self):
        f = False_
        t = True_
        self.assertTrue((t ^ t) is f)
        self.assertTrue((f ^ t) is t)
        self.assertTrue((t ^ f) is t)
        self.assertTrue((f ^ f) is f)
class TestBoolArray(TestCase):
    def setUp(self):
        # offset for simd tests
        self.t = array([True] * 41, dtype=np.bool)[1::]
        self.f = array([False] * 41, dtype=np.bool)[1::]
        self.o = array([False] * 42, dtype=np.bool)[2::]
        self.nm = self.f.copy()
        self.im = self.t.copy()
        # plant a single mismatching element near each end
        self.nm[3] = True
        self.nm[-2] = True
        self.im[3] = False
        self.im[-2] = False
def test_all_any(self):
self.assertTrue(self.t.all())
self.assertTrue(self.t.any())
self.assertFalse(self.f.all())
self.assertFalse(self.f.any())
self.assertTrue(self.nm.any())
self.assertTrue(self.im.any())
self.assertFalse(self.nm.all())
self.assertFalse(self.im.all())
# check bad element in all positions
for i in range(256 - 7):
d = array([False] * 256, dtype=np.bool)[7::]
d[i] = True
self.assertTrue(np.any(d))
|
misnyo/searx | tests/unit/engines/test_google_news.py | Python | agpl-3.0 | 4,340 | 0.00023 | # -*- coding: utf-8 -*-
from collections import defaultdict
import mock
from searx.engines import google_news
from searx.testing import SearxTestCase
class TestGoogleNewsEngine(SearxTestCase):
    """Unit tests for the google_news engine's request builder and HTML
    response parser. Fixes two corrupted tokens inside the HTML fixture in
    the original (`onmousedo | wn` and `class="f | nsa _QHs"`)."""
    def test_request(self):
        # query, language and time range must all end up in the request URL
        query = 'test_query'
        dicto = defaultdict(dict)
        dicto['pageno'] = 1
        dicto['language'] = 'fr-FR'
        dicto['time_range'] = 'w'
        params = google_news.request(query, dicto)
        self.assertIn('url', params)
        self.assertIn(query, params['url'])
        self.assertIn('fr', params['url'])
    def test_response(self):
        # malformed inputs raise; empty JSON bodies yield no results
        self.assertRaises(AttributeError, google_news.response, None)
        self.assertRaises(AttributeError, google_news.response, [])
        self.assertRaises(AttributeError, google_news.response, '')
        self.assertRaises(AttributeError, google_news.response, '[]')
        response = mock.Mock(text='{}')
        self.assertEqual(google_news.response(response), [])
        response = mock.Mock(text='{"data": []}')
        self.assertEqual(google_news.response(response), [])
        # Two-result fixture: the second entry also carries a story image
        html = u"""
        <h2 class="hd">Search Results</h2>
        <div data-async-context="query:searx" id="ires">
          <div eid="oC2oWcGXCafR6ASkwoCwDA" id="rso">
            <div class="_NId">
              <!--m-->
              <div class="g _cy">
                <div class="ts _JGs _JHs _tJs _KGs _jHs">
                  <div class="_hJs">
                    <h3 class="r _gJs">
                      <a class="l _PMs" href="https://example.com/" onmousedown="return rwt(this,'','','','11','AFQjCNEyehpzD5cJK1KUfXBx9RmsbqqG9g','','0ahUKEwjB58OR54HWAhWnKJoKHSQhAMY4ChCpAggiKAAwAA','','',event)">Example title</a>
                    </h3>
                    <div class="slp">
                      <span class="_OHs _PHs">
                        Mac &amp; i</span>
                      <span class="_QGs">
                        -</span>
                      <span class="f nsa _QHs">
                        Mar 21, 2016</span>
                    </div>
                    <div class="st">Example description</div>
                  </div>
                </div>
              </div>
              <div class="g _cy">
                <div class="ts _JGs _JHs _oGs _KGs _jHs">
                  <a class="top _xGs _SHs" href="https://example2.com/" onmousedown="return rwt(this,'','','','12','AFQjCNHObfH7sYmLWI1SC-YhWXKZFRzRjw','','0ahUKEwjB58OR54HWAhWnKJoKHSQhAMY4ChC8iAEIJDAB','','',event)">
                    <img class="th _RGs" src="https://example2.com/image.jpg" alt="Story image for searx from Golem.de" onload="typeof google==='object'&amp;&amp;google.aft&amp;&amp;google.aft(this)">
                  </a>
                  <div class="_hJs">
                    <h3 class="r _gJs">
                      <a class="l _PMs" href="https://example2.com/" onmousedown="return rwt(this,'','','','12','AFQjCNHObfH7sYmLWI1SC-YhWXKZFRzRjw','','0ahUKEwjB58OR54HWAhWnKJoKHSQhAMY4ChCpAgglKAAwAQ','','',event)">Example title 2</a>
                    </h3>
                    <div class="slp">
                      <span class="_OHs _PHs">
                        Golem.de</span>
                      <span class="_QGs">
                        -</span>
                      <span class="f nsa _QHs">
                        Oct 4, 2016</span>
                    </div>
                    <div class="st">Example description 2</div>
                  </div>
                </div>
              </div>
            </div>
          </div>
        </div>
        """  # noqa
        response = mock.Mock(text=html)
        results = google_news.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 2)
        self.assertEqual(results[0]['title'], u'Example title')
        self.assertEqual(results[0]['url'], 'https://example.com/')
        self.assertEqual(results[0]['content'], 'Example description')
        self.assertEqual(results[1]['title'], u'Example title 2')
        self.assertEqual(results[1]['url'], 'https://example2.com/')
        self.assertEqual(results[1]['content'], 'Example description 2')
        self.assertEqual(results[1]['img_src'], 'https://example2.com/image.jpg')
|
HUGG/NGWM2016-modelling-course | Lessons/02-Physics-of-heat-transfer/scripts/1D_intrusion.py | Python | mit | 3,951 | 0.033662 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# 1D_intrusion.py
#
# This script plots the cooling of a 1D intrusion with time
#
# dwhipp 09.13
#--- User-defined input values ------------------------------------------------#
Ti=700. # Intrusion temperature [deg. C]
Tb=200. # Background temperature [deg. C]
l=2. # Intrusion width [km]
w=10. # Model width [km]
kappa=1e-6 # Thermal diffusivity [m^2 s-1]
t1=10. # Time 1 [a]
t2=100. # Time 2 [a]
t3=1000. # Time 3 [a]
t4=10000. # Time 4 [a]
t5=100000. # Time 5 [a]
t6=1000000. # Time 6 [a]
numpts=101 # Number of points across w
#--- End user-defined input ---------------------------------------------------#
#--- DO NOT MODIFY ANYTHING BELOW THIS LINE -----------------------------------#
# Import libraries
import pylab,scipy,scipy.special
# Scale input values
l=l*1000. | # km -> m
w=w*1000. # km -> m
t1=t1*365.25*24*3600
t2=t2*365.25*24*3600
t3= | t3*365.25*24*3600
t4=t4*365.25*24*3600
t5=t5*365.25*24*3600
t6=t6*365.25*24*3600
# Parameter ranges
x=pylab.linspace(-w,w,numpts);
# Temperature calculation
T1=Tb+((Ti-Tb)/2)*(scipy.special.erf((0.5*l-x)/(scipy.sqrt(4*kappa*t1)))+scipy.special.erf((0.5*l+x)/(scipy.sqrt(4*kappa*t1))))
T2=Tb+((Ti-Tb)/2)*(scipy.special.erf((0.5*l-x)/(scipy.sqrt(4*kappa*t2)))+scipy.special.erf((0.5*l+x)/(scipy.sqrt(4*kappa*t2))))
T3=Tb+((Ti-Tb)/2)*(scipy.special.erf((0.5*l-x)/(scipy.sqrt(4*kappa*t3)))+scipy.special.erf((0.5*l+x)/(scipy.sqrt(4*kappa*t3))))
T4=Tb+((Ti-Tb)/2)*(scipy.special.erf((0.5*l-x)/(scipy.sqrt(4*kappa*t4)))+scipy.special.erf((0.5*l+x)/(scipy.sqrt(4*kappa*t4))))
T5=Tb+((Ti-Tb)/2)*(scipy.special.erf((0.5*l-x)/(scipy.sqrt(4*kappa*t5)))+scipy.special.erf((0.5*l+x)/(scipy.sqrt(4*kappa*t5))))
T6=Tb+((Ti-Tb)/2)*(scipy.special.erf((0.5*l-x)/(scipy.sqrt(4*kappa*t6)))+scipy.special.erf((0.5*l+x)/(scipy.sqrt(4*kappa*t6))))
# Rescale for plotting
x=x/1000.
w=w/1000.
l=l/1000.
t1=t1/(365.25*24*3600)
t2=t2/(365.25*24*3600)
t3=t3/(365.25*24*3600)/1000.0
t4=t4/(365.25*24*3600)/1000.0
t5=t5/(365.25*24*3600)/1000.0
t6=t6/(365.25*24*3600)/1000000.0
# Initial intrusion geometry
xgeom=pylab.array([-w,0.-l/2.,0.-l/2.,0.+l/2.,0.+l/2.,w])
Tgeom=pylab.array([Tb,Tb,Ti,Ti,Tb,Tb])
# Plot geotherm
pylab.plot(xgeom,Tgeom,linewidth=2,color='black')
pylab.plot(x,T1,'black',label=str(t1)+' a',lw=2)
pylab.scatter(x,T1,s=8,color='black')
pylab.plot(x,T2,'red',label=str(t2)+' a',lw=2)
pylab.scatter(x,T2,s=8,color='red')
pylab.plot(x,T3,'blue',label=str(t3)+' ka',lw=2)
pylab.scatter(x,T3,s=8,color='blue')
pylab.plot(x,T4,'green',label=str(t4)+' ka',lw=2)
pylab.scatter(x,T4,s=8,color='green')
pylab.plot(x,T5,'purple',label=str(t5)+' ka',lw=2)
pylab.scatter(x,T5,s=8,color='purple')
pylab.plot(x,T6,'gray',label=str(t6)+' Ma',lw=2)
pylab.scatter(x,T6,s=8,color='gray')
pylab.axis([-w, w, Tb-50., Ti+50.])
pylab.text(780,5,'1D geotherm with advection',color='k')
pylab.xlabel('Width [km]')
pylab.ylabel('Temperature [$^\circ$C]')
pylab.legend()
pylab.show()
|
ChinaMassClouds/copenstack-server | openstack/src/ceilometer-2014.2.2/ceilometer/alarm/storage/impl_sqlalchemy.py | Python | gpl-2.0 | 12,576 | 0 | #
# Author: John Tran <jhtran@att.com>
# Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
from __future__ import absolute_import
import os
from oslo.config import cfg
from oslo.db.sqlalchemy import session as db_session
from sqlalchemy import desc
import ceilometer
from ceilometer.alarm.storage import base
from ceilometer.alarm.storage import models as alarm_api_models
from ceilometer.openstack.common import log
from ceilometer.storage.sqlalchemy import models
from ceilometer.storage.sqlalchemy import utils as sql_utils
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'alarms': {'query': {'simple': True,
'complex': True},
'history': {'query': {'simple': True,
'complex': True}}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(base.Connection):
"""Put the data into a SQLAlchemy database.
Tables::
- meter
- meter definition
- { id: meter def id
name: meter name
              type: meter type
unit: meter unit
}
- sample
- the raw incoming data
- { id: sample id
meter_id: meter id (->meter.id)
user_id: user uuid
project_id: project uuid
              resource_id: resource uuid
source_id: source id
resource_metadata: metadata dictionaries
volume: sample volume
timestamp: datetime
message_signature: message signature
message_id: message uuid
}
"""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
    def __init__(self, url):
        # EngineFacade manages engine/session creation for the DB URL; all
        # oslo.db options from the [database] config section pass through.
        self._engine_facade = db_session.EngineFacade(
            url,
            **dict(cfg.CONF.database.items())
        )
    def upgrade(self):
        """Migrate the database schema to the latest version."""
        # NOTE(gordc): to minimise memory, only import migration when needed
        from oslo.db.sqlalchemy import migration
        path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                            '..', '..', 'storage', 'sqlalchemy',
                            'migrate_repo')
        migration.db_sync(self._engine_facade.get_engine(), path)
    def clear(self):
        """Delete all rows (reverse FK order) and dispose of the engine."""
        engine = self._engine_facade.get_engine()
        for table in reversed(models.Base.metadata.sorted_tables):
            engine.execute(table.delete())
        self._engine_facade._session_maker.close_all()
        engine.dispose()
    def _retrieve_data(self, filter_expr, orderby, limit, table):
        # Shared driver for complex queries on the Alarm / AlarmChange tables.
        if limit == 0:
            return []
        session = self._engine_facade.get_session()
        query = session.query(table)
        transformer = sql_utils.QueryTransformer(table, query)
        if filter_expr is not None:
            transformer.apply_filter(filter_expr)
        transformer.apply_options(orderby,
                                  limit)
        # dispatch on the table to the matching row-to-model converter
        retrieve = {models.Alarm: self._retrieve_alarms,
                    models.AlarmChange: self._retrieve_alarm_history}
        return retrieve[table](transformer.get_query())
    @staticmethod
    def _row_to_alarm_model(row):
        # Convert a SQLAlchemy Alarm row into the alarm API model.
        return alarm_api_models.Alarm(alarm_id=row.alarm_id,
                                      enabled=row.enabled,
                                      type=row.type,
                                      name=row.name,
                                      description=row.description,
                                      timestamp=row.timestamp,
                                      user_id=row.user_id,
                                      project_id=row.project_id,
                                      state=row.state,
                                      state_timestamp=row.state_timestamp,
                                      ok_actions=row.ok_actions,
                                      alarm_actions=row.alarm_actions,
                                      insufficient_data_actions=(
                                          row.insufficient_data_actions),
                                      rule=row.rule,
                                      time_constraints=row.time_constraints,
                                      repeat_actions=row.repeat_actions)
    def _retrieve_alarms(self, query):
        # Lazily convert all rows of the query into API models.
        return (self._row_to_alarm_model(x) for x in query.all())
    def get_alarms(self, name=None, user=None, state=None, meter=None,
                   project=None, enabled=None, alarm_id=None, pagination=None):
        """Yields a lists of alarms that match filters
        :param user: Optional ID for user that owns the resource.
        :param state: Optional string for alarm state.
        :param meter: Optional string for alarms associated with meter.
        :param project: Optional ID for project that owns the resource.
        :param enabled: Optional boolean to list disable alarm.
        :param alarm_id: Optional alarm_id to return one alarm.
        :param pagination: Optional pagination query.
        """
        if pagination:
            raise ceilometer.NotImplementedError('Pagination not implemented')
        session = self._engine_facade.get_session()
        query = session.query(models.Alarm)
        if name is not None:
            query = query.filter(models.Alarm.name == name)
        if enabled is not None:
            query = query.filter(models.Alarm.enabled == enabled)
        if user is not None:
            query = query.filter(models.Alarm.user_id == user)
        if project is not None:
            query = query.filter(models.Alarm.project_id == project)
        if alarm_id is not None:
            query = query.filter(models.Alarm.alarm_id == alarm_id)
        if state is not None:
            query = query.filter(models.Alarm.state == state)
        alarms = self._retrieve_alarms(query)
        # The meter name lives inside the JSON rule column, so it is
        # filtered in Python rather than in SQL.
        # TODO(cmart): improve this by using sqlalchemy.func factory
        if meter is not None:
            alarms = filter(lambda row:
                            row.rule.get('meter_name', None) == meter,
                            alarms)
        return alarms
    def create_alarm(self, alarm):
        """Create an alarm.
        :param alarm: The alarm to create.
        """
        session = self._engine_facade.get_session()
        with session.begin():
            alarm_row = models.Alarm(alarm_id=alarm.alarm_id)
            alarm_row.update(alarm.as_dict())
            session.add(alarm_row)
        # return the stored row converted back to the API model
        return self._row_to_alarm_model(alarm_row)
    def update_alarm(self, alarm):
        """Update an alarm.
        :param alarm: the new Alarm to update
        """
        session = self._engine_facade.get_session()
        with session.begin():
            # merge attaches/loads the existing row by primary key
            alarm_row = session.merge(models.Alarm(alarm_id=alarm.alarm_id))
            alarm_row.update(alarm.as_dict())
        return self._row_to_alarm_model(alarm_row)
    def delete_alarm(self, alarm_id):
        """Delete an alarm
        :param alarm_id: ID of the alarm to delete
        """
        session = self._engine_facade.get_session()
        with session.begin():
            session.query(models.Alarm).filter(
                models.Alarm.alarm_id == alarm_id).delete()
@staticmethod
def _row_to_alarm_change_model(row):
return alarm_api_model |
PaloAltoNetworks/minemeld-core | minemeld/ft/dag.py | Python | apache-2.0 | 20,185 | 0.000149 | # Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import yaml
import netaddr
import os
import re
import collections
import itertools
import shutil
import gevent
import gevent.queue
import gevent.event
import pan.xapi
from . import base
from . import actorbase
from . import table
from .utils import utc_millisec
LOG = logging.getLogger(__name__)
SUBRE = re.compile("[^A-Za-z0-9_]")
class DevicePusher(gevent.Greenlet):
    def __init__(self, device, prefix, watermark, attributes, persistent):
        """Greenlet that pushes dynamic address group updates to one PAN-OS device.

        :param device: dict of connection parameters for the device
            (tag, api_username/api_password or api_key, port, hostname,
            serial; 'vsys' is read later by the API calls).
        :param prefix: string prepended to every tag owned by this node.
        :param watermark: tag suffix marking entries owned by this node.
        :param attributes: indicator attributes to convert into tags
            (see _tags_from_value).
        :param persistent: whether registered entries should be flagged
            persistent on the device.
        """
        super(DevicePusher, self).__init__()

        self.device = device
        # one XAPI session per pusher; missing keys default to None and are
        # resolved by pan.xapi itself (e.g. from .panrc via 'tag')
        self.xapi = pan.xapi.PanXapi(
            tag=self.device.get('tag', None),
            api_username=self.device.get('api_username', None),
            api_password=self.device.get('api_password', None),
            api_key=self.device.get('api_key', None),
            port=self.device.get('port', None),
            hostname=self.device.get('hostname', None),
            serial=self.device.get('serial', None)
        )

        self.prefix = prefix
        self.attributes = attributes
        self.watermark = watermark
        self.persistent = persistent

        # work queue consumed by the greenlet; fed via put()
        self.q = gevent.queue.Queue()
    def put(self, op, address, value):
        """Queue one operation for delivery to the device.

        :param op: operation name, e.g. 'register'/'unregister'; 'init' and
            'EOI' are used during the initial resync (see _init_resync).
        :param address: IP address the operation applies to.
        :param value: indicator value dict used to derive tags.
        """
        LOG.debug('adding %s:%s to device queue', op, address)
        self.q.put([op, address, value])
def _get_registered_ip_tags(self, ip):
self.xapi.op(
cmd='<show><object><registered-ip><ip>%s</ip></registered-ip></object></show>' % ip,
vsys=self.device.get('vsys', None),
cmd_xml=False
)
entries = self.xapi.element_root.findall('./result/entry')
if entries is None or len(entries) == 0:
LOG.warning('%s: ip %s has no tags', self.device.get('hostname', None), ip)
return None
tags = [member.text for member in entries[0].findall('./tag/member')
if member.text and member.text.startswith(self.prefix)]
return tags
    def _get_all_registered_ips(self):
        """Yield (ip, tags) for each address carrying this node's watermark tag.

        Generator over the device's registered-ip table, filtered server-side
        by the watermark tag; the per-address tags are then fetched via
        _get_registered_ip_tags (already filtered to this node's prefix).
        Yields nothing when the device has no matching entries.
        """
        cmd = (
            '<show><object><registered-ip><tag><entry name="%s%s"/></tag></registered-ip></object></show>' %
            (self.prefix, self.watermark)
        )
        self.xapi.op(
            cmd=cmd,
            vsys=self.device.get('vsys', None),
            cmd_xml=False
        )

        entries = self.xapi.element_root.findall('./result/entry')
        if not entries:
            return

        for entry in entries:
            ip = entry.get("ip")
            yield ip, self._get_registered_ip_tags(ip)
def _dag_message(self, type_, addresses):
message = [
"<uid-message>",
"<version>1.0</version>",
"<type>update</type>",
"<payload>"
]
persistent = ''
if type_ == 'register':
persistent = ' persistent="%d"' % (1 if self.persistent else 0)
message.append('<%s>' % type_)
if addresses is not None and len(addresses) != 0:
akeys = sorted(addresses.keys())
for a in akeys:
message.append(
'<entry ip="%s"%s>' % (a, persistent)
)
tags = sorted(addresses[a])
if tags is not None:
message.append('<tag>')
for t in tags:
message.append('<member>%s</member>' % t)
message.append('</tag>')
message.append('</entry>')
message.append('</%s>' % type_)
message.append('</payload></uid-message>')
return ''.join(message)
    def _user_id(self, cmd=None):
        """Send a user-id API call (register/unregister payload) to the device.

        PAN-OS errors that only indicate the desired state is already in
        place (duplicate register, unregister of a missing entry, failed
        register) are swallowed; anything else is logged and re-raised.
        GreenletExit is always propagated so the pusher can be killed.
        """
        try:
            self.xapi.user_id(cmd=cmd,
                              vsys=self.device.get('vsys', None))
        except gevent.GreenletExit:
            raise
        except pan.xapi.PanXapiError as e:
            LOG.debug('%s', e)
            # benign errors: the device is already in the requested state
            if 'already exists, ignore' in str(e):
                pass
            elif 'does not exist, ignore unreg' in str(e):
                pass
            elif 'Failed to register' in str(e):
                pass
            else:
                LOG.exception('XAPI exception in pusher for device %s: %s',
                              self.device.get('hostname', None), str(e))
                raise
    def _tags_from_value(self, value):
        """Derive the set of device tags for an indicator value dict.

        For each configured attribute: 'confidence' is bucketed into
        low/medium/high (<50, <75, otherwise); list values produce one tag
        per element; other values produce a single '<prefix><attr>_<value>'
        tag with non-alphanumeric characters replaced by '_'. Attributes
        missing from *value* yield a '<prefix><attr>_unknown' tag.

        :param value: indicator value dict.
        :return: set of tag strings (duplicates removed).
        """
        result = []

        def _tag(t, v):
            # Python 2: force unicode values to ascii so they are safe in
            # XML tag names; everything else goes through str()
            if type(v) == unicode:
                v = v.encode('ascii', 'replace')
            else:
                v = str(v)
            v = SUBRE.sub('_', v)

            tag = '%s%s_%s' % (self.prefix, t, v)

            return tag

        for t in self.attributes:
            if t in value:
                if t == 'confidence':
                    # bucket numeric confidence instead of emitting raw values
                    confidence = value[t]
                    if confidence < 50:
                        tag = '%s%s_low' % (self.prefix, t)
                    elif confidence < 75:
                        tag = '%s%s_medium' % (self.prefix, t)
                    else:
                        tag = '%s%s_high' % (self.prefix, t)
                    result.append(tag)
                else:
                    LOG.debug('%s %s %s', t, value[t], type(value[t]))
                    if isinstance(value[t], list):
                        for v in value[t]:
                            LOG.debug('%s', v)
                            result.append(_tag(t, v))
                    else:
                        result.append(_tag(t, value[t]))
            else:
                # XXX noop for this case?
                result.append('%s%s_unknown' % (self.prefix, t))

        LOG.debug('%s', result)

        return set(result)  # XXX eliminate duplicates
def _push(self, op, address, value):
tags = []
tags.append('%s%s' % (self.prefix, self.watermark))
tags += self._tags_from_value(value)
if len(tags) == 0:
tags = None
msg = self._dag_message(op, {address: tags})
s | elf._user_id(cmd=msg)
def _init_resync(self):
ctags = collections.defaultdict(set)
while True:
op, address, value = self.q.get()
if op == 'EOI':
break
if op != 'init':
raise RuntimeError(
'DevicePusher %s - wrong op %s received in init phase' %
(self.device.get('hostname', None), op)
)
ctags[address].add('%s% | s' % (self.prefix, self.watermark))
for t in self._tags_from_value(value):
ctags[address].add(t)
LOG.debug('%s', ctags)
register = collections.defaultdict(list)
unregister = collections.defaultdict(list)
for a, atags in self._get_all_registered_ips():
regtags = set()
if atags is not None:
for t in atags:
regtags.add(t)
added = ctags[a] - regtags
removed = regtags - ctags[a]
for t in added:
register[a].append(t)
for t in removed:
unregister[a].append(t)
ctags.pop(a)
# ips not in firewall
for a, atags in ctags.iteritems():
register[a] = atags
LOG.debug('register %s', register)
LOG.debug('unregister %s', unregister)
# XXX use constant for chunk size
if len(register) != 0:
addrs = iter(register)
for i in xrange(0, len(register), 1000):
rmsg = self._dag_message(
'register',
|
pyro-ppl/numpyro | numpyro/contrib/einstein/steinvi.py | Python | apache-2.0 | 18,091 | 0.001879 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from collections import namedtuple
import functools
from functools import partial
from itertools import chain
import operator
from typing import Callable
import jax
import jax.numpy as jnp
import jax.random
from jax.tree_util import tree_map
from numpyro import handlers
from numpyro.contrib.einstein.kernels import SteinKernel
from numpyro.contrib.einstein.util import batch_ravel_pytree, get_parameter_transform
from numpyro.contrib.funsor import config_enumerate, enum
from numpyro.distributions import Distribution, Normal
from numpyro.distributions.constraints import real
from numpyro.distributions.transforms import IdentityTransform
from numpyro.infer.autoguide import AutoGuide
from numpyro.infer.util import _guess_max_plate_nesting, transform_fn
from numpyro.util import fori_collect, ravel_pytree
SteinVIState = namedtuple("SteinVIState", ["optim_state", "rng_key"])
SteinVIRunResult = namedtuple("SteinRunResult", ["params", "state", "losses"])
def _numel(shape):
return functools.reduce(operator.mul, shape, 1)
class SteinVI:
"""Stein variational inference for stein mixtures.
:param model: Python callable with Pyro primitives for the model.
:param guide: Python callable with Pyro primitives for the guide
(recognition network).
:param optim: an instance of :class:`~numpyro.optim._NumpyroOptim`.
:param loss: ELBO loss, i.e. negative Evidence Lower Bound, to minimize.
:param kernel_fn: Function that produces a logarithm of the statistical kernel to use with Stein inference
:param num_particles: number of particles for Stein inference.
(More particles capture more of the posterior distribution)
:param loss_temperature: scaling of loss factor
:param repulsion_temperature: scaling of repulsive forces (Non-linear Stein)
:param enum: whether to apply automatic marginalization of discrete variables
:param classic_guide_param_fn: predicate on names of parameters in guide which should be optimized classically
without Stein (E.g. parameters for large normal networks or other transformation)
:param static_kwargs: Static keyword arguments for the model / guide, i.e. arguments
that remain constant during fitting.
"""
    def __init__(
        self,
        model,
        guide,
        optim,
        loss,
        kernel_fn: SteinKernel,
        num_particles: int = 10,
        loss_temperature: float = 1.0,
        repulsion_temperature: float = 1.0,
        classic_guide_params_fn: Callable[[str], bool] = lambda name: False,
        enum=True,
        **static_kwargs,
    ):
        # See the class docstring for parameter semantics.
        self._inference_model = model
        self.model = model
        self.guide = guide
        self.optim = optim
        self.loss = loss
        self.kernel_fn = kernel_fn
        self.static_kwargs = static_kwargs
        self.num_particles = num_particles
        self.loss_temperature = loss_temperature
        self.repulsion_temperature = repulsion_temperature
        self.enum = enum
        self.classic_guide_params_fn = classic_guide_params_fn
        # The following are populated lazily during setup/init — TODO confirm
        # which method fills them in (not visible in this chunk).
        self.guide_param_names = None
        self.constrain_fn = None
        self.uconstrain_fn = None
        self.particle_transform_fn = None
        self.particle_transforms = None
def _apply_kernel(self, kernel, x, y, v):
if self.kernel_fn.mode == "norm" or self.kernel_fn.mode == "vector":
return kernel(x, y) * v
else:
return kernel(x, y) @ v
    def _kernel_grad(self, kernel, x, y):
        """Gradient of the kernel w.r.t. its first argument, evaluated at (x, y).

        'norm' mode: plain gradient of the scalar kernel.
        'vector' mode: diagonal of the Jacobian, i.e. d kernel(x, y)[i] / d x[i].
        Any other mode (matrix kernel): for each row l, the sum over m of
        d kernel(x, y)[l, m] / d x[m].
        """
        if self.kernel_fn.mode == "norm":
            return jax.grad(lambda x: kernel(x, y))(x)
        elif self.kernel_fn.mode == "vector":
            # vmap over coordinates; each grad is taken of one output entry
            return jax.vmap(lambda i: jax.grad(lambda x: kernel(x, y)[i])(x)[i])(
                jnp.arange(x.shape[0])
            )
        else:
            # nested vmap: outer over rows l, inner over columns m
            return jax.vmap(
                lambda l: jnp.sum(
                    jax.vmap(lambda m: jax.grad(lambda x: kernel(x, y)[l, m])(x)[m])(
                        jnp.arange(x.shape[0])
                    )
                )
            )(jnp.arange(x.shape[0]))
def _param_size(self, param):
if isinstance(param, tuple) or isinstance(param, list):
return sum(map(self._param_size, param))
return param.size
    def _calc_particle_info(self, uparams, num_particles, start_index=0):
        """Map each unconstrained parameter to its slice of a flattened particle.

        :param uparams: (possibly nested) dict of per-particle parameter
            arrays; leaf sizes must be divisible by num_particles.
        :param num_particles: number of particles the arrays are batched over.
        :param start_index: offset at which this (sub-)dict's slices begin.
        :return: (info, end_index) where info mirrors the dict structure with
            leaves replaced by (start, end) index pairs into a single
            flattened particle. Keys are processed in sorted order so the
            layout is deterministic.
        """
        uparam_keys = list(uparams.keys())
        uparam_keys.sort()
        res = {}
        end_index = start_index
        for k in uparam_keys:
            if isinstance(uparams[k], dict):
                # recurse into nested parameter groups
                res_sub, end_index = self._calc_particle_info(
                    uparams[k], num_particles, start_index
                )
                res[k] = res_sub
            else:
                # per-particle size = total size divided by the particle batch
                end_index = start_index + self._param_size(uparams[k]) // num_particles
                res[k] = (start_index, end_index)
            start_index = end_index
        return res, end_index
def _find_init_params(self, particle_seed, inner_guide, inner_guide_trace):
def extract_info(site):
nonlocal particle_seed
name = site["name"]
value = site["value"]
constraint = site["kwargs"].get("constraint", real)
transform = get_parameter_transform(site)
if (
isinstance(inner_guide, AutoGuide)
and "_".join((inner_guide.prefix, "loc")) in name
):
site_key, particle_seed = jax.random.split(particle_seed)
unconstrained_shape = transform.inverse_shape(value.shape)
init_value = jnp.expand_dims(
transform.inv(value), 0
) + Normal( # Add gaussian noise
scale=0.1
).sample(
particle_seed, (self.num_particles, *unconstrained_shape)
)
init_value = transform(init_value)
else:
site_fn = site["fn"]
site_args = site["args"]
site_key, particle_seed = jax.random.split(particle_seed)
def _reinit(seed):
with handlers.seed(rng_seed=seed):
return site_fn(*site_args)
init_value = jax.vmap(_reinit)(
jax.random.split(particle_seed, self.num_particles)
)
return init_value, constraint
| init_params = {
name: extract_info(site)
for name, site in inner_guide_trace.items()
if site.get("type") == "param"
}
return init_params
def _svgd_loss_and_grads(self, rng_key, unconstr_params, *args, **kwargs):
# 0. Separate model and guide parameters, since only guide parameters are updated using Stein
classic_uparams = {
p: v
for p, v i | n unconstr_params.items()
if p not in self.guide_param_names or self.classic_guide_params_fn(p)
}
stein_uparams = {
p: v for p, v in unconstr_params.items() if p not in classic_uparams
}
# 1. Collect each guide parameter into monolithic particles that capture correlations
# between parameter values across each individual particle
stein_particles, unravel_pytree, unravel_pytree_batched = batch_ravel_pytree(
stein_uparams, nbatch_dims=1
)
particle_info, _ = self._calc_particle_info(
stein_uparams, stein_particles.shape[0]
)
# 2. Calculate loss and gradients for each parameter
def scaled_loss(rng_key, classic_params, stein_params):
params = {**classic_params, **stein_params}
loss_val = self.loss.loss(
rng_key,
params,
handlers.scale(self._inference_model, self.loss_temperature),
self.guide,
*args,
**kwargs,
)
return -loss_val
def kernel_particle_loss_fn(ps):
return scaled_loss(
rng_key,
self.constr |
lcy-seso/Paddle | python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py | Python | apache-2.0 | 5,038 | 0.000397 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
import math
import sys
# WMT-14 vocabulary and model hyper-parameters for this memory-optimization test.
dict_size = 30000  # shared vocabulary size for source and target
source_dict_dim = target_dict_dim = dict_size
src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size)
hidden_dim = 32  # LSTM hidden size
word_dim = 16  # word embedding size
IS_SPARSE = True  # sparse gradient updates for the embedding tables
batch_size = 10
max_length = 50  # NOTE(review): not referenced below — possibly dead config
topk_size = 50  # NOTE(review): not referenced below — possibly dead config
trg_dic_size = 10000  # NOTE(review): not referenced below — possibly dead config
decoder_size = hidden_dim
# need to fix random seed and training data to compare the loss
# value accurately calculated by the default and the memory optimization
# version.
fluid.default_startup_program().random_seed = 111
def encoder_decoder():
    """Build the seq2seq graph: LSTM encoder + single-layer RNN decoder.

    Returns the decoder output (per-step softmax over the target
    vocabulary). Note that source and target embeddings share weights via
    the common 'vemb' parameter name.
    """
    # encoder
    src_word_id = layers.data(
        name="src_word_id", shape=[1], dtype='int64', lod_level=1)
    src_embedding = layers.embedding(
        input=src_word_id,
        size=[dict_size, word_dim],
        dtype='float32',
        is_sparse=IS_SPARSE,
        param_attr=fluid.ParamAttr(name='vemb'))

    fc1 = fluid.layers.fc(input=src_embedding, size=hidden_dim * 4, act='tanh')
    lstm_hidden0, lstm_0 = layers.dynamic_lstm(input=fc1, size=hidden_dim * 4)
    # use the last hidden state of the source sequence as the sentence encoding
    encoder_out = layers.sequence_last_step(input=lstm_hidden0)

    # decoder
    trg_language_word = layers.data(
        name="target_language_word", shape=[1], dtype='int64', lod_level=1)
    trg_embedding = layers.embedding(
        input=trg_language_word,
        size=[dict_size, word_dim],
        dtype='float32',
        is_sparse=IS_SPARSE,
        param_attr=fluid.ParamAttr(name='vemb'))

    rnn = fluid.layers.DynamicRNN()
    with rnn.block():
        current_word = rnn.step_input(trg_embedding)
        # decoder memory is initialized from the encoder output
        mem = rnn.memory(init=encoder_out)
        fc1 = fluid.layers.fc(input=[current_word, mem],
                              size=decoder_size,
                              act='tanh')
        out = fluid.layers.fc(input=fc1, size=target_dict_dim, act='softmax')
        rnn.update_memory(mem, fc1)
        rnn.output(out)
    return rnn()
def to_lodtensor(data, place):
    """Pack a batch of variable-length id sequences into a fluid LoDTensor.

    :param data: list of sequences (each a list/array of int64 word ids).
    :param place: device place (e.g. core.CPUPlace()) to hold the tensor.
    :return: LoDTensor with the flattened ids (shape [N, 1]) and a level-0
        LoD recording the cumulative sequence offsets.
    """
    # fix: the loop below was corrupted ('for l in | seq_lens:'); restored,
    # and the ambiguous loop variable 'l' renamed
    seq_lens = [len(seq) for seq in data]
    cur_len = 0
    lod = [cur_len]
    for seq_len in seq_lens:
        cur_len += seq_len
        lod.append(cur_len)
    flattened_data = np.concatenate(data, axis=0).astype("int64")
    flattened_data = flattened_data.reshape([len(flattened_data), 1])
    res = core.LoDTensor()
    res.set(flattened_data, place)
    res.set_lod([lod])
    return res
def main():
    """Train the seq2seq model for a few batches as a memory-release smoke test.

    Builds the graph, applies fluid.release_memory, runs at most 4 batches
    on CPU and exits early; fails the process if the loss goes NaN.
    """
    rnn_out = encoder_decoder()
    label = layers.data(
        name="target_language_next_word", shape=[1], dtype='int64', lod_level=1)
    cost = layers.cross_entropy(input=rnn_out, label=label)
    avg_cost = fluid.layers.mean(cost)

    optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4)
    optimizer.minimize(avg_cost)

    # fluid.memory_optimize(fluid.default_main_program())
    fluid.release_memory(fluid.default_main_program())

    # fix the order of training data
    train_data = paddle.batch(
        paddle.dataset.wmt14.train(dict_size), batch_size=batch_size)

    # train_data = paddle.batch(
    #     paddle.reader.shuffle(
    #         paddle.dataset.wmt14.train(dict_size), buf_size=1000),
    #     batch_size=batch_size)

    place = core.CPUPlace()
    exe = Executor(place)

    exe.run(framework.default_startup_program())

    batch_id = 0
    for pass_id in xrange(10):
        for data in train_data():
            # Python 2: map() returns lists here, consumed by to_lodtensor
            word_data = to_lodtensor(map(lambda x: x[0], data), place)
            trg_word = to_lodtensor(map(lambda x: x[1], data), place)
            trg_word_next = to_lodtensor(map(lambda x: x[2], data), place)
            outs = exe.run(fluid.default_main_program(),
                           feed={
                               'src_word_id': word_data,
                               'target_language_word': trg_word,
                               'target_language_next_word': trg_word_next
                           },
                           fetch_list=[avg_cost])
            avg_cost_val = np.array(outs[0])
            print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
                  " avg_cost=" + str(avg_cost_val))
            # smoke test only: stop after a handful of batches
            if batch_id > 2:
                exit(0)
            if math.isnan(float(avg_cost_val)):
                sys.exit("got NaN loss, training failed.")
            batch_id += 1
# Run the training smoke test when executed as a script.
if __name__ == '__main__':
    main()
|
wujuguang/motor | motor/motor_gridfs.py | Python | apache-2.0 | 19,133 | 0.003084 | # Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future | __ import unicode_literals, absolute_import
"""GridFS implementation for Motor, an asynchronous driver for MongoDB."""
import textwrap
import | gridfs
import pymongo
import pymongo.errors
from gridfs import grid_file
from motor.core import (AgnosticBaseCursor,
AgnosticCollection,
AgnosticDatabase,
PY35)
from motor.docstrings import *
from motor.metaprogramming import (AsyncCommand,
AsyncRead,
coroutine_annotation,
create_class_with_framework,
DelegateMethod,
motor_coroutine,
MotorCursorChainingMethod,
ReadOnlyProperty)
class AgnosticGridOutCursor(AgnosticBaseCursor):
    """Async cursor over GridFS files, wrapping PyMongo's GridOutCursor.

    The descriptor attributes below are expanded by Motor's metaprogramming
    layer (create_class_with_framework) into delegating properties and
    coroutine methods on the framework-specific concrete class.
    """

    __motor_class_name__ = 'MotorGridOutCursor'
    __delegate_class__ = gridfs.GridOutCursor

    add_option = MotorCursorChainingMethod()
    address = ReadOnlyProperty()
    collation = ReadOnlyProperty()
    comment = MotorCursorChainingMethod()
    distinct = AsyncRead()
    explain = AsyncRead()
    hint = MotorCursorChainingMethod()
    limit = MotorCursorChainingMethod()
    max = MotorCursorChainingMethod()
    max_await_time_ms = MotorCursorChainingMethod()
    max_scan = MotorCursorChainingMethod()
    max_time_ms = MotorCursorChainingMethod()
    min = MotorCursorChainingMethod()
    remove_option = MotorCursorChainingMethod()
    skip = MotorCursorChainingMethod()
    sort = MotorCursorChainingMethod(doc=cursor_sort_doc)
    where = MotorCursorChainingMethod()

    # PyMongo's GridOutCursor inherits __die from Cursor.
    _Cursor__die = AsyncCommand()

    def clone(self):
        """Get a clone of this cursor."""
        return self.__class__(self.delegate.clone(), self.collection)

    def next_object(self):
        """Get next GridOut object from cursor."""
        grid_out = super(self.__class__, self).next_object()

        if grid_out:
            # wrap the raw PyMongo GridOut in the framework-specific class
            grid_out_class = create_class_with_framework(
                AgnosticGridOut, self._framework, self.__module__)

            return grid_out_class(self.collection, delegate=grid_out)
        else:
            # Exhausted.
            return None

    def rewind(self):
        """Rewind this cursor to its unevaluated state."""
        self.delegate.rewind()
        self.started = False
        return self

    # The following accessors reach into PyMongo's Cursor via its
    # name-mangled private attributes (_Cursor__*).
    def _empty(self):
        return self.delegate._Cursor__empty

    def _query_flags(self):
        return self.delegate._Cursor__query_flags

    def _data(self):
        return self.delegate._Cursor__data

    def _clear_cursor_id(self):
        self.delegate._Cursor__id = 0

    def _close_exhaust_cursor(self):
        # Exhaust MotorGridOutCursors are prohibited.
        pass

    def _killed(self):
        return self.delegate._Cursor__killed

    @motor_coroutine
    def _close(self):
        yield self._framework.yieldable(self._Cursor__die())
class MotorGridOutProperty(ReadOnlyProperty):
    """Descriptor that exposes a read-only attribute of the wrapped GridOut."""

    def create_attribute(self, cls, attr_name):
        delegate_doc = getattr(cls.__delegate_class__, attr_name).__doc__

        def getter(obj):
            # the delegate must be opened before its metadata is available
            if not obj.delegate._file:
                raise pymongo.errors.InvalidOperation(
                    "You must call MotorGridOut.open() before accessing "
                    "the %s property" % attr_name)
            return getattr(obj.delegate, attr_name)

        return property(fget=getter, doc=delegate_doc)
class AgnosticGridOut(object):
"""Class to read data out of GridFS.
MotorGridOut supports the same attributes as PyMongo's
:class:`~gridfs.grid_file.GridOut`, such as ``_id``, ``content_type``,
etc.
You don't need to instantiate this class directly - use the
methods provided by :class:`~motor.MotorGridFSBucket`. If it **is**
instantiated directly, call :meth:`open`, :meth:`read`, or
:meth:`readline` before accessing its attributes.
"""
__motor_class_name__ = 'MotorGridOut'
__delegate_class__ = gridfs.GridOut
_ensure_file = AsyncCommand()
_id = MotorGridOutProperty()
aliases = MotorGridOutProperty()
chunk_size = MotorGridOutProperty()
close = MotorGridOutProperty()
content_type = MotorGridOutProperty()
filename = MotorGridOutProperty()
length = MotorGridOutProperty()
md5 = MotorGridOutProperty()
metadata = MotorGridOutProperty()
name = MotorGridOutProperty()
read = AsyncRead()
readchunk = AsyncRead()
readline = AsyncRead()
seek = DelegateMethod()
tell = DelegateMethod()
upload_date = MotorGridOutProperty()
    def __init__(
        self,
        root_collection,
        file_id=None,
        file_document=None,
        delegate=None,
    ):
        """Wrap (or create) a PyMongo GridOut for async access.

        :param root_collection: the MotorCollection for the GridFS root
            (e.g. ``db.fs``).
        :param file_id: value of the file document's ``_id``.
        :param file_document: pre-fetched file document, if available.
        :param delegate: an existing :class:`gridfs.GridOut` to wrap;
            when given, file_id/file_document are ignored.
        """
        collection_class = create_class_with_framework(
            AgnosticCollection, self._framework, self.__module__)

        if not isinstance(root_collection, collection_class):
            raise TypeError(
                "First argument to MotorGridOut must be "
                "MotorCollection, not %r" % root_collection)

        if delegate:
            self.delegate = delegate
        else:
            # defer fetching the file document; see open()/_ensure_file
            self.delegate = self.__delegate_class__(
                root_collection.delegate,
                file_id,
                file_document)

        self.io_loop = root_collection.get_io_loop()
    # python.org/dev/peps/pep-0492/#api-design-and-implementation-revisions
    # On Python 3.5+ add async-iterator support (``async for chunk in f``)
    # by exec-ing the coroutine methods into the class namespace, keeping
    # this module importable on interpreters where ``async def`` would be a
    # syntax error.
    if PY35:
        exec(textwrap.dedent("""
        def __aiter__(self):
            return self

        async def __anext__(self):
            chunk = await self.readchunk()
            if chunk:
                return chunk
            raise StopAsyncIteration()
        """), globals(), locals())
    def __getattr__(self, item):
        # Fallback for attributes not defined as descriptors: delegate to
        # the wrapped GridOut, but only after the file document is loaded.
        if not self.delegate._file:
            raise pymongo.errors.InvalidOperation(
                "You must call MotorGridOut.open() before accessing "
                "the %s property" % item)

        return getattr(self.delegate, item)

    @coroutine_annotation
    def open(self):
        """Retrieve this file's attributes from the server.

        Returns a Future.

        .. versionchanged:: 2.0
           No longer accepts a callback argument.

        .. versionchanged:: 0.2
           :class:`~motor.MotorGridOut` now opens itself on demand, calling
           ``open`` explicitly is rarely needed.
        """
        # resolve the future with this MotorGridOut once the file is loaded
        return self._framework.chain_return_value(self._ensure_file(),
                                                  self.get_io_loop(),
                                                  self)

    def get_io_loop(self):
        # The event loop captured from the root collection at construction.
        return self.io_loop
@motor_coroutine
def stream_to_handler(self, request_handler):
"""Write the contents of this file to a
:class:`tornado.web.RequestHandler`. This method calls
:meth:`~tornado.web.RequestHandler.flush` on
the RequestHandler, so ensure all headers have already been set.
For a more complete example see the implementation of
:class:`~motor.web.GridFSHandler`.
.. code-block:: python
class FileHandler(tornado.web.RequestHandler):
@tornado.w |
marco-lilek/musiClr | src/utils/modifyTag.py | Python | mit | 835 | 0.037126 | import taglib
import os
TEMP_FILENAME = "temp.mp3"
class TagWrapper:
  """Context manager around a taglib tag object for one audio file.

  Opens the file's tag on construction (self.tag is None when the file
  cannot be parsed); modify() rewrites the tag by dumping to a temp file
  and swapping it over the original.
  """

  def __init__(self, fileName):
    self.fileName = fileName
    try:
      # fix: line was corrupted by a stray '| ' delimiter; restored.
      # NOTE(review): taglib.MP3 — confirm which taglib binding this is;
      # pytaglib exposes taglib.File instead.
      self.tag = taglib.MP3(fileName)
    except Exception:
      # narrowed from a bare except: (which also swallowed KeyboardInterrupt);
      # callers must handle self.tag being None
      self.tag = None

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc_value, traceback):
    # release the underlying tag handle if we still own one
    if self.tag is not None:
      del self.tag

  def hasTags(self):
    """Return True when both artist and name tags are present."""
    return self.tag.artist is not None and self.tag.name is not None

  def getData(self):
    """Return the (artist, name) pair from the tag."""
    return self.tag.artist, self.tag.name

  def modify(self, artist, name):
    """Rewrite the file's artist/name tags in place.

    :raises TypeError: if the tag failed to open in __init__.
    """
    if self.tag is None:
      raise TypeError("tag is not open")
    self.tag.artist = artist
    self.tag.name = name
    # write the updated tag to a temp file, then atomically-ish replace
    # the original (fix: 'del self.tag' line was corrupted by '| ')
    self.tag.dump(TEMP_FILENAME)
    del self.tag
    self.tag = None
    os.remove(self.fileName)
    os.rename(TEMP_FILENAME, self.fileName)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.