repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
madongo/Intel-i-gent-Pet-Feeder | scan.py | Python | gpl-2.0 | 1,801 | 0.031094 | # /org/bluez/hci0/dev_18_7A_93_02_C5_A9
#address 18:7A:93:02:C5:A9
# Device 18:7A:93:02:C5:A9 UUIDs:
# 00001800-0000-1000-8000-00805f9b34fb
# 00001802-0000-1000-8000-00805f9b34fb
# 00001803-0000-1000-8000-00805f9b34fb
# 00001804-0000-1000-8000-00805f9b34fb
# 0000180a-0000-1000-8000-00805f9b34fb
# 0000180d-0000-1000-8000-00805f9b34fb
# 0000180f-0000-1000-8000-00805f9b34fb
# 0000fff0-0000-1000-8000-00805f9b34fb
# 0000fff3-0000-1000-8000-00805f9b34fb
import dbus
import dbus.mainloop.glib
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
SERVICE_NAME = "org.bluez"
ADAPTER_INTERFACE = SERVICE_NAME + ".Adapter1"
DEVICE_INTERFACE = SERVICE_NAME + ".Device1"
def print_rssi(interface, signal, arg, path):
print('hello world')
print(interface, signal, arg, path)
def property_changed():
print('property_changed')
def scan_bt():
bus = dbus.SystemBus()
manager = dbus.Interface(bus.get_object("org.bluez", "/"),"org.freedesktop.DBus.ObjectManager")
managed_objects = manager.GetManagedObjects()
beacon1 = managed_objects['/org/bluez/hci0/dev_18_7A_93_02_C5_A9']
info = beacon1['org.bluez.Device1']
rssi = info['RSSI']
return rssi.real
if __name__ == '__main__':
dbus.mainloop.glib.DBusGMainLoop( | set_as_default=True)
bus = dbus.SystemBus()
bus.add_signal_receiver(print_rssi,
dbus_interfac | e = "org.freedesktop.DBus.Properties",
signal_name = "PropertiesChanged",
arg0 = "org.bluez.Device1",
path_keyword = "path")
bus.add_signal_receiver(property_changed,
dbus_interface = "org.bluez.Adapter1",
signal_name = "PropertyChanged")
mainloop = GObject.MainLoop()
mainloop.run() |
scop/bash-completion | test/t/test_pylint.py | Python | gpl-2.0 | 786 | 0 | import pytest |
class TestPylint:
@pytest.mark.complete("pylint --v", require_longopt=True)
def test_1(self, completion):
assert completion
@pytest.mark.complete("pylint --confidence=HIGH,")
def test_2(self, completion):
| assert completion
@pytest.mark.complete("pylint --help-msg=", require_longopt=True)
def test_all_message_ids(self, completion):
assert any("-" in x for x in completion)
@pytest.mark.complete("pylint --disable=", require_longopt=True)
def test_enabled_message_ids(self, completion):
assert any("-" in x for x in completion)
@pytest.mark.complete("pylint --enable=foo,", require_longopt=True)
def test_disabled_message_ids(self, completion):
assert any("-" in x for x in completion)
|
tetherless-world/satoru | search.py | Python | apache-2.0 | 152 | 0.013158 | import warnings as | __warnings
__warnings.warn("unqualified module names deprecated, use whyis.search", DeprecationWarning)
from w | hyis.search import *
|
openstack/mistral | mistral/lang/v2/actions.py | Python | apache-2.0 | 2,707 | 0 | # Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS O | F ANY KIND, either express or implied.
# See the L | icense for the specific language governing permissions and
# limitations under the License.
from mistral.lang import types
from mistral.lang.v2 import base
from mistral_lib import utils
class ActionSpec(base.BaseSpec):
# See http://json-schema.org
_schema = {
"type": "object",
"properties": {
"base": types.NONEMPTY_STRING,
"base-input": types.NONEMPTY_DICT,
"input": types.UNIQUE_STRING_OR_ONE_KEY_DICT_LIST,
"output": types.ANY_NULLABLE,
},
"required": ["base"],
"additionalProperties": False
}
def __init__(self, data, validate):
super(ActionSpec, self).__init__(data, validate)
self._name = data['name']
self._description = data.get('description')
self._tags = data.get('tags', [])
self._base = data['base']
self._base_input = data.get('base-input', {})
self._input = utils.get_dict_from_entries(data.get('input', []))
self._output = data.get('output')
self._base, _input = self._parse_cmd_and_input(self._base)
utils.merge_dicts(self._base_input, _input)
def validate_schema(self):
super(ActionSpec, self).validate_schema()
# Validate YAQL expressions.
inline_params = self._parse_cmd_and_input(self._data.get('base'))[1]
self.validate_expr(inline_params)
self.validate_expr(self._data.get('base-input', {}))
if isinstance(self._data.get('output'), str):
self.validate_expr(self._data.get('output'))
def get_name(self):
return self._name
def get_description(self):
return self._description
def get_tags(self):
return self._tags
def get_base(self):
return self._base
def get_base_input(self):
return self._base_input
def get_input(self):
return self._input
def get_output(self):
return self._output
class ActionSpecList(base.BaseSpecList):
item_class = ActionSpec
class ActionListSpec(base.BaseListSpec):
item_class = ActionSpec
def get_actions(self):
return self.get_items()
|
leiferikb/bitpop-private | chrome/test/install_test/chrome.py | Python | bsd-3-clause | 3,017 | 0.002652 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extended WebDriver interface that uses helper extension.
This file is makeshift and should eventually be switched over to
using the new ChromeDriver python interface. However, as that is
not quite ready, this class simply installs a helper extension
and executes scripts in the background page to access extension
APIs.
This may end up being merged with chrome/test/ext_auto, if they
accomplish similar enough purposes. For now, integration with that
is a bit premature.
"""
import os
from selenium import webdriver
_CHROME_GET_VIEW_HANDLES = 'chrome.getViewHandles'
_EXTENSION = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'ext_auto')
class Chrome(webdriver.Remote):
"""Extended | WebDriver interface that uses helper extension."""
def __init__(self, url, desired_capabilities):
switches = desired_capabilities.get('chrome.switches', [])
switches += ['--load-extension=' + _EXTENSION]
desired_capabilities['chrome.switches'] = switc | hes
super(Chrome, self).__init__(url, desired_capabilities)
custom_commands = {
_CHROME_GET_VIEW_HANDLES:
('GET', '/session/$sessionId/chrome/views'),
}
self.command_executor._commands.update(custom_commands)
views = self.execute(_CHROME_GET_VIEW_HANDLES)['value']
self.set_script_timeout(30) # TODO(kkania): Make this configurable.
for view in views:
if view.get('extension_id') == 'aapnijgdinlhnhlmodcfapnahmbfebeb':
self._extension = view['handle']
break
else:
raise RuntimeError('Unable to find helper extension')
def _execute_extension_command(self, name, params={}):
"""Executes an extension command.
When Chrome is started, a helper extension is loaded which provides
a simple synchronous API for manipulating Chrome via the extension
APIs. Communication with the extension is accomplished by executing
a script in the background page of the extension which calls the
'executeCommand' function with the name of the command, a parameter
dictionary, and a callback function that can be used to signal
when the command is finished and potentially send a return value.
"""
old_window = self.current_window_handle
self.switch_to_window(self._extension)
self.execute_async_script(
'executeCommand.apply(null, arguments)', name, params)
self.switch_to_window(old_window)
def create_tab(self, url=None):
"""Creates a new tab with the given URL and switches to it.
If no URL is provided, the homepage will be used.
"""
params = {}
if url is not None:
params['url'] = url
self._execute_extension_command('createTab', params)
self.switch_to_window(self.window_handles[-1])
def create_blank_tab(self):
"""Creates a new blank tab and switches to it."""
self.create_tab('about:blank')
|
LTKills/languages | python/15.py | Python | gpl-3.0 | 156 | 0.012821 | #Specia | l Numbers
import math
for i in range(1000, 10000):
a = str(i)
b, c = int(a[:2]), int(a[2:])
if(math.sqrt(i) == b+c): |
print (i)
|
MutsuGhost1/StudyGroupPython | hw1/look_folder_yield_2.py | Python | gpl-2.0 | 1,995 | 0.007519 | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Administrator
#
# Created: 06/09/2013
# Copyright: (c) Administrator 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
import os, sys
from os.path import isdir, join, splitext, getsize
def __formatOutput(root, dict):
output = ""
output += "-" * 10 + os.linesep
output += ("In Folder: " + str(root) + os.linesep)
for ext, size in dict.items() :
output += ("\t "+ ext + " total size:"+ str(size) + os.linesep)
output += ("-" * 10 + os.linesep)
return output
def myWalk(root, topdown=False):
names = os.listdir(root)
dirs, non_dirs = [], []
for name in names:
if isdir(join(root, name)):
dirs.append(name)
else:
non_dirs.append(name)
if topdown:
yield root, dirs, non_dirs
for name in dirs:
new_path = join(root, name)
for x in myWalk(new_path, topdown):
yield x
if not topdown:
yield root, dirs, non_dirs
def look_folder(root):
for root_path, dirs, non_dirs in myWalk(root):
dict_ext = {}
for file_name in non_dirs:
full_file_path = join(root_path, file_name)
name, ext = splitext(full_file_path)
if ext in dict_ext:
dict_ext[ext] += getsize(full_file_path)
else:
dict_ext[ext] = getsize(full_file_path)
yield __formatOutput(root_path, dict_ext)
def main():
if(2 == len(sys.argv)):
if isdir(sys.argv[1]):
for result in look_folder(sys.argv[1]):
print result
else:
print "Invalud Full Path!"
print | "Usage: python look_folder_recursive.py [full-path]"
else:
print "Usage: python look_folder_recursi | ve.py [full-path]"
if __name__ == '__main__':
main()
|
FedoraScientific/salome-paravis | test/VisuPrs/IsoSurfaces/A0.py | Python | lgpl-2.1 | 1,507 | 0.002654 | # Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# | You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/IsoSurfaces/A0 case
# Create Iso Surface for all data of the given MED file
import | sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import CreatePrsForFile, PrsTypeEnum
import pvserver as paravis
# Create presentations
myParavis = paravis.myParavis
# Directory for saving snapshots
picturedir = get_picture_dir("IsoSurfaces/A0")
file = datadir + "fra.med"
print " --------------------------------- "
print "file ", file
print " --------------------------------- "
print "CreatePrsForFile..."
CreatePrsForFile(myParavis, file, [PrsTypeEnum.ISOSURFACES], picturedir, pictureext)
|
larrybradley/astropy-helpers | astropy_helpers/sphinx/conf.py | Python | bsd-3-clause | 11,881 | 0.002525 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy shared Sphinx settings. These settings are shared between
# astropy itself and affiliated packages.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import warnings
from os import path
import sphinx
from distutils.version import LooseVersion
# -- General configuration ----------------------------------------------------
# The version check in Sphinx itself can only compare the major and
# minor parts of the version number, not the micro. To do a more
# specific version check, call check_sphinx_version("x.y.z.") from
# your project's conf.py
needs_sphinx = '1.3'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
def check_sphinx_version(expected_version):
sphinx_version = LooseVersion(sphinx.__version__)
expected_version = LooseVersion(expected_version)
if sphinx_version < expected_version:
raise RuntimeError(
"At least Sphinx version {0} is required to build this "
"documentation. Found {1}.".format(
expected_version, sphinx_version))
# Configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/3/', None),
'pythonloc': ('http://docs.python.org/',
path.abspath(path.join(path.dirname(__file__),
'local/python3_local_links.inv'))),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.org/', None),
'astropy': ('http://docs.astropy.org/en/stable/', None),
'h5py': ('http://docs.h5py.org/en/latest/', None)}
if sys.version_info[0] == 2:
intersphinx_mapping['python'] = ('http://docs.python.org/2/', None)
intersphinx_mapping['pythonloc'] = (
'http://docs.python.org/',
path.abspath(path.join(path.dirname(__file__),
'local/python2_local_links.inv')))
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The reST default role (used for this markup: `text`) to use for all
# documents. Set to the "smart" one.
default_role = 'obj'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog = """
.. _Astropy: http://astropy.org
"""
# A list of warning types to suppress arbitrary warning messages. We mean to
# override directives in astropy_helpers.sphinx.ext.autodoc_enhancements,
# thus need to ignore those warning. This can be removed once the patch gets
# released in upstream Sphinx (https://github.com/sphinx-doc/sphinx/pull/1843).
# Suppress the warnings requires Sphinx v1.4.2
suppress_warnings = ['app.add_directive', ]
# -- Project information ------------------------------------------------------
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Settings for extensions and extension options ----------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.viewcode',
'astropy_helpers.extern.numpydoc',
'astropy_helpers.extern.automodapi.automodapi',
'astropy_helpers.extern.automodapi.smart_resolver',
'astropy_helpers.sphinx.ext.tocdepthfix',
'astropy_helpers.sphinx.ext.doctest',
'astropy_helpers.sphinx.ext.changelog_links']
if on_rtd:
extensions.append('sphinx.ext.mathjax')
elif LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
# Above, we use a patched version of viewcode rather than 'sphinx.ext.viewcode'
# This can be changed to the sphinx version once the following issue is fixed
# in sphinx:
# https://bitbucket.org/birkenfeld/sphinx/issue/623/
# extension-viewcode-fails-with-function
try:
import matplotlib.sphinxext.plot_directive
extensions += [matplotlib.sphinxext.plot_directive.__name__]
# AttributeError is checked here in case matplotlib is installed but
# Sphinx isn't. Note that this module is imported by the config file
# generator, even if we're not building the docs.
except (ImportError, AttributeError):
warnings.warn(
"matplotlib's plot_directive could not be imported. " +
"Inline plots will not be included in the output")
# Don't show summaries of the members in each class along with the
# class' docstring
numpydoc_show_class_members = False
autosummary_generate = True
automodapi_toctreedirnm = 'api'
# Class documentation should contain *both* the class docstring and
# the __init__ docstring
autoclass_content = "both"
# Render inheritance diagrams in SVG
graphviz_output_format = "svg"
graphviz_dot_args = [
'-Nfontsize=10',
'-Nfontname=Helvetica Neue, Helvetica, Arial, sans-serif',
'-Efontsize=10',
'-Efontname=Helvetica Neue, Helvetica, Arial, sans-serif',
'-Gfontsize=10',
'-Gfontname=Helvetica Neue, Helvetica, Arial, sans-serif'
]
# -- Options for HTML output -------------------------------------------------
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [path.abspath(path.join(path.dirname(__file__), 'themes'))]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap-astropy'
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html'],
'search': [],
'genindex': [],
'py-modindex': [],
}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# included in the bootstrap-astropy theme
html_favicon = path.join(html_theme_path[0], html_theme, 'static',
'astropy_logo.ico')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d %b %Y'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentati | on.
#html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# I | f true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# |
BeyondTheClouds/nova | nova/tests/unit/compute/test_resource_tracker.py | Python | apache-2.0 | 57,954 | 0.000466 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licen | sed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to | in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import datetime
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from nova.compute.monitors import base as monitor_base
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import pci_device_pool
from nova import rpc
from nova import test
from nova.tests.unit.pci import fakes as pci_fakes
from nova.tests import uuidsentinel
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.1',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
'parent_addr': '0000:00:01.0',
},
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.2',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
'parent_addr': '0000:00:01.0',
},
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_PF,
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
},
{
'label': 'label_8086_0123',
'dev_type': 'type-PCI',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0123',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
},
{
'label': 'label_8086_7891',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '7891',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': None,
'parent_addr': '0000:08:01.0',
},
] if self.pci_support else []
self.pci_stats = [
{
'count': 2,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1,
'dev_type': fields.PciDeviceType.SRIOV_VF
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1,
'dev_type': fields.PciDeviceType.SRIOV_PF
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '7891',
'numa_node': None,
'dev_type': fields.PciDeviceType.SRIOV_VF
},
] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology._to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
@mock.patch('stevedore.enabled.EnabledExtensionManager')
def setUp(self, _mock_ext_mgr):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self._set_pci_passthrough_whitelist()
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._instance_types = {}
self.stubs.Set(objects.InstanceList, 'get_by_host_and_node',
self._fake_instance_get_by_host_and_node)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
|
BGmi/BGmi | tests/test_data_source.py | Python | mit | 1,562 | 0.001295 | import pytest
from bgmi.lib.fetch import DATA_SOURCE_MAP
from bgmi.website import mikan
from bgmi.website.base import BaseWebsite
from bgmi.website.model import Episode, SubtitleGroup, WebsiteBangumi
@pytest.mark.parametrize("source", DATA_SOURCE_MAP.keys())
def test_info(source, data_source_bangumi_name):
w = DATA_SOURCE_MAP[source]() # type: BaseWebsite
bangumi_result = w.fetch_bangumi_calendar()
assert bangumi_result, f"website {source} should return bangumi list"
for bangumi in bangumi_result:
assert bangumi.cover.startswith("https://") or bangumi.cover.startswith(
"http://"
), "cover not starts with https:// or http://"
assert isinstance(bangumi, WebsiteBangumi)
for s in bangumi.subtitle_group:
assert isinstance(s, SubtitleGroup)
b = bangumi_result[0]
w.fetch_episode_of_bangumi(b.keyword, max_page=3)
w.fetch_single_bangumi(b.keyword)
@pytest.mark.parametrize("source", DATA_SOURCE_MAP. | keys())
def test_search(source, data_source_bangumi_name):
w = DATA_SOURCE_MAP[source]()
search_result = w.search_by_keyword(data_source_bangumi_name[source][0], count=1)
assert search_result
for episode in search_result:
assert isinstance(episode, Episode)
def test_mikan_fetch_all_episode():
"""
大欺诈师 极影字幕社
https://mikanani.me/Home/Bangumi/2242
"""
w = mikan.Mikanani()
| results = w.fetch_episode_of_bangumi("2242", ["34"])
assert len(results) > 15, "should fetch more episode in expand button"
|
WesleyPeng/uiXautomation | src/test/python/bpt/pages/binghomepage.py | Python | apache-2.0 | 1,210 | 0 | # Co | pyright (c) 2017-2018 {Flair Inc.} WESLEY PENG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You m | ay obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from taf.modeling.web import WebButton
from taf.modeling.web import WebTextBox
from .basepage import BasePage
from .searchresultspage import SearchResultsPage
class BingHomePage(BasePage):
def __init__(self, url=None, *elements, **conditions):
super(BingHomePage, self).__init__(
url, *elements, **conditions
)
self.txt_search_box = WebTextBox(id='sb_form_q')
self.btn_search_go = WebButton(id='sb_form_go')
def search_with_keyword(self, keyword):
self.txt_search_box.set(keyword)
self.btn_search_go.click()
return SearchResultsPage()
|
guildenstern70/pyfab | src/generators/pdfgenerator/loader.py | Python | mit | 4,301 | 0.00651 | """
FableGenerator
fablegenerator.loader.py
@author: Alessio Saltarin
"""
from generators import chapter, templateloader
import sys
import codecs
import os.path
import fablepage
import fableme.utils as utils
import logging
class SimpleLoader(templateloader.TemplateLoader):
def __init__(self, fable_id, lang, character, dedication):
super(SimpleLoader, self).__init__(fable_id, lang, character, dedication)
def build(self):
if self._buildFableFromFile():
if len(self.paras) > 0:
self.fable_doc = fablepage.PdfFableDoc(self._title, standalone=True)
self._parseFile()
self._addCover()
self.fable_doc.addTitle(self._title, self._dedication)
for xchapter in self.chapters:
self._buildChapter(self.fable_doc, xchapter)
else:
print 'CRITICAL Loader Error: empty contents.'
raise
self.fable_doc.build()
def get_images_path_to(self, filename):
pics_folder = "F_PICS"
if self._character.sex == 'M':
pics_folder = "M_PICS"
filepath_en = self._get_resources_path_lang()
images_path = os.path.join(filepath_en, pics_folder)
lang_code = self._language.language_code()
if lang_code != "EN":
finalpath_otherlang = os.path.normpath(os.path.join(filepath_en, lang_code))
fullfilepath = os.path.join(finalpath_otherlang, pics_folder)
path_to_file = os.path.join(fullfilepath, filename)
if os.path.isfile(path_to_file):
images_path = fullfilepath
return os.path.join(images_path, filename)
def _get_format(self):
return '.pdf'
def __get_fable(self):
return self.fable_doc
def __get_pdf_file(self):
return self._ebook_file
fable = property(__get_fable, doc="""Get the fable document.""")
fable_file = property(__get_pdf_file, doc="""Get fable PDF file path.""")
class GoogleLoader(SimpleLoader):
@classmethod
def from_fable_db(cls, dbfable):
fable_template_id = dbfable.template_id
lang = dbfable.language
character = dbfable.character
dedication = dbfable.get_full_dedication()
return cls(fable_template_id, lang, character, dedication)
def load_template(self):
self._read_file_template()
return self._replace_tags()
def build(self):
if len(self.paras) > 0:
self.fable_doc = fablepage.PdfFableDoc(self._title, standalone=False)
self._parseFile()
self._addCover()
self.fable_doc.addTitle(self._title, self._dedication)
for chapter in self.chapters:
self._buildChapter(self.fable_doc, chapter)
else:
logging.error('CRITICAL PDF Error: empty contents.')
raise
self.fable_doc.build()
def save(self, file_h):
saved = True
try:
if self.fable_doc:
self.fable_doc.save(file_h)
else:
logging.warn('Aborting PDF save: fable_doc is null.')
saved = False
except:
saved = False
return saved
|
def get_resources_path_to(self, filename):
filename = os.path.join(self._get_resources_path(), filename)
return utils.GoogleUtils.get_from_ | google(filename)
def get_template(self):
return self._template_file
def _read_file_template(self):
read_ok = True
try:
template_googlepath = self._get_resources_path_to(self._template['template_text_file'])
logging.debug('Reading from ' + template_googlepath + '...')
fablefile = codecs.open(template_googlepath, "r", "utf-8")
self._fabletemplate = unicode(fablefile.read())
fablefile.close()
logging.debug('Reading file done.')
except:
read_ok = False
logging.error('*** Error reading fable template...')
logging.error('*** %s', sys.exc_info())
return read_ok
|
Soovox/django-socialregistration | socialregistration/clients/__init__.py | Python | mit | 1,878 | 0.007455 | from django.conf import settings
USE_HTTPS = bool(getattr(settings, 'SOCIALREGISTRATION_USE_HTTPS', False))
class Client(object):
    """
    Common interface for OAuth/OpenID clients.  Concrete subclasses must
    implement every method below.
    """

    def is_https(self):
        """
        True when the site runs behind HTTPS, as configured through the
        ``SOCIALREGISTRATION_USE_HTTPS`` setting.
        """
        return USE_HTTPS

    def get_redirect_url(self, **kwargs):
        """
        URL of the provider page where the user grants (or denies) the
        requested access/permissions.
        """
        raise NotImplementedError

    def get_callback_url(self):
        """
        URL the provider sends the user back to once permissions/access
        were granted - or not.  Implementations should take the value of
        ``self.is_https()`` into account.
        """
        raise NotImplementedError

    def request(self, url, method="GET", params=None, headers=None, **kwargs):
        """
        Issue a signed request against ``url``; the signing scheme depends
        on the protocol used.

        :param url: The API endpoint to request
        :param method: The HTTP method to be used
        :param params: The parameters to be used for the request
        :type params: dict
        :param headers: Additional headers to be sent with the request
        :type headers: dict
        """
        raise NotImplementedError

    def get_user_info(self):
        """
        Fetch and return the authenticated user's profile information.
        """
        raise NotImplementedError

    @staticmethod
    def get_session_key():
        """
        Unique identifier under which this client is stored in the user's
        session for the duration of the authentication/authorization flow.
        """
        raise NotImplementedError
possoumous/Watchers | seleniumbase/fixtures/email_manager.py | Python | mit | 17,901 | 0.000056 | """
EmailManager - a helper class to login, search for, and delete emails.
"""
import email
import htmlentitydefs
import imaplib
import quopri
import re
import time
import types
from seleniumbase.config import settings
class EmailManager:
""" A helper class to interface with an Email account. These imap methods
can search for and fetch messages without needing a browser.
Example:
em = EmailManager()
result = em.check_for_recipient(
"[GMAIL.USER]+[SOME CODE OR TIMESTAMP KEY]@gmail.com")
"""
HTML = "text/html"
PLAIN = "text/plain"
TIMEOUT = 1800
def __init__(self, uname=settings.EMAIL_USERNAME,
             pwd=settings.EMAIL_PASSWORD,
             imap_string=settings.EMAIL_IMAP_STRING,
             port=settings.EMAIL_IMAP_PORT):
    """Store IMAP credentials and host settings.

    No connection is opened here; imap_connect() does that lazily.
    Defaults come from seleniumbase's settings module.
    """
    self.uname = uname
    self.pwd = pwd
    self.imap_string = imap_string
    self.port = port
def imap_connect(self):
    """
    Connect to the IMAP mailbox.
    """
    # SSL connection using the credentials captured in __init__;
    # select() with no arguments opens the default INBOX.
    self.mailbox = imaplib.IMAP4_SSL(self.imap_string, self.port)
    self.mailbox.login(self.uname, self.pwd)
    self.mailbox.select()
def imap_disconnect(self):
    """
    Disconnect from the IMAP mailbox.
    """
    # close() the selected mailbox first, then logout() to end the session.
    self.mailbox.close()
    self.mailbox.logout()
def __imap_search(self, ** criteria_dict):
    """ Searches for query in the given IMAP criteria and returns
    the message numbers that match as a list of strings.

    Criteria without values (eg DELETED) should be keyword args
    with KEY=True, or else not passed. Criteria with values should
    be keyword args of the form KEY="VALUE" where KEY is a valid
    IMAP key.

    IMAP default is to AND all criteria together. We don't support
    other logic quite yet.

    All valid keys: ALL, ANSWERED, BCC <string>, BEFORE <string>,
    BODY <string>, CC <string>, DELETED, DRAFT, FLAGGED, FROM
    <string>, HEADER <field-name> <string> (UNTESTED), KEYWORD
    <flag>, LARGER <n>, NEW, NOT <search-key>, OLD, ON <date>,
    OR <search-key1> <search-key2> (UNTESTED), RECENT, SEEN,
    SENTBEFORE <date>, SENTON <date>, SENTSINCE <date>, SINCE <date>,
    SMALLER <n>, SUBJECT <string>, TEXT <string>, TO <string>,
    UID <sequence set>, UNANSWERED, UNDELETED, UNDRAFT, UNFLAGGED,
    UNKEYWORD <flag>, UNSEEN.

    For details on keys and their values, see
    http://tools.ietf.org/html/rfc3501#section-6.4.4

    :param criteria_dict: dictionary of search criteria keywords
    :raises: EmailException if something in IMAP breaks
    :returns: List of message numbers as strings matched by given criteria
    """
    self.imap_connect()
    criteria = []
    for key in criteria_dict:
        # True-valued criteria are flags like DELETED; everything else is
        # rendered as (KEY "VALUE").
        if criteria_dict[key] is True:
            criteria.append('(%s)' % key)
        else:
            criteria.append('(%s "%s")' % (key, criteria_dict[key]))

    # If any of these criteria are not valid IMAP keys, IMAP will tell us.
    status, msg_nums = self.mailbox.search('UTF-8', * criteria)
    self.imap_disconnect()

    # NOTE(review): imaplib returns a (status, data) pair where data is a
    # list; this length check looks like a defensive no-op — confirm.
    if 0 == len(msg_nums):
        msg_nums = []
    if 'OK' in status:
        return self.__parse_imap_search_result(msg_nums)
    else:
        # EmailException is defined elsewhere in this module.
        raise EmailException("IMAP status is " + str(status))
def remove_formatting(self, html):
    """
    Collapse every run of whitespace into a single space and trim the ends.

    @Params
    html - String of html to remove whitespace from

    @Returns
    Cleaned string
    """
    return re.sub(r"\s+", " ", html).strip()
def __parse_imap_search_result(self, result):
    """
    This takes the result of imap_search and returns SANE results

    @Params
    result - result from an imap_search call

    @Returns
    List of IMAP search results
    """
    # ``types.ListType`` / ``types.StringType`` were Python-2-only aliases
    # that no longer exist in Python 3; isinstance() against the builtin
    # types is equivalent and portable.
    if isinstance(result, list):
        if len(result) == 1:
            # Unwrap the single-element list imaplib typically returns.
            return self.__parse_imap_search_result(result[0])
        return result
    elif isinstance(result, str):
        return result.split()
    else:
        # Fail silently assuming tests will fail if emails are not found
        return []
def fetch_html(self, msg_nums):
    """
    Given a message number that we found with imap_search,
    get the text/html content.

    @Params
    msg_nums - message number to get html message for

    @Returns
    HTML content of message matched by message number
    """
    if msg_nums:
        return self.__imap_fetch_content_type(msg_nums, self.HTML)
    raise Exception("Invalid Message Number!")
def fetch_plaintext(self, msg_nums):
    """
    Given a message number that we found with imap_search,
    get the text/plain content.

    @Params
    msg_nums - message number to get message for

    @Returns
    Plaintext content of message matched by message number
    """
    if msg_nums:
        return self.__imap_fetch_content_type(msg_nums, self.PLAIN)
    raise Exception("Invalid Message Number!")
def __imap_fetch_content_type(self, msg_nums, content_type):
    """
    Given a message number that we found with imap_search, fetch the
    whole source, dump that into an email object, and pick out the part
    that matches the content type specified. Return that, if we got
    multiple emails, return dict of all the parts.

    @Params
    msg_nums - message number to search for
    content_type - content type of email message to return

    @Returns
    Specified content type string or dict of all content types of matched
    email.
    """
    if not msg_nums:
        raise Exception("Invalid Message Number!")
    if not content_type:
        raise Exception("Need a content type!")

    contents = {}
    self.imap_connect()
    for num in msg_nums:
        # RFC822 fetch returns the full raw message source.
        status, data = self.mailbox.fetch(num, "(RFC822)")
        for response_part in data:
            # imaplib interleaves (header, body) tuples with plain closing
            # strings; only the tuples carry message content.
            if isinstance(response_part, tuple):
                # NOTE(review): Python-2 style — on Python 3 the payload is
                # bytes and email.message_from_bytes would be needed.
                msg = email.message_from_string(response_part[1])
                for part in msg.walk():
                    if str(part.get_content_type()) == content_type:
                        content = str(part.get_payload(decode=True))
                        # Later parts with the same type overwrite earlier
                        # ones; one entry per message number.
                        contents[int(num)] = content
    self.imap_disconnect()
    return contents
def fetch_html_by_subject(self, email_name):
    """
    Get the html of an email, searching by subject.

    @Params
    email_name - the subject to search for

    @Returns
    HTML content of the matched email
    """
    if not email_name:
        raise EmailException("Subject cannot be null")
    matches = self.__imap_search(SUBJECT=email_name)
    return self.fetch_html(matches)
def fetch_plaintext_by_subject(self, email_name):
    """
    Get the plain text of an email, searching by subject.

    @Params
    email_name - the subject to search for

    @Returns
    Plaintext content of the matched email
    """
    if not email_name:
        raise EmailException("Subject cannot be null")
    matches = self.__imap_search(SUBJECT=email_name)
    return self.fetch_plaintext(matches)
def search_for_recipient(self, email, timeout=None, content_type=None):
"""
Get content of emails, sent to a specific email address.
@Params
email - the recipient email address to search for
timeout - seconds to try beore timing out
content_type - type of email string to return
@Returns
Content of the matched email in the given content type
" | ""
return self.search(timeout=timeout,
c | ontent |
maxalbert/blaze | blaze/expr/expressions.py | Python | bsd-3-clause | 22,980 | 0.000218 | from __future__ import absolute_import, division, print_function
from keyword import iskeyword
import re
import datashape
from datashape import dshape, DataShape, Record, Var, Mono, Fixed
from datashape.predicates import isscalar, iscollection, isboolean, isrecord
import numpy as np
from odo.utils import copydoc
import toolz
from toolz import concat, memoize, partial, first
from toolz.curried import map, filter
from ..compatibility import _strtypes, builtins, boundmethod, PY2
from .core import Node, subs, common_subexpression, path
from .method_dispatch import select_functions
from ..dispatch import dispatch
from .utils import hashable_index, replace_slices
# Public API of this module.
# NOTE(review): 'label' is listed twice — harmless but probably unintended.
__all__ = ['Expr', 'ElemWise', 'Field', 'Symbol', 'discover', 'Projection',
           'projection', 'Selection', 'selection', 'Label', 'label', 'Map',
           'ReLabel', 'relabel', 'Apply', 'apply', 'Slice', 'shape', 'ndim',
           'label', 'symbol', 'Coerce', 'coerce']

# Cache of attribute-access results on expressions, keyed by (expr, attr);
# populated and consulted by Expr.__getattr__.
_attr_cache = dict()
def isvalid_identifier(s):
    """Check whether a string is a valid Python identifier

    Examples
    --------
    >>> isvalid_identifier('Hello')
    True
    >>> isvalid_identifier('Hello world')
    False
    >>> isvalid_identifier('Helloworld!')
    False
    >>> isvalid_identifier('1a')
    False
    >>> isvalid_identifier('a1')
    True
    >>> isvalid_identifier('for')
    False
    >>> isvalid_identifier(None)
    False
    """
    # Guard clauses instead of one long boolean chain; the re module caches
    # compiled patterns so no explicit compile is needed.
    if s is None or iskeyword(s):
        return False
    return re.match(r'^[_a-zA-Z][_a-zA-Z0-9]*$', s) is not None
def valid_identifier(s):
    """Rewrite a string to be a valid identifier if it contains
    >>> valid_identifier('hello')
    'hello'
    >>> valid_identifier('hello world')
    'hello_world'
    >>> valid_identifier('hello.world')
    'hello_world'
    >>> valid_identifier('hello-world')
    'hello_world'
    >>> valid_identifier(None)
    >>> valid_identifier('1a')
    >>> valid_identifier('')
    """
    if isinstance(s, _strtypes):
        # BUG FIX: the empty string used to raise IndexError on s[0];
        # treat it like any other invalid identifier and return None.
        if not s or s[0].isdigit():
            return
        return s.replace(' ', '_').replace('.', '_').replace('-', '_')
    # Non-strings (including None) pass through unchanged.
    return s
class Expr(Node):
    """
    Symbolic expression of a computation

    All Blaze expressions (Join, By, Sort, ...) descend from this class.  It
    contains shared logic and syntax.  It in turn inherits from ``Node`` which
    holds all tree traversal logic
    """
    def _get_field(self, fieldname):
        """Return the named field of this expression.

        Non-record expressions only "contain" their own name; anything else
        raises ValueError.
        """
        if not isinstance(self.dshape.measure, Record):
            if fieldname == self._name:
                return self
            raise ValueError(
                "Can not get field '%s' of non-record expression %s" %
                (fieldname, self))
        return Field(self, fieldname)

    def __getitem__(self, key):
        """Dispatch indexing to field access, selection, projection or slicing
        depending on the key's type."""
        if isinstance(key, _strtypes) and key in self.fields:
            return self._get_field(key)
        elif isinstance(key, Expr) and iscollection(key.dshape):
            # Boolean-expression key -> row selection (filter).
            return selection(self, key)
        elif (isinstance(key, list)
                and builtins.all(isinstance(k, _strtypes) for k in key)):
            # List of field names -> column projection.
            if set(key).issubset(self.fields):
                return self._project(key)
            else:
                raise ValueError('Names %s not consistent with known names %s'
                                 % (key, self.fields))
        elif (isinstance(key, tuple) and
              all(isinstance(k, (int, slice, type(None), list, np.ndarray))
                  for k in key)):
            # Multi-dimensional slicing.
            return sliceit(self, key)
        elif isinstance(key, (slice, int, type(None), list, np.ndarray)):
            return sliceit(self, (key,))
        raise ValueError("Not understood %s[%s]" % (self, key))

    def map(self, func, schema=None, name=None):
        """Apply *func* elementwise; see the Map expression."""
        return Map(self, func, schema, name)

    def _project(self, key):
        return projection(self, key)

    @property
    def schema(self):
        # The measure is the per-element dshape (the "row type").
        return datashape.dshape(self.dshape.measure)

    @property
    def fields(self):
        """Field names of this expression: record field names, or the
        expression's own name for scalar measures."""
        if isinstance(self.dshape.measure, Record):
            return self.dshape.measure.names
        name = getattr(self, '_name', None)
        if name is not None:
            return [self._name]
        return []

    def _len(self):
        try:
            return int(self.dshape[0])
        except TypeError:
            # e.g. a Var (variable-length) first dimension.
            raise ValueError('Can not determine length of table with the '
                             'following datashape: %s' % self.dshape)

    def __len__(self):  # pragma: no cover
        return self._len()

    def __iter__(self):
        # Expressions are symbolic; only computed results are iterable.
        raise NotImplementedError(
            'Iteration over expressions is not supported.\n'
            'Iterate over computed result instead, e.g. \n'
            "\titer(expr) # don't do this\n"
            "\titer(compute(expr)) # do this instead")

    def __dir__(self):
        """Augment dir() with field names and dshape-dispatched methods so
        interactive tab-completion works on expressions."""
        result = dir(type(self))
        if isrecord(self.dshape.measure) and self.fields:
            result.extend(list(map(valid_identifier, self.fields)))

        d = toolz.merge(schema_methods(self.dshape.measure),
                        dshape_methods(self.dshape))
        result.extend(list(d))

        return sorted(set(filter(isvalid_identifier, result)))

    def __getattr__(self, key):
        """Resolve unknown attributes as fields or dshape-dispatched methods,
        memoizing the result in the module-level _attr_cache."""
        # '_hash' must never be resolved here or hashing would recurse.
        if key == '_hash':
            raise AttributeError()
        try:
            return _attr_cache[(self, key)]
        except:
            # Cache miss (bare except also tolerates unhashable state);
            # fall through and compute the attribute below.
            pass
        try:
            result = object.__getattribute__(self, key)
        except AttributeError:
            # Map sanitized identifiers back to the raw field names.
            fields = dict(zip(map(valid_identifier, self.fields),
                              self.fields))

            if self.fields and key in fields:
                if isscalar(self.dshape.measure):  # t.foo.foo is t.foo
                    result = self
                else:
                    result = self[fields[key]]
            else:
                # Methods selected by the expression's dshape (e.g. .sum on
                # numeric collections).
                d = toolz.merge(schema_methods(self.dshape.measure),
                                dshape_methods(self.dshape))
                if key in d:
                    func = d[key]
                    if func in method_properties:
                        # Property-like: call immediately instead of binding.
                        result = func(self)
                    else:
                        result = boundmethod(func, self)
                else:
                    # Re-raise the original AttributeError.
                    raise
        _attr_cache[(self, key)] = result
        return result

    @property
    def _name(self):
        # Single-input scalar expressions inherit their child's name
        # (e.g. t.amount + 1 is still called 'amount').
        if (isscalar(self.dshape.measure) and
                len(self._inputs) == 1 and
                isscalar(self._child.dshape.measure)):
            return self._child._name

    def __enter__(self):
        """ Enter context """
        return self

    def __exit__(self, *args):
        """ Exit context

        Close any open resource if we are called in context
        """
        for value in self._resources().values():
            try:
                value.close()
            except AttributeError:
                # Resource has no close(); nothing to clean up.
                pass
        return True
# Memoization cache for symbol(), keyed by (name, dshape, token).
_symbol_cache = dict()
def _symbol_key(args, kwargs):
    """Normalize symbol()'s arguments into a hashable (name, dshape, token)
    triple used as its memoization key."""
    # NOTE(review): the branches are mutually exclusive on len(args), so the
    # bare ``if len(args) == 2`` (rather than ``elif``) is harmless here.
    if len(args) == 1:
        name, = args
        ds = None
        token = None
    if len(args) == 2:
        name, ds = args
        token = None
    elif len(args) == 3:
        name, ds, token = args
    # Keyword arguments fill in / override the positional values.
    ds = kwargs.get('dshape', ds)
    token = kwargs.get('token', token)
    # Normalize to a DataShape so equal shapes hash identically.
    ds = dshape(ds)
    return (name, ds, token)
@memoize(cache=_symbol_cache, key=_symbol_key)
def symbol(name, dshape, token=None):
    """Create (or return the cached) leaf Symbol for name/dshape/token."""
    return Symbol(name, dshape, token=token)
class Symbol(Expr):
    """
    Symbolic data. The leaf of a Blaze expression

    Examples
    --------

    >>> points = symbol('points', '5 * 3 * {x: int, y: int}')
    >>> points
    points
    >>> points.dshape
    dshape("5 * 3 * {x: int32, y: int32}")
    """
    __slots__ = '_hash', '_name', 'dshape', '_token'
    __inputs__ = ()

    def __init__(self, name, dshape, token=None):
        self._name = name
        # Normalize: strings are parsed, and a bare Mono measure is wrapped
        # so self.dshape is always a DataShape.
        if isinstance(dshape, _strtypes):
            dshape = datashape.dshape(dshape)
        if isinstance(dshape, Mono) and not isinstance(dshape, DataShape):
            dshape = DataShape(dshape)
        self.dshape = dshape
        # token distinguishes otherwise-identical symbols — presumably for
        # cache separation; confirm against symbol()/_symbol_key usage.
        self._token = token

    def __str__(self):
        return self._name or ''
andymiller/flecks | examples/st_lgcp_mix_examples.py | Python | mit | 6,575 | 0.025551 | import pylab as plt
import numpy as np
from lgcp.examples.util import gen_synthetic_data
from lgcp.st_mix_lgcp import SpatioTemporalMixLGCP
#def st_lgcp_mix_examples_1d():
if __name__=="__main__":
    """ Very simple 1d space example """
    np.random.seed(100)

    # example parameters and synthetic data: 1-D space, K=2 latent bases,
    # 100-cell spatial and temporal grids
    xdim = 1
    K = 2
    xgrid_dims = [100]
    xbbox = [[-10,10]]
    tgrid_dims = np.array([100])
    tbbox = np.array([0,50])
    data, B_gt, W_gt, xgrid, tgrid, X, T, Z = \
        gen_synthetic_data(xdim=xdim, K=K,
                           xgrid_dims=xgrid_dims,
                           xbbox=xbbox,
                           tgrid_dims=tgrid_dims,
                           tbbox=tbbox)

    # visualize the ground-truth bases, weights and observed point process
    fig, axarr = plt.subplots(3,2)
    axarr[0,0].plot(xgrid, B_gt.T)
    axarr[0,0].set_title("spatial bumps")
    axarr[0,0].set_xlabel('X space')
    axarr[0,0].set_xlim(xbbox[0])
    axarr[1,0].plot(tgrid, W_gt.T)
    axarr[1,0].set_title("temporal weights")
    axarr[1,0].set_ylabel('T space')
    axarr[1,0].set_xlim(tbbox/2)

    # `ax` is a 3D-aware axis instance because of the projection='3d' keyword argument to add_subplot
    #fig = plt.figure(figsize=(14,6))
    #ax = fig.add_subplot(1, 1, 1, projection='3d')
    # surface_plot with color grading and color bar
    #p = ax.plot_surface(X, T, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
    #plt.set_title("Spatiotemporal Intensity")
    #cb = fig.colorbar(p, shrink=0.5)
    #plt.show()

    axarr[2,0].scatter(data[:,0], data[:,1], marker='.')
    axarr[2,0].set_title("observed point process")
    axarr[2,0].set_xlabel('X space')
    axarr[2,0].set_ylabel("TIME")
    axarr[2,0].set_xlim(xbbox[0])
    axarr[2,0].set_ylim(tbbox/2)
    fig.tight_layout()

    #
    # fit model: train on the first half of the time window, hold out the rest
    #
    train_data = data[ data[:,1] < tbbox[1]/2 ] #grab
    K = 2
    dim = 1
    #xgrid_dims = xgrid_dims # downsample space and time
    #tgrid_dims = tgrid_dims
    #tbbox /= 2
    model = SpatioTemporalMixLGCP(xdim=xdim,
                                  K=K,
                                  xgrid_dims=xgrid_dims,
                                  xbbox=xbbox,
                                  tgrid_dims=tgrid_dims/2,
                                  tbbox=tbbox/2)
    model.describe()
    # initial spatial/temporal kernel hyperparameters (one row per basis)
    init_xh = np.array( [[1, .3], [1, .3]] )
    init_th = np.array( [[15., 20.], [15, 20.]] )
    w_samps, b_samps, th_samps, lls = model.fit(data,
                                                Nsamps=500,
                                                init_xh=init_xh,
                                                init_th=init_th)
    # use the maximum-likelihood sample for the point-estimate plots
    max_idx = lls.argmax()
    model.plot_basis_from_samp(b_samps[max_idx], axarr[0,1])
    model.plot_weights_from_samp(w_samps[max_idx], axarr[1,1])
    f = plt.figure()
    plt.plot(lls)

    # plot Temporal Hyper parameter traces
    model.plot_time_hypers()
    model.plot_space_hypers()

    ##
    # visualize resulting posterior intensity surfs
    ##
    #create ground truth basis/weights
    #compute posterior surface
    Lambda_mean, Lambda_var = model.posterior_mean_var_lambda(samp_start=10, thin=5)
    Lambda_gt = B_gt.T.dot(W_gt) #V by T matrix
    # NOTE(review): np.floor returns a float; float slice bounds are
    # rejected by modern numpy/python — confirm intended int conversion.
    Lambda_gt_train = Lambda_gt[:,0:np.floor( Lambda_gt.shape[1]/2 )]

    #plot comparison (shared color scale across ground truth and posterior)
    fig, axarr = plt.subplots(3,1)
    vmin = np.min( np.column_stack([Lambda_gt, Lambda_mean]) )
    vmax = np.max( np.column_stack([Lambda_gt, Lambda_mean]) )
    axarr[0].imshow(Lambda_gt_train, origin='lower', vmin=vmin, vmax=vmax,
                    extent=[0,tbbox[1]/2, xbbox[0][0], xbbox[0][1]])
    axarr[0].set_title('Ground truth intensity function')
    mean_im = axarr[1].imshow(Lambda_mean, origin='lower', vmin=vmin, vmax=vmax,
                              extent=[0,tbbox[1]/2, xbbox[0][0], xbbox[0][1]])
    axarr[1].set_title('Posterior mean intensity function')
    fig.subplots_adjust(right=0.8)
    mean_cbar_ax = fig.add_axes([0.85, 0.65, 0.025, 0.25])
    fig.colorbar(mean_im, cax=mean_cbar_ax)

    #plot lambda variance
    var_im = axarr[2].imshow(Lambda_var, origin='lower',
                             extent=[0,tbbox[1]/2, xbbox[0][0], xbbox[0][1]])
    axarr[2].set_title('Posterior uncertainty')
    var_cbar_ax = fig.add_axes([0.85, .05, 0.025, 0.25])
    fig.colorbar(var_im, cax=var_cbar_ax)
    #fig.tight_layout()

    #Beta_gt_biased = np.log(B_gt)
    #Beta_gt_0 = Beta_gt_biased.mean(axis=1)
    #Beta_gt = np.column_stack((Beta_gt_0, (Beta_gt_biased.T-Beta_gt_0).T))
    #Omega_gt_biased = np.log(W_gt)
    #Omega_gt_0 = Omega_gt_biased.mean(axis=1)
    #Omega_gt = np.column_stack((Omega_gt_0, (Omega_gt_biased.T-Omega_gt_0).T))
    #th_gt = np.concatenate( (Beta_gt.ravel(), Omega_gt.ravel()) )

    #################################################
    # make predictions on the held-out second half of the time window
    #################################################
    #visualize test lambda
    test_data = data[data[:,1] > tbbox[1]/2]
    test_tbbox = np.array([tbbox[1]/2, tbbox[1]])
    test_lam, test_grid = model.test_likelihood(test_data,
                                                tgrid_dims/2,
                                                test_tbbox,
                                                num_samps=100)
    fullLam = np.column_stack((Lambda_mean, test_lam))
    fig, axarr = plt.subplots(2,1)
    axarr[1].imshow(fullLam, origin='lower',
                    extent=[tbbox[0], tbbox[1], xbbox[0][0], xbbox[0][1]])
    axarr[1].set_title('Inferred $\lambda^{old}$ (left) and projected $\lambda^{new}$')
    axarr[0].imshow(Lambda_gt, origin='lower',
                    extent=[tbbox[0], tbbox[1], xbbox[0][0], xbbox[0][1]])
    axarr[0].set_title("Ground truth, both post and post predictive")

    #visualize forward sampled bases (temporal weights extended onto test grid)
    w_pred = model.sample_conditional_intensity(w_samps[max_idx],
                                                th_samps[max_idx],
                                                test_grid)
    w_mat = w_samps[max_idx].reshape((K,-1))
    full_w = np.column_stack((w_mat[:,1:], w_pred[:,1:]))
    full_t = np.concatenate( [model._grids[-1], test_grid] )
    plt.figure()
    plt.plot(full_t, full_w.T)
    plt.show()

    #ground truth loglike
    ll_gt = np.sum(model._grid_obs*np.log(Lambda_gt_train) - Lambda_gt_train )
    print "Ground truth loglike: ", ll_gt #model._log_like(th_gt)
    print "sample max loglike: ", lls.max()
msneddon/narrative | src/biokbase/narrative/handlers/authhandlers.py | Python | mit | 4,088 | 0.002446 | """
KBase handlers for authentication in the Jupyter notebook.
"""
__author__ = 'Bill Riehl <wjriehl@lbl.gov>'
from tornado.escape import url_escape
from notebook.base.handlers import IPythonHandler
from traitlets.config import Application
from notebook.auth.login import LoginHandler
from notebook.auth.logout import LogoutHandler
from biokbase.narrative.common.kblogging import (
get_logger, log_event
)
from biokbase.narrative.common.util import kbase_env
import biokbase.auth
import tornado.log
import re
import os
import urllib
import logging
# Set logging up globally.
g_log = get_logger("biokbase.narrative")
app_log = tornado.log.app_log  # alias
# BUG FIX: ``Application.initialized`` is a classmethod; the bare attribute
# reference is always truthy, which made this branch unconditional.  It must
# be *called* to test whether a singleton Application exists.
if Application.initialized():
    app_log = Application.instance().log

if os.environ.get('KBASE_DEBUG', False):
    app_log.setLevel(logging.DEBUG)

# Name of the cookie the KBase front end sets after authentication.
auth_cookie_name = "kbase_session"
class KBaseLoginHandler(LoginHandler):
    """KBase-specific login handler.

    This should get the cookie and put it where it belongs.
    A (not-so-distant) future version will return a session token.
    """

    def get(self):
        """
        Initializes the KBase session from the cookie passed into it.
        """
        # Matches key=value pairs; values may not contain '|', which the
        # front end escapes as PIPESIGN (and '=' as EQUALSSIGN).
        cookie_regex = re.compile(r'([^ =|]+)=([^\|]*)')
        client_ip = self.request.remote_ip
        http_headers = self.request.headers
        ua = http_headers.get('User-Agent', 'unknown')

        # save client ip in environ for later logging
        kbase_env.client_ip = client_ip

        auth_cookie = self.cookies.get(auth_cookie_name, None)
        if auth_cookie:
            # Decode and undo the PIPESIGN/EQUALSSIGN escaping.
            cookie_val = urllib.unquote(auth_cookie.value)
            cookie_obj = {
                k: v.replace('EQUALSSIGN', '=').replace('PIPESIGN', '|')
                for k, v in cookie_regex.findall(cookie_val)
            }

            if app_log.isEnabledFor(logging.DEBUG):
                app_log.debug("kbase cookie = {}".format(cookie_val))
                # BUG FIX: this block referenced an undefined name ``sess``
                # (raising NameError whenever DEBUG logging was enabled) and
                # logged the token under the user_id label.
                app_log.debug("KBaseLoginHandler.get: user_id={uid} token={tok}"
                              .format(uid=cookie_obj.get('user_id', 'none'),
                                      tok=cookie_obj.get('token', 'none')))

            biokbase.auth.set_environ_token(cookie_obj.get('token', None))
            kbase_env.session = cookie_obj.get('kbase_sessionid', '')
            kbase_env.client_ip = client_ip
            kbase_env.user = cookie_obj.get('user_id', '')
            log_event(g_log, 'session_start',
                      {'user': kbase_env.user, 'user_agent': ua})

        self.current_user = kbase_env.user
        app_log.info("KBaseLoginHandler.get(): user={}".format(kbase_env.user))

        if self.current_user:
            self.redirect(self.get_argument('next', default=self.base_url))
        else:
            self.write('This is a test?')

    def post(self):
        # Login is cookie-driven; nothing to do on POST.
        pass

    @classmethod
    def get_user(cls, handler):
        """Return the current user id, or 'anonymous' when none is set."""
        user_id = kbase_env.user
        if user_id == '':
            user_id = 'anonymous'
        if user_id is None:
            handler.clear_login_cookie()
            if not handler.login_available:
                user_id = 'anonymous'
        return user_id

    @classmethod
    def password_from_settings(cls, settings):
        # No notebook password; authentication is entirely cookie-based.
        return u''

    @classmethod
    def login_available(cls, settings):
        """Whether this LoginHandler is needed - and therefore whether the login page should be displayed."""
        return True
class KBaseLogoutHandler(LogoutHandler):
    """Clears all KBase session state and logs the session close."""

    def get(self):
        # NOTE(review): client_ip is captured but unused here — confirm
        # whether it was meant to be logged with the event.
        client_ip = self.request.remote_ip
        http_headers = self.request.headers
        user = kbase_env.user
        ua = http_headers.get('User-Agent', 'unknown')

        # Reset every kbase_env field to its logged-out sentinel value.
        kbase_env.auth_token = 'none'
        kbase_env.narrative = 'none'
        kbase_env.session = 'none'
        kbase_env.user = 'anonymous'
        kbase_env.workspace = 'none'
        biokbase.auth.set_environ_token(None)

        app_log.info('Successfully logged out')
        log_event(g_log, 'session_close', {'user': user, 'user_agent': ua})

        self.write(self.render_template('logout.html', message={'info': 'Successfully logged out'}))
chenzheng128/ns-3-dev-git | src/dsdv/bindings/modulegen__gcc_LP64.py | Python | gpl-2.0 | 525,693 | 0.015264 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    # Downgrade wrapper-generation failures to warnings so one bad wrapper
    # does not abort generation of the whole bindings module.
    def handle_error(self, wrapper, exception, traceback_):
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        # Returning True tells pybindgen the error was handled (the wrapper
        # is simply skipped).
        return True
import sys
def module_init():
    # Root of the generated bindings: the C++ namespace ::ns3 exposed as
    # the Python module ns.dsdv.
    root_module = Module('ns.dsdv', cpp_namespace='::ns3')
    return root_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module[' | ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.ne | twork')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
module.add_class('Inet6SocketAddress', import_from_module='ns.network')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
module.add_class('InetSocketAddress', import_from_module='ns.network')
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## int-to-type.h (module 'core'): ns3::IntToType<0> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['0'])
## int-to-type.h (module 'core'): ns3::IntToType<0>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<1> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['1'])
## int-to-type.h (module 'core'): ns3::IntToType<1>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<2> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['2'])
## int-to-type.h (module 'core'): ns3::IntToType<2>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<3> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['3'])
## int-to-type.h (module 'core'): ns3::IntToType<3>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<4> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['4'])
## int-to-type.h (module 'core'): ns3::IntToType<4>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<5> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['5'])
## int-to-type.h (module 'core'): ns3::IntToType<5>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<6> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['6'])
## int-to-type.h (module 'core'): ns3::IntToType<6>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'], import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class]
module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration]
module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper [class]
module.add_class('Ipv4RoutingHelper', allow_subclassing=True, import_from_module='ns.internet')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_fro |
tysonholub/twilio-python | twilio/rest/api/v2010/account/sip/domain/auth_types/auth_registrations_mapping/auth_registrations_credential_list_mapping.py | Python | mit | 17,559 | 0.003816 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class AuthRegistrationsCredentialListMappingList(ListResource):
""" """
def __init__(self, version, account_sid, domain_sid):
"""
Initialize the AuthRegistrationsCredentialListMappingList
:param Version version: Version that contains the resource
:param account_sid: The SID of the Account that created the resource
:param domain_sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingList
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingList
"""
super(AuthRegistrationsCredentialListMappingList, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, 'domain_sid': domain_sid, }
self._uri = '/Accounts/{account_sid}/SIP/Domains/{domain_sid}/Auth/Registrations/CredentialListMappings.json'.format(**self._solution)
def create(self, credential_list_sid):
"""
Create a new AuthRegistrationsCredentialListMappingInstance
:param unicode credential_list_sid: The SID of the CredentialList resource to map to the SIP domain
:returns: Newly created AuthRegistrationsCredentialListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance
"""
data = values.of({'CredentialListSid': credential_list_sid, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return AuthRegistrationsCredentialListMappingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
)
def stream(self, limit=None, page_size=None):
"""
Streams AuthRegistrationsCredentialListMappingInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, | so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 | records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists AuthRegistrationsCredentialListMappingInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of AuthRegistrationsCredentialListMappingInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of AuthRegistrationsCredentialListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingPage
"""
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(
'GET',
self._uri,
params=params,
)
return AuthRegistrationsCredentialListMappingPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of AuthRegistrationsCredentialListMappingInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of AuthRegistrationsCredentialListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return AuthRegistrationsCredentialListMappingPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a AuthRegistrationsCredentialListMappingContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext
"""
return AuthRegistrationsCredentialListMappingContext(
self._version,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a AuthRegistrationsCredentialListMappingContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext
"""
return AuthRegistrationsCrede |
marratj/ansible | test/units/modules/network/netscaler/test_netscaler_servicegroup.py | Python | gpl-3.0 | 22,813 | 0.00114 |
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.compat.tests.mock import patch, Mock, MagicMock, call
import sys
if sys.version_info[:2] != (2, 6):
import requests
from .netscaler_module import TestModule, nitro_base_patcher, set_module_args
class TestNetscalerServicegroupModule(TestModule):
    @classmethod
    def setUpClass(cls):
        # One-time fixtures for the whole test class: a generic exception type
        # used in place of the nitro SDK's exception, plus fake nssrc modules
        # so `import nssrc...` inside the module under test resolves to mocks.
        class MockException(Exception):
            pass
        cls.MockException = MockException
        m = MagicMock()
        # Dedicated mock for the servicegroup config class so individual tests
        # can inspect calls made against it.
        cls.servicegroup_mock = MagicMock()
        cls.servicegroup_mock.__class__ = MagicMock()
        # Map every nitro SDK module path the netscaler_servicegroup module
        # imports onto a mock; patched into sys.modules via patch.dict below.
        nssrc_modules_mock = {
            'nssrc.com.citrix.netscaler.nitro.resource.config.basic': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup.servicegroup': cls.servicegroup_mock,
            'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding.servicegroup_servicegroupmember_binding': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding.servicegroup_lbmonitor_binding': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.lb': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding.lbmonitor_servicegroup_binding': m
        }
        # Patchers are created here but started/stopped per-test in
        # setUp()/tearDown().
        cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
        cls.nitro_base_patcher = nitro_base_patcher
    @classmethod
    def tearDownClass(cls):
        # NOTE(review): these patchers are started per-test in setUp() and
        # already stopped in tearDown(); stopping them again here looks
        # redundant — confirm it does not raise with newer mock versions,
        # which reject stop() on an unstarted patcher.
        cls.nitro_base_patcher.stop()
        cls.nitro_specific_patcher.stop()
def set_module_state(self, state):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state=state,
))
    def setUp(self):
        # Activate the nitro SDK patchers before each test so the module
        # under test imports the mocked SDK instead of the real one.
        self.nitro_base_patcher.start()
        self.nitro_specific_patcher.start()
    def tearDown(self):
        # Undo the per-test patching started in setUp().
        self.nitro_base_patcher.stop()
        self.nitro_specific_patcher.stop()
    def test_graceful_nitro_api_import_error(self):
        """The module must fail cleanly when the nitro SDK is not importable."""
        # Stop nitro api patching to cause ImportError
        self.set_module_state('present')
        self.nitro_base_patcher.stop()
        self.nitro_specific_patcher.stop()
        # With the mocks removed, this import leaves the module in its
        # "no nitro sdk" state.
        from ansible.modules.network.netscaler import netscaler_servicegroup
        self.module = netscaler_servicegroup
        result = self.failed()
        self.assertEqual(result['msg'], 'Could not load nitro python sdk')
    def test_graceful_nitro_error_on_login(self):
        """A nitro exception raised during login must be reported, not leak."""
        self.set_module_state('present')
        from ansible.modules.network.netscaler import netscaler_servicegroup
        # Stand-in for the SDK's nitro_exception; carries the attributes the
        # module reads when formatting the failure message.
        class MockException(Exception):
            def __init__(self, *args, **kwargs):
                self.errorcode = 0
                self.message = ''
        # Client whose login() always raises the mock nitro exception.
        client_mock = Mock()
        client_mock.login = Mock(side_effect=MockException)
        m = Mock(return_value=client_mock)
        with patch('ansible.modules.network.netscaler.netscaler_servicegroup.get_nitro_client', m):
            with patch('ansible.modules.network.netscaler.netscaler_servicegroup.nitro_exception', MockException):
                self.module = netscaler_servicegroup
                result = self.failed()
                self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
    def test_graceful_no_connection_error(self):
        """A requests ConnectionError during login must produce a module failure."""
        # requests is only imported on python != 2.6 (see module top), so the
        # test is skipped there before any use of requests below.
        if sys.version_info[:2] == (2, 6):
            self.skipTest('requests library not available under python2.6')
        self.set_module_state('present')
        from ansible.modules.network.netscaler import netscaler_servicegroup
        # Client whose login() raises a connection error.
        client_mock = Mock()
        attrs = {'login.side_effect': requests.exceptions.ConnectionError}
        client_mock.configure_mock(**attrs)
        m = Mock(return_value=client_mock)
        with patch.multiple(
            'ansible.modules.network.netscaler.netscaler_servicegroup',
            get_nitro_client=m,
            nitro_exception=self.MockException,
        ):
            self.module = netscaler_servicegroup
            result = self.failed()
            self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_servicegroup
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_servicegroup',
get_nitro_client=m,
nitro_exception=self.MockException,
):
self.module = netscaler_servicegroup
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
    def test_create_non_existing_servicegroup(self):
        """When the servicegroup does not exist yet, the module must add it
        and report a change."""
        self.set_module_state('present')
        from ansible.modules.network.netscaler import netscaler_servicegroup
        # Proxy mock standing in for the servicegroup ConfigProxy; diff_object
        # reports no differences after creation.
        servicegroup_proxy_mock = MagicMock()
        attrs = {
            'diff_object.return_value': {},
        }
        servicegroup_proxy_mock.configure_mock(**attrs)
        m = MagicMock(return_value=servicegroup_proxy_mock)
        # First existence check: absent (triggers add); second: present
        # (post-creation sanity check).
        servicegroup_exists_mock = Mock(side_effect=[False, True])
        with patch.multiple(
            'ansible.modules.network.netscaler.netscaler_servicegroup',
            ConfigProxy=m,
            servicegroup_exists=servicegroup_exists_mock,
            servicemembers_identical=Mock(side_effect=[False, True]),
            do_state_change=Mock(return_value=Mock(errorcode=0)),
            nitro_exception=self.MockException,
        ):
            self.module = netscaler_servicegroup
            result = self.exited()
            # The proxy's add() must have been invoked to create the group.
            servicegroup_proxy_mock.assert_has_calls([call.add()])
            self.assertTrue(result['changed'], msg='Change not recorded')
def test_update_servicegroup_when_servicegroup_differs(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_servicegroup
servicegroup_proxy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
servicegroup_proxy_mock.configure_mock(**attrs)
m = MagicMock(return_valu | e=servicegroup_proxy_mock)
servicegroup_exists_mock = Mock(side_effect=[True, True])
servicegroup_identical_mock = Mock(side_effect=[False, True])
monitor_bindings_identical_mock = Mock(side_effect=[True, True])
| with patch.multiple(
'ansible.modules.network.netscaler.netscaler_servicegroup',
ConfigProxy=m,
servicegroup_exists=servicegroup_exists_mock,
servicegroup_identical=servicegroup_identical_mock,
monitor |
ioggstream/mysql-utilities | mysql/fabric/services/resharding.py | Python | gpl-2.0 | 35,111 | 0.003589 | #
# Copyright (c) 2014 Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""This module provides the necessary interfaces to perform re-sharding in
fabric. The module takes care of the shard move, shard split and the shard
prune operations.
"""
import logging
import time
from mysql.connector.errorcode import (
ER_NO_SUCH_TABLE,
)
from mysql.fabric import (
errors as _errors,
events as _events,
group_replication as _group_replication,
replication as _replication,
backup as _backup,
utils as _utils,
)
from mysql.fabric.server import (
Group,
MySQLServer,
)
from mysql.fabric.sharding import (
ShardMapping,
RangeShardingSpecification,
HashShardingSpecification,
Shards,
SHARDING_DATATYPE_HANDLER,
SHARDING_SPECIFICATION_HANDLER,
SHARD_METADATA,
SHARD_METADATA_VERIFIER,
)
from mysql.fabric.command import (
ProcedureShard,
)
from mysql.fabric.services import (
sharding as _services_sharding,
utils as _services_utils,
)
_LOGGER = logging.getLogger(__name__)
PRUNE_SHARD_TABLES = _events.Event("PRUNE_SHARD_TABLES")
class PruneShardTables(ProcedureShard):
    """Prune the tables that belong to a shard according to the sharding
    specification defined for the given table.
    """
    group_name = "sharding"
    command_name = "prune_shard"

    def execute(self, table_name, synchronous=True):
        """Prune all tables that are part of the shard the given table
        belongs to.  Multiple tables may share the same shard; they are
        related to each other through the same sharding key.

        :param table_name: The table that needs to be sharded.
        :param synchronous: Whether one should wait until the execution
                            finishes or not.
        """
        # Pull the batch size for DELETE statements from the Fabric config.
        prune_limit = _services_utils.read_config_value(
            self.config, 'sharding', 'prune_limit'
        )
        # Fire the asynchronous prune event; execution happens in the
        # triggered procedure(s).
        procedures = _events.trigger(
            PRUNE_SHARD_TABLES, self.get_lockable_objects(),
            table_name, prune_limit
        )
        return self.wait_for_procedures(procedures, synchronous)
CHECK_SHARD_INFORMATION = _events.Event("CHECK_SHARD_INFORMATION")
BACKUP_SOURCE_SHARD = _events.Event("BACKUP_SOURCE_SHARD")
RESTORE_SHARD_BACKUP = _events.Event("RESTORE_SHARD_BACKUP")
SETUP_REPLICATION = _events.Event("SETUP_REPLICATION")
SETUP_SYNC = _events.Event("SETUP_SYNC")
SETUP_RESHARDING_SWITCH = _event | s.Event("SETUP_RESHARDING_SWITCH")
PRUNE_SHARDS = _events.Event("PRUNE_SHARDS")
class MoveShardServer(ProcedureShard):
    """Move the shard represented by the shard_id to the destination group.

    By default the operation takes a backup, restores it on the destination
    group and guarantees that source and destination groups are synchronized
    before pointing the shard at the new group.  Set update_only to true to
    only update the state store and skip those provisioning steps.
    """
    group_name = "sharding"
    command_name = "move_shard"

    def execute(self, shard_id, group_id, update_only=False,
                synchronous=True):
        """Move the shard represented by the shard_id to the destination group.

        :param shard_id: The ID of the shard that needs to be moved.
        :param group_id: The ID of the group to which the shard needs
                         to be moved.
        :update_only: Only update the state store and skip provisioning.
        :param synchronous: Whether one should wait until the execution
                            finishes or not.
        """
        # Resolve the external binaries used for backup/restore from config.
        dump_binary = _services_utils.read_config_value(
            self.config, 'sharding', 'mysqldump_program'
        )
        client_binary = _services_utils.read_config_value(
            self.config, 'sharding', 'mysqlclient_program'
        )
        config_file = self.config.config_file or ""
        # A move has no split value (None) and no prune limit ("").
        procedures = _events.trigger(
            CHECK_SHARD_INFORMATION, self.get_lockable_objects(), shard_id,
            group_id, dump_binary, client_binary, None, config_file,
            "", "MOVE", update_only
        )
        return self.wait_for_procedures(procedures, synchronous)
class SplitShardServer(ProcedureShard):
    """Split the shard represented by the shard_id into the destination group.

    By default the operation takes a backup, restores it on the destination
    group and guarantees that source and destination groups are synchronized
    before pointing the shard at the new group.  Set update_only to true to
    only update the state store and skip those provisioning steps.
    """
    group_name = "sharding"
    command_name = "split_shard"

    def execute(self, shard_id, group_id, split_value=None,
                update_only=False, synchronous=True):
        """Split the shard represented by the shard_id into the destination
        group.

        :param shard_id: The shard_id of the shard that needs to be split.
        :param group_id: The ID of the group into which the split data needs
                         to be moved.
        :param split_value: The value at which the range needs to be split.
        :update_only: Only update the state store and skip provisioning.
        :param synchronous: Whether one should wait until the execution
                            finishes
        """
        # Resolve the external binaries and the prune batch size from config.
        dump_binary = _services_utils.read_config_value(
            self.config, 'sharding', 'mysqldump_program'
        )
        client_binary = _services_utils.read_config_value(
            self.config, 'sharding', 'mysqlclient_program'
        )
        prune_limit = _services_utils.read_config_value(
            self.config, 'sharding', 'prune_limit'
        )
        config_file = self.config.config_file or ""
        procedures = _events.trigger(
            CHECK_SHARD_INFORMATION, self.get_lockable_objects(),
            shard_id, group_id, dump_binary, client_binary,
            split_value, config_file, prune_limit, "SPLIT", update_only)
        return self.wait_for_procedures(procedures, synchronous)
@_events.on_event(PRUNE_SHARD_TABLES)
def _prune_shard_tables(table_name, prune_limit):
"""Delete the data from the copied data directories based on the
sharding configuration uploaded in the sharding tables of the state
store. The basic logic consists of
a) Querying the sharding scheme name corresponding to the sharding table
b) Querying the sharding key range using the sharding scheme name.
c) Deleting the sharding keys that fall outside the range for a given
server.
:param table_name: The table_name |
TaliesinSkye/evennia | src/objects/migrations/0020_remove_old_attr_value_field.py | Python | bsd-3-clause | 8,327 | 0.007686 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Drop the legacy plain-text 'db_value' column, then promote the
        # pickled 'db_value2' column into its place by renaming it.
        # Deleting field 'ObjAttribute.db_value'
        db.delete_column('objects_objattribute', 'db_value')
        db.rename_column('objects_objattribute', 'db_value2', 'db_value')
def backwards(self, orm):
# Adding field 'ObjAttribute.db_value'
db.add_column('objects_objattribute', 'db_value',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
db.rename_column('objects_objattribute', 'db_value', 'db_value2')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'objects.alias': {
'Meta': {'object_name': 'Alias'},
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['objects.ObjectDB']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'objects.objattribute': {
'Meta': {'object_name': 'ObjAttribute'},
'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['objects.ObjectDB']"}),
'db_value2': ('src.utils.picklefield.PickledObjectField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'objects.objectdb': {
'Meta': {'object_name': 'ObjectDB'},
'db_cmdset_storage': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_destination': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'destinations_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
'db_home': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'homes_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'db_permissions': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'db_player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['players.PlayerDB']", 'null': 'True', 'blank': 'True'}),
'db_sessid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'db_typeclass_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'objects.objectnick': {
'Meta': {'unique_together': "(('db_nick', 'db_type', 'db_obj'),)", 'object_name': 'ObjectNick'},
'db_nick': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['objects.ObjectDB']"}),
'db_real': ('django.db.mode | ls.fields.TextField', [], {}),
'db_type': ('django.db.models.fields.CharField', [], {'default': "'inputline'", 'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'players.playerdb': {
'Meta': {'object_name': 'PlayerDB'},
'db_cmdset_storage': ('django.db.models.fields.CharField', | [], {'max_length': '255', 'null': 'True'}),
'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_is_connected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'db_permissions': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'db_typeclass_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': |
cyplp/botanik | paulla.ircbot/docs/source/conf.py | Python | bsd-3-clause | 10,828 | 0.00628 | # -*- coding: utf-8 -*-
#
# paulla.ircbot documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 29 07:58:36 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinxcontrib.gen_node',
]
# sample configuration of sphinxcontrib.gen_node
# gen_nodes = [('done', True, True, True), ('to_valid', True, True, False), ('exo', True, True, False), ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'paulla.ircbot'
copyright = u'2014, Michael Ricart'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import pkg_resources
version = pkg_resources.get_distribution(project).version
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'paullaircbotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [('index', 'paullaircbot.tex', u'paulla.ircbot Documentation',
u'Michael Ricart', 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', 'paullaircbot', u'paulla.ircbot Documentation',
[u'Michael Ricart'], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [('index', 'paullaircbot', u'paulla.ircbot Documentation',
u'Michael Ricart', 'paullaircbot' |
SnowOnion/CodeForcesLee | dp/testStackDepth.py | Python | mit | 146 | 0.054795 | ''' |
# Runtime:
python 2.7.3 (win)
# Result:
0
1
<...>
998
boom
'''
def f(n):
    """Print the current recursion depth, then recurse with no base case.

    Demonstrates the interpreter's recursion limit: the chain of calls
    eventually aborts with a RuntimeError (RecursionError in Python 3)
    once the stack-depth limit (~1000 by default) is reached, as shown in
    the expected-output comment at the top of this file.
    """
    print(n)
    f(n + 1)


try:
    f(0)
except RuntimeError:
    # Expected: recursion limit exceeded. RecursionError is a subclass of
    # RuntimeError, so this works on both Python 2 and Python 3 (the
    # original bare `except:` also swallowed KeyboardInterrupt etc.).
    print('boom')
pyembed/pyembed-rst | setup.py | Python | mit | 2,816 | 0.00071 | # The MIT License(MIT)
# Copyright (c) 2013-2014 Matt Thomson
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates to pytest.

    Lets ``python setup.py test`` run the package's pytest suite.  This
    hooks the (legacy) setuptools test-command protocol; the extraction
    artifacts (' | ' fragments) that had broken two lines of the original
    are repaired here.
    """
    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Point pytest at the package directory containing the tests.
        self.test_args = ['pyembed']
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)
setup(
name='pyembed-rst',
version='1.1.0',
author='Matt Thomson',
author_email='mattjohnthomson@gmail.com',
url='http://pyembed.github.io',
description='Python reStructuredText directive for embedding content using OEmbed',
long_description=open('README.rst').read() + '\n\n' +
open('CHANGES.rst').read(),
download_url='https://pypi.python.org/pypi/pyembed-rst/',
license=open('LICENSE.txt').read(),
provides=['pyembed.rst'],
packages=['pyembed.rst'],
namespace_packages=['pyembed'],
install_requires=[
'pyembed',
'docutils'
],
tests_require=[
'mock',
'pytest',
'vcrpy'
],
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Text Processing'
]
)
|
googleads/google-ads-python | google/ads/googleads/v8/services/services/campaign_criterion_simulation_service/transports/base.py | Python | apache-2.0 | 3,895 | 0.000513 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v8.resources.types import (
campaign_criterion_simulation,
)
from google.ads.googleads.v8.services.types import (
campaign_criterion_simulation_service,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class CampaignCriterionSimulationServiceTransport(metaclass=abc.ABCMeta):
    """Abstract transport class for CampaignCriterionSimulationService.

    Concrete subclasses (e.g. a gRPC transport) implement the wire
    protocol; this base only resolves credentials and pre-wraps methods.
    """
    # OAuth scope required by the Google Ads API.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host
        # If no credentials are provided, then determine the appropriate
        # defaults via google.auth.default().
        if credentials is None:
            credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
        # Save the credentials.
        self._credentials = credentials
        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)
    def _prep_wrapped_messages(self, client_info):
        # Precomputed wrapped methods: each RPC is wrapped once with its
        # default timeout and client-info metadata, keyed by the callable.
        self._wrapped_methods = {
            self.get_campaign_criterion_simulation: gapic_v1.method.wrap_method(
                self.get_campaign_criterion_simulation,
                default_timeout=None,
                client_info=client_info,
            ),
        }
    @property
    def get_campaign_criterion_simulation(
        self,
    ) -> typing.Callable[
        [
            campaign_criterion_simulation_service.GetCampaignCriterionSimulationRequest
        ],
        campaign_criterion_simulation.CampaignCriterionSimulation,
    ]:
        """Return the callable for GetCampaignCriterionSimulation.

        Abstract: concrete transports must override this property.
        """
        raise NotImplementedError
|
samuelclay/NewsBlur | utils/archive/bootstrap_intel.py | Python | mit | 1,384 | 0.00289 | import sys
from mongoengine.queryset import OperationError
from mongoengine.errors import ValidationError
from apps.analyzer.models import MClassifierFeed
from apps.analyzer.models import MClassifierAuthor
from apps.analyzer.models i | mport MClassifierTag
from apps.analyzer.models import MClassifierTitle
for classifier_cls in [MClassifierFeed, MClassifierAuthor,
MClassifierTag, MClassifierTitle]:
print(" ================================================================= ")
print((" Now on %s " % classifier_cls.__name__))
print(" ================================================================= | ")
classifiers = classifier_cls.objects.filter(social_user_id__exists=False)
count = classifiers.count()
print((" ---> Found %s classifiers" % count))
for i, classifier in enumerate(classifiers):
if i % 1000 == 0:
print((" ---> %s / %s" % (i, count)))
sys.stdout.flush()
classifier.social_user_id = 0
try:
classifier.save()
except OperationError as e:
print((" ***> Operation error on: %s" % e))
sys.stdout.flush()
# classifier.delete()
except ValidationError as e:
print((" ***> ValidationError error on: %s" % e))
print((" ***> Original classifier: %s" % classifier.__dict__))
|
great-expectations/great_expectations | great_expectations/rule_based_profiler/domain_builder/__init__.py | Python | apache-2.0 | 990 | 0.007071 | from great_expectations.rule_based_profiler.domain_builder.domain_builder import ( # isort:skip
DomainBuilder,
)
from great_expectations.rule_based_profiler.domain_builder.table_domain_builder import ( # isort:skip
TableDomainBuilder,
)
from great_expectations.rule_based_profiler.domain_builder.column_domain_builder import ( # | isort:skip
ColumnDomainBuilder,
)
from great_expectations.rule_based_profiler.domain_builder.simple_column_suffix_domain_builder import ( # isort:skip
SimpleColumnSuffixDomainBuilder,
)
from great_expectations.rule_based_profiler.domain_builder.categorical_column_domain_builder import ( # isort:skip
CategoricalColumnDomainBuilder,
)
from great_expectations.rule_based_profiler.domain_builder.map_metric_column_domain | _builder import (
MapMetricColumnDomainBuilder,
)
from great_expectations.rule_based_profiler.domain_builder.simple_semantic_type_domain_builder import ( # isort:skip
SimpleSemanticTypeColumnDomainBuilder,
)
|
classcat/cc-prism-hids | main/pylib/ccprism/search.py | Python | gpl-3.0 | 23,411 | 0.00487 | """
/**
* Ossec Framework
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* @category Ossec
* @package Ossec
* @version $Id: Histogram.php,v 1.3 2008/03/03 15:12:18 dcid Exp $
* @author Chris Abernethy
* @copyright Copyright (c) 2007-2008, Daniel B. Cid <dcid@ossec.net>, All rights reserved.
* @license http://www.gnu.org/licenses/gpl-3.0.txt GNU Public License
*/
"""
##############################################################
# Copyright C) 2015 Masashi Okumura All rights reseerved.
##############################################################
import os,sys
import re
from flask import Flask, session, request, redirect, render_template, url_for
from flask import jsonify, make_response
from datetime import *
import time
import uuid
import hashlib
import ossec_conf
import os_lib_handle
import os_lib_agent
import os_lib_alerts
#import os_lib_sysche | ck
from ossec_categories import global_categories
from ossec_formats import log_ca | tegories
from .view import View
class Search(View):
def __init__(self, request):
super().__init__()
self.request = request
self.html = ""
self.contents= ""
self.is_post = False
if request.method == 'POST':
self.is_post = True
self._make_contents()
self._make_html()
def _make_contents(self):
# Starting handle
ossec_handle = os_lib_handle.os_handle_start(ossec_conf.ossec_dir)
# Iniitializing some variables
u_final_time = int(time.time())
#u_final_time = int(time.mktime(datetime.now().timetuple()))
u_init_time = int(u_final_time - ossec_conf.ossec_search_time) # 14400 = 3600 * 4
u_level = ossec_conf.ossec_search_level # 7
u_pattern = ""
u_rule = ""
u_srcip = ""
u_user = ""
u_location = ""
# masao added the folloings :
USER_final = 0
USER_init = 0
USER_level = ""
USER_pattern = None
LOCATION_pattern = None
USER_group = None
USER_log = None
USER_rule = None
USER_srcip = None
USER_user = None
USER_page = int(1)
USER_searchid = 0
USER_monitoring = 0
used_stored = 0
buffer = ""
# Getting search id
if self.is_post and ('searchid' in self.request.form):
str_searchid = self.request.form.get('searchid')
if re.search("[a-z0-9]+", str_searchid):
USER_searchid = str_searchid # It might be hex. dont use int().
is_rt_monitoring = False
# TODO : real time monitoring t.b. implemented.
rt_sk = ""
sv_sk = 'checked="checked"'
if self.is_post and ('monitoring' in self.request.form):
str_monitoring = self.request.form.get('monitoring')
if int(str_monitoring) == 1:
is_rt_monitoring = True
rt_sk = 'checked="checked"'
sv_sk = "";
# Cleaning up time
USER_final = u_final_time
USER_init = u_init_time
USER_monitoring = 1
# Cleaning up fields
# $_POST['search'] = "Search";
# unset($_POST['initdate']);
# unset($_POST['finaldate']);
# Deleting search
if USER_searchid != 0:
os_lib_alerts.os_cleanstored(USER_searchid)
# Refreshing every 90 seconds by default */
m_ossec_refresh_time = ossec_conf.ossec_refresh_time * 1000;
buffer += """\
<script language="javascript">
setTimeout("document.dosearch.submit()", %d);
</script>\n""" % m_ossec_refresh_time
# Reading user input -- being very careful parsing it
# Initial Date
datepattern = "^([0-9]{4})-([0-9]{2})-([0-9]{2}) ([0-9]{2}):([0-9]{2})$";
if is_rt_monitoring:
pass
elif self.is_post and ('initdate' in self.request.form):
str_initdate = self.request.form.get('initdate')
mobj = re.search(datepattern, str_initdate)
if mobj:
year = int(mobj.group(1))
month = int(mobj.group(2))
day = int(mobj.group(3))
hour = int(mobj.group(4))
minute = int(mobj.group(5))
USER_init = int(time.mktime((year, month, day, hour, minute, 0, 0, 0, -1)))
u_init_time = USER_init
# to check :
# print(datetime.fromtimestamp(u_init_time))
# Final Date
if is_rt_monitoring:
pass
elif self.is_post and ('finaldate' in self.request.form):
str_finaldate = self.request.form.get('finaldate')
mobj = re.search(datepattern, str_finaldate)
if mobj:
year = int(mobj.group(1))
month = int(mobj.group(2))
day = int(mobj.group(3))
hour = int(mobj.group(4))
minute = int(mobj.group(5))
USER_final = int(time.mktime((year, month, day, hour, minute, 0, 0, 0, -1)))
u_final_time = USER_final
# Level
if self.is_post and ('level' in self.request.form):
str_level = self.request.form.get('level')
if str_level and str_level.isdigit() and (int(str_level) > 0) and (int(str_level) < 16):
USER_level = str_level
u_level = str_level
# Page
if self.is_post and ('page' in self.request.form):
str_page = self.request.form.get('page')
if str_page and str_page.isdigit() and (int(str_page) > 0) and (int(str_page) <= 999):
USER_page = str_page
# Pattern
strpattern = "^[0-9a-zA-Z. _|^!\-()?]{1,128}$"
intpattern = "^[0-9]{1,8}$"
if self.is_post and ('strpattern' in self.request.form):
str_strpattern = self.request.form.get('strpattern')
if re.search(strpattern, str_strpattern):
USER_pattern = str_strpattern
u_pattern = USER_pattern
# Getting location
if self.is_post and ('locationpattern' in self.request.form):
lcpattern = "^[0-9a-zA-Z. _|^!>\/\\-]{1,156}$"
str_locationpattern = self.request.form.get('locationpattern')
if re.search(lcpattern, str_locationpattern):
LOCATION_pattern = str_locationpattern
u_location = LOCATION_pattern
# Group pattern
if self.is_post and ('grouppattern' in self.request.form):
str_grouppattern = self.request.form.get('grouppattern')
if str_grouppattern == "ALL":
USER_group = None
elif re.search(strpattern, str_grouppattern):
USER_group = str_grouppattern
pass
# Log pattern
if self.is_post and ('logpattern' in self.request.form):
str_logpattern = self.request.form.get('logpattern')
if str_logpattern == "ALL":
USER_log = None
elif re.search(strpattern, str_logpattern):
USER_log = str_logpattern
# Rule pattern
if self.is_post and ('rulepattern' in self.request.form):
str_rulepattern = self.request.form.get('rulepattern')
if re.search(strpattern, str_rulepattern):
USER_rule = str_rulepattern
u_rule = USER_rule
# Src ip patt |
macarthur-lab/xbrowse | deploy/docker/seqr/config/local_settings.py | Python | agpl-3.0 | 2,316 | 0.004318 | import os
import pymongo
import imp
# django stuff
reference_data_dir = '../data/reference_data'
#DEBUG = True
#COMPRESS_ENABLED = False
GENERATED_FILES_DIR = os.path.join(os.path.dirname(__file__), 'generated_files')
MEDIA_ROOT = os.path.join(GENERATED_FILES_DIR , 'media/')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('POSTGRES_SERVICE_HOSTNAME', 'localhost'),
'PORT': int(os.environ.get('POSTGRES_SERVICE_PORT', '5432')),
'NAME': 'seqrdb',
'USER': os.environ.get('POSTGRES_USERNAME', 'postgres'),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD', ''),
}
}
ALLOWED_HOSTS = ['*']
EMAIL_BACKEND = "anymail.backends.postmark.EmailBackend"
DEFAULT_FROM_EMAIL = "seqr@broadinstitute.org"
ANYMAIL = {
#"SENDGRID_API_KEY": os.environ.get('SENDGRID_API_KEY', 'sendgrid-api-key-placeholder'),
"POSTMARK_SERVER_TOKEN": os.environ.get('POSTMARK_SERVER_TOKEN', 'postmark-server-token-placeholder'),
}
#
# xbrowse stuff
#
REFERENCE_SETTINGS = imp.load_source(
'reference_settings',
os.path.dirname(os.path.realpath(__file__)) + '/reference_settings.py'
)
CUSTOM_ANNOTATOR_SETTINGS = imp.load_source(
'custom_annotation_settings',
os.path.dirname(os.path.realpath(__file__)) + '/custom_annotator_settings.py'
)
ANNOTATOR_SETTINGS = imp.load_source(
'annotator_settings',
os.path.dirname(os.path.realpath(__file__)) + '/annotator_settings.py'
)
_conn = pymongo.MongoClient(host=os.environ.get('MONGO_SERVICE_HOSTNAME', 'localhost:27017'))
DATASTORE_DB = _conn['xbrowse_datastore']
POPULATION_DATASTORE_DB = _conn['xbrowse_pop_datastore']
DEFAULT_CONTROL_COHORT = 'controls'
CONTROL_COHORTS = [
{
'slug': 'controls',
'vcf': '',
},
]
UPLOADED_PEDIGREE_FILE_RECIPIENTS = []
COVERAGE_DB = _conn['xbrowse_coverage']
PROJECT_DATASTORE_DB = _conn['xbrowse_proj_store']
CNV_STORE_DB_NAME = 'xbrowse_cnvs'
CUSTOM_POPULATIONS_DB = _conn['xcustom_refpops']
COVERAGE | _DB = _conn['coverage']
READ_VIZ_BAM_PATH = 'https://broad-seqr'
READ_VIZ_CRAM_PATH = 'broad-seqr:5000'
READ_VIZ_USERNAME = "xbrowse-bams"
READ_VIZ_PASSWD = "xbrowse-bams"
ADMINS = [
('Ben Weisburd', 'weisburd@ | broadinstitute.org'),
('Hana Snow', 'hsnow@broadinstitute.org'),
] |
ambasta/mcmd | mcmd/config.py | Python | gpl-3.0 | 251 | 0 | GSD_BUS_NAME = 'org.gnome.SettingsDaemon.Power'
GSD_OBJ_PATH = '/org/gnome/SettingsDaemon/Power'
GSD_SCR_NAME = 'org.gnome.SettingsD | aemon.Power.Screen'
BRI_PRO_NAME = 'Brig | htness'
FD_PRO_IFACE = 'org.freedesktop.DBus.Properties'
BRIGHTNESS_S = 1.6875
|
0nse/WikiWho | scripts/dataAnalysis/BlockedAfDUserExtraction.py | Python | mit | 3,833 | 0.010447 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@author: Michael Ruster
Extract the timestamp and user of contributions which were authored before the
respecitve user had been blocked. The result can be further processed by
TimeframeCalculations.py.
Requirements: MediaWiki Utilities, WikiWho DiscussionParser
Usage: python BlockedAfDUserExtraction.py -i /path/to/dumps/ -b /path/to/WikiParser/blocks.csv
'''
from mw.xml_dump import Iterator as mwIterator
from mw.xml_dump.functions import open_file
# Our very own little dependency hell:
from sys import path
path.append('../..')
path.append('../../WikiCodeCleaner')
path.append('../../functions')
import WikiWho
import BlockTimeCalculation
import TimeframeCalculations
import csv
import os.path
def extract(path, users, blocks):
    """Write one CSV row per AfD revision authored by a later-blocked user.

    For every revision in the dump(s) under *path* whose author appears in
    *users*, look up the seconds remaining until that author's next block
    and append a tab-separated row to ../data/blockedAfDUsers.csv.

    Args:
        path: path to the Wikipedia page dump(s) (XML, 7z, bz2, ...).
        users: user names that participated in an AfD and were blocked at
            least once (only membership tests are done, so a set is ideal).
        blocks: blocked-users mapping as produced by
            BlockTimeCalculation.createBlockedUsersDict.

    Raises:
        IOError: if the output file already exists (never clobber results).
    """
    outputPath = '../data/blockedAfDUsers.csv'
    if os.path.isfile(outputPath):
        raise IOError('[E] File "%s" already exists. Aborting.' % outputPath)
    # newline='' lets the csv module control line endings itself (avoids
    # blank rows on Windows); 'a' keeps the original append semantics.
    # (The original shadowed the path variable with the file object.)
    with open(outputPath, 'a', newline='') as outputFile:
        writer = csv.writer(outputFile, delimiter='\t',
                            quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for fileName in WikiWho.extractFileNamesFromPath(path):
            print('[I] Now processing the file "%s".' % fileName)
            # Access the file.
            dumpIterator = mwIterator.from_file(open_file(fileName))
            for page in dumpIterator:
                for revision in page:
                    # Skip revisions whose contributor is missing/suppressed.
                    if revision.contributor:
                        # participated in AfD and was blocked:
                        if revision.contributor.user_text in users:
                            secondsToBlock = BlockTimeCalculation.calculateSecondsUntilNextBlock(
                                blocks, revision.contributor.user_text,
                                revision.timestamp)
                            # If secondsToBlock is -1, the user was once blocked but this post
                            # belongs to the time after, when she was never blocked again.
                            if secondsToBlock != -1:
                                writer.writerow([revision.timestamp,
                                                 revision.contributor.id,
                                                 revision.contributor.user_text,
                                                 int(revision.id),
                                                 page.title,
                                                 secondsToBlock])
if __name__ == '__main__':
import argparse, pickle, os
parser = argparse.ArgumentParser(description='A method for extracting timestamps of users that have been blocked at least once and have participated in an AfD at least once.',
epilog='''
WikiWho DiscussionParser, Copyright (C) 2015 Fabian Flöck, Maribel Acosta, Michael Ruster (based on wikiwho by Fabian Flöck, Maribel Acosta).
WikiWho DiscussionParser comes with ABSOLUTELY NO WARRANTY. This is free software, and you are welcome to redistribute it under certain conditions. For more information, see the LICENSE and README.md files this program should have been distributed with.
''')
parser.add_argument('-i', dest='pageDumpPath', required=True,
help='Path to the Wikipedia page(s) dum | p (XML, 7z, bz2…).')
parser.add_argument('-b', dest='blockLog', type=argparse.FileType('r') | ,
default=None, nargs='?',
help='Path to the block log file produced with 0nse/WikiParser (CSV).'),
args = parser.parse_args()
usersFile = '../data/blockedAfDUserNames_temp.pkl'
TimeframeCalculations.extractLastPostToBlockDeltas(usersOutputFile=usersFile)
# If you stumble upon this code and ask yourself: why pickle? Why not just a
# return parameter? I wanted to try pickle for once. That's all.
with open(usersFile, 'rb') as inputFile:
users = pickle.load(inputFile)
os.remove(usersFile)
blocks = BlockTimeCalculation.createBlockedUsersDict(args.blockLog)
extract(args.pageDumpPath, users, blocks)
|
patrickwestphal/owlapy | owlapy/model/owldataintersectionof.py | Python | gpl-3.0 | 1,028 | 0 | from .owldatarangevisitor import OWLDataRangeVisitor, OWLDataRangeVisitorEx
from .owldatavisitor import OWLDataVisitor, OWLDataVisitorEx
from .owlnarydatarange import OWLNaryDataRange
from .owlobjectvisitor import OWLObjectVisitor, OWLObjectVisitorEx
from owlapy.util import accept_default, accept_default_ex
class OWLDataIntersectionOf(OWLNaryDataRange):
    """Data range that is the intersection of its operand data ranges.

    NOTE(review): the original carried a "TODO: implement" marker -- the
    visitor registration below is complete, but further domain logic may
    still be missing.  Two lines garbled by extraction artifacts (' | ')
    have been repaired here.
    """
    def __init__(self, operands):
        """
        :param operands: a set of owlapy.model.OWLDataRange objects
        """
        super().__init__(operands)
        # Register the default accept function for every visitor type this
        # object supports, so obj.accept(visitor) dispatches correctly for
        # both the plain and the value-returning ("Ex") visitor variants.
        self._accept_fn_for_visitor_cls[OWLDataRangeVisitor] = accept_default
        self._accept_fn_for_visitor_cls[OWLDataRangeVisitorEx] = \
            accept_default_ex
        self._accept_fn_for_visitor_cls[OWLDataVisitor] = accept_default
        self._accept_fn_for_visitor_cls[OWLDataVisitorEx] = accept_default_ex
        self._accept_fn_for_visitor_cls[OWLObjectVisitor] = accept_default
        self._accept_fn_for_visitor_cls[OWLObjectVisitorEx] = accept_default_ex
|
dragorosson/heat | heat/db/sqlalchemy/migrate_repo/versions/064_raw_template_predecessor.py | Python | apache-2.0 | 1,809 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import migrate
import sqlalchemy
from heat.db.sqlalchemy import utils as migrate_utils
def upgrade(migrate_engine):
    """Drop the raw_template.predecessor column and its FK constraint.

    Args:
        migrate_engine: the sqlalchemy-migrate engine for the database
            being upgraded.
    """
    if migrate_engine.name == 'sqlite':
        # sqlite cannot drop columns/constraints in place; rebuild the table.
        upgrade_sqlite(migrate_engine)
        return
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    tmpl_table = sqlalchemy.Table('raw_template', meta, autoload=True)
    # The foreign-key constraint must be dropped before the column itself.
    fkey = migrate.ForeignKeyConstraint(
        columns=[tmpl_table.c.predecessor],
        refcolumns=[tmpl_table.c.id],
        name='predecessor_fkey_ref')
    fkey.drop()
    tmpl_table.c.predecessor.drop()
def upgrade_sqlite(migrate_engine):
    """sqlite variant of upgrade(): rebuild raw_template without the column.

    sqlite lacks ALTER TABLE DROP COLUMN support in this toolchain, so a
    new table is cloned without the 'predecessor' column and the existing
    rows are copied across.
    """
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine
    tmpl_table = sqlalchemy.Table('raw_template', meta, autoload=True)
    ignorecols = [tmpl_table.c.predecessor.name]
    new_template = migrate_utils.clone_table('new_raw_template',
                                             tmpl_table,
                                             meta, ignorecols=ignorecols)
    # migrate existing rows to the new table, dropping the removed column
    migrate_utils.migrate_data(migrate_engine,
                               tmpl_table,
                               new_template,
                               skip_columns=['predecessor'])
|
bgpfu/bgpfu | src/bgpfu/prefixlist/simple.py | Python | apache-2.0 | 3,810 | 0.000262 | # Copyright (C) 2016 Matt Griswold <grizz@20c.com>
#
# This file is part of bgpfu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import ipaddress
from bgpfu.prefixlist import PrefixListBase
def _try_combine(aggregate, current):
"try combining and replacing the last element on the aggregate list"
if aggregate and aggregate[-1]:
supernet = aggregate[-1].supernet()
if supernet == current.supernet():
aggregate[-1] = supernet
return True
return False
def _do_aggregate(prefixlist):
    """Collapse a list of ip_network objects by joining sibling prefixes.

    Repeatedly walks the (sorted) list, dropping prefixes already covered
    by a preceding one and replacing pairs of sibling networks with their
    common supernet, until a full pass produces no further reduction.
    Exact prefix lengths are lost when siblings are merged.
    """
    if len(prefixlist) <= 1:
        return prefixlist
    remaining = sorted(prefixlist)
    # TODO check for default and skip it?
    while True:
        merged = []
        pending = None
        for net in remaining:
            if pending is None:
                # First network of this pass.
                pending = net
            elif pending.overlaps(net):
                # The list is sorted, so an overlap means net is contained
                # in pending -- drop it.
                continue
            elif pending.supernet() == net.supernet():
                # pending and net are the two halves of one parent prefix.
                pending = pending.supernet()
            else:
                # Nothing to combine; commit pending and shift forward.
                merged.append(pending)
                pending = net
        if pending is not None:
            # Final leftover: one last attempt to merge with the tail
            # (this inlines the original _try_combine helper).
            if merged and merged[-1].supernet() == pending.supernet():
                merged[-1] = merged[-1].supernet()
            else:
                merged.append(pending)
        if len(merged) == len(remaining):
            # A full pass changed nothing -- the result is stable.
            return merged
        remaining = merged
class SimplePrefixList(P | refixListBase, collections.abc.MutableSequence):
"""
Simple PrefixList implemenatation using collections
*NOTE* loses prefix length info on aggregate
"""
def __init__(self, prefixes=None):
if prefixes:
self._prefixes = list(map(ipaddress.ip_network, list(map(str, prefixes))))
else:
self._prefixes = []
def __getitem__(self, i):
return self._prefixes[i]
def __setitem__(self, i, v):
self._prefixes[i] = self.check_val(v)
| def insert(self, i, v):
self._prefixes.insert(i, self.check_val(v))
def iter_add(self, it):
for v in it:
self._prefixes.append(self.check_val(v))
def __delitem__(self, i):
del self._prefixes[i]
def __len__(self):
return len(self._prefixes)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._prefixes == other._prefixes
raise TypeError("object not PrefixList type")
def __ne__(self, other):
return not self == other
def __str__(self):
return str(self._prefixes)
@property
def ipv4(self):
return [p for p in self._prefixes if p.version == 4]
@property
def ipv6(self):
return [p for p in self._prefixes if p.version == 6]
def str_list(self):
return list(map(str, self._prefixes))
    def aggregate(self):
        """Return a new PrefixList with this list's prefixes aggregated.

        IPv4 and IPv6 prefixes are aggregated independently (mixing address
        families in one pass would be meaningless) and the results are
        concatenated, v4 first.  A single-element list is returned as a
        copy unchanged.  Note the class docstring's caveat: aggregation
        loses the original prefix-length information.
        """
        if len(self._prefixes) == 1:
            return self.__class__(self._prefixes)
        v4 = [p for p in self._prefixes if p.version == 4]
        v6 = [p for p in self._prefixes if p.version == 6]
        v4 = _do_aggregate(v4)
        v6 = _do_aggregate(v6)
        return self.__class__(v4 + v6)
|
googleapis/python-aiplatform | samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_async.py | Python | apache-2.0 | 1,664 | 0.001803 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for PurgeExecutions
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1beta1_Metadat | aService_PurgeExecutions_async]
from google.cloud import aiplatform_v1beta1
async def sample_purge_executions():
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.PurgeExecutionsRequest(
parent="parent_value",
filter="filter_value",
)
# Make the request
operation = client.purge_executions(request= | request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeExecutions_async]
|
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/io/tests/test_stata.py | Python | apache-2.0 | 56,765 | 0.000018 | # -*- coding: utf-8 -*-
# pylint: disable=E1101
import datetime as dt
import os
import struct
import sys
import warnings
from datetime import datetime
from distutils.version import LooseVersion
import nose
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import compat
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, | StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
from pandas.t | slib import NaT
from pandas.types.common import is_categorical_dtype
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
self.dta22_118 = os.path.join(self.dirpath, 'stata14_118.dta')
self.dta23 = os.path.join(self.dirpath, 'stata15.dta')
self.dta24_111 = os.path.join(self.dirpath, 'stata7_111.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path, write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
with StataReader(self.dta1_114) as rdr:
with warnings.catch_warnings(record=True) as w: # noqa
parsed_114_data = rdr.data()
with StataReader(self.dta1_114) as rdr:
parsed_114_read = rdr.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
parsed_114 = self.read_dta(self.dta1_114)
parsed_117 = self.read_dta(self.dta1_117)
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
self.assertEqual(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected,
check_datetimelike_compat=True)
tm.assert_frame_equal(parsed_115, expected,
check_datetimelike_compat=True)
tm.assert_frame_equal(parsed_117, expected,
check_datetimelike_compat=True)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self. |
DigitalCampus/django-ujjwal-oppia | oppia/viz/urls.py | Python | gpl-3.0 | 1,094 | 0.009141 | # oppia/viz/urls.py
from django.conf import settings
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^summary/$', 'oppia.viz.views.summary_view', name="oppia_viz_summary"),
url(r'^user-registrations/$', 'oppia.viz.views.user_registrations_view', name="oppia_viz_user_registrations"),
url(r'^acti | vity-by-country/$', 'oppia.viz.views.activity_by_country_view',
name="oppia_viz_activity_by_country"),
url(r'^course-activity/$', 'oppia.viz.views.course_activity_view', name="oppia_viz_course_activity"),
url(r'^course-downloads/$', 'oppia.viz.views.course_downloads_v | iew', name="oppia_viz_course_downloads"),
url(r'^method-mixes/$', 'oppia.viz.views.method_mixes_view', name="oppia_viz_method_mixes"),
url(r'^unique-repeat-clients/$', 'oppia.viz.views.clients_view', name="oppia_viz_clients"),
url(r'^films-for-method/$', 'oppia.viz.views.films_for_method_view', name="oppia_viz_films_for_methods"),
url(r'^map/$', 'oppia.viz.views.map_view', name="oppia_viz_map"),
) |
sametmax/Django--an-app-at-a-time | ignore_this_directory/ignore_me/views.py | Python | mit | 391 | 0 | # coding: utf-8
from _ | _future__ import unicode_literals
import re
import os
from django.shortcuts import render
from django.conf import settings
def index(request):
p = re.compile(r'^app\d+_')
apps = (a.split('_') for a in settings.INSTALLED_APPS if p.match(a))
return render(request, 'ignore_me/ind | ex.html',
{"apps": sorted(apps), "settings": settings})
|
barak/ninja | misc/ninja_syntax.py | Python | apache-2.0 | 5,166 | 0.002904 | #!/usr/bin/python
"""Python module for generating | .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
return word.replace('$ ','$$ ').replace(' ','$ ').replace(':', '$:')
class Writer(object):
def __init__(self, output, width=78):
self.output = output
self.width = width
def newline(self):
self.output.write('\n')
def comment(self, text):
| for line in textwrap.wrap(text, self.width - 2):
self.output.write('# ' + line + '\n')
def variable(self, key, value, indent=0):
if value is None:
return
if isinstance(value, list):
value = ' '.join(filter(None, value)) # Filter out empty strings.
self._line('%s = %s' % (key, value), indent)
def rule(self, name, command, description=None, depfile=None,
generator=False, restat=False, rspfile=None, rspfile_content=None):
self._line('rule %s' % name)
self.variable('command', command, indent=1)
if description:
self.variable('description', description, indent=1)
if depfile:
self.variable('depfile', depfile, indent=1)
if generator:
self.variable('generator', '1', indent=1)
if restat:
self.variable('restat', '1', indent=1)
if rspfile:
self.variable('rspfile', rspfile, indent=1)
if rspfile_content:
self.variable('rspfile_content', rspfile_content, indent=1)
def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
variables=None):
outputs = self._as_list(outputs)
all_inputs = self._as_list(inputs)[:]
out_outputs = list(map(escape_path, outputs))
all_inputs = list(map(escape_path, all_inputs))
if implicit:
implicit = map(escape_path, self._as_list(implicit))
all_inputs.append('|')
all_inputs.extend(implicit)
if order_only:
order_only = map(escape_path, self._as_list(order_only))
all_inputs.append('||')
all_inputs.extend(order_only)
self._line('build %s: %s %s' % (' '.join(out_outputs),
rule,
' '.join(all_inputs)))
if variables:
if isinstance(variables, dict):
iterator = iter(variables.items())
else:
iterator = iter(variables)
for key, val in iterator:
self.variable(key, val, indent=1)
return outputs
def include(self, path):
self._line('include %s' % path)
def subninja(self, path):
self._line('subninja %s' % path)
def default(self, paths):
self._line('default %s' % ' '.join(self._as_list(paths)))
def _count_dollars_before_index(self, s, i):
"""Returns the number of '$' characters right in front of s[i]."""
dollar_count = 0
dollar_index = i - 1
while dollar_index > 0 and s[dollar_index] == '$':
dollar_count += 1
dollar_index -= 1
return dollar_count
def _line(self, text, indent=0):
"""Write 'text' word-wrapped at self.width characters."""
leading_space = ' ' * indent
while len(leading_space) + len(text) > self.width:
# The text is too wide; wrap if possible.
# Find the rightmost space that would obey our width constraint and
# that's not an escaped space.
available_space = self.width - len(leading_space) - len(' $')
space = available_space
while True:
space = text.rfind(' ', 0, space)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# No such space; just use the first unescaped space we can find.
space = available_space - 1
while True:
space = text.find(' ', space + 1)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# Give up on breaking.
break
self.output.write(leading_space + text[0:space] + ' $\n')
text = text[space+1:]
# Subsequent lines are continuations, so indent them.
leading_space = ' ' * (indent+2)
self.output.write(leading_space + text + '\n')
def _as_list(self, input):
if input is None:
return []
if isinstance(input, list):
return input
return [input]
def escape(string):
"""Escape a string such that it can be embedded into a Ninja file without
further interpretation."""
assert '\n' not in string, 'Ninja syntax does not allow newlines'
# We only have one special metacharacter: '$'.
return string.replace('$', '$$')
|
1337/yesterday-i-learned | leetcode/42h.py | Python | gpl-3.0 | 1,171 | 0 | from typing import List
# Scan left, scan right, subtract actual heights, sum them all up
class Solution:
def trap(self, height: List[int]) -> int:
if not height:
return 0
left_heights = [0] * len(height)
right_heights = [0] * len(height)
left_max = 0
for idx, cur in enumerate(height):
left_heights[idx] = max(left_max, cur)
if cur > left_max:
left_max = cur
right_max = 0
for flip, cur in enum | erate(reversed(height)):
idx = len(height) - flip - 1
right_heights[idx] = max(right_max, cur)
if cur > right_max:
right_max = cur
print(left_heights, right_heights)
mins = []
for idx, pairs in enumerate(zip( | left_heights, right_heights)):
mins.append(min(pairs) - height[idx])
print(mins)
return sum(mins)
a = Solution()
assert a.trap([4, 2, 3]) == 1
assert a.trap([1, 0, 1]) == 1
assert a.trap([1]) == 0
assert a.trap([2, 1, 2]) == 1
assert a.trap([0, 0, 0]) == 0
assert a.trap([0, 1, 0]) == 0
assert a.trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]) == 6
|
karllessard/tensorflow | tensorflow/lite/testing/generate_examples_report.py | Python | apache-2.0 | 4,055 | 0.009618 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Make HTML tables that report where TF and TOCO failed to convert models.
This is primarily used by generate_examples.py. See it or
`make_report_table` for more details on usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import html
import json
FAILED = "FAILED"
SUCCESS = "SUCCESS"
NOTRUN = "NOTRUN"
def make_report_table(fp, title, reports):
"""Make an HTML report of the success/failure reports.
Args:
fp: File-like object in which to put the html.
title: "Title of the zip file this pertains to."
reports: a list of conversion attempts. (report_args, report_vals) i.e.
({"shape": [1,2,3], "type": "tf.float32"},
{"tf": "SUCCESS", "toco": "FAILURE", "toco_log": "Unsupported type.",
"tf_log": ""})
"""
# sort reports by if TOCO failure and then TF failure (reversed)
reports.sort(key=lambda x: x[1]["toco"], reverse=False)
reports.sort(key=lambda x: x[1]["tf"], reverse=True)
def result_cell(x, row, col):
"""Produce a cell with the condition string `x`."""
s = html.escape(repr(x), quote=True)
color = "#44ff44" if x == SUCCESS else (
"#ff4444" if x == FAILED else "#eeeeee")
handler = "ShowLog(%d, %d)" % (row, col)
fp.write("<td style='background-color: %s' onclick='%s'>%s</td>\n" % (
color, handler, s))
fp.write("""<html>
<head>
<title>tflite report</title>
<style>
body { font-family: Arial; }
th { background-color: #555555; color: #eeeeee; }
td { vertical-align: top; }
td.horiz {width: 50%;}
pre { white-space: pre-wrap; word-break: keep-all; }
table {width: 100%;}
</style>
</head>
""")
# Write the log data to a javascript variable and also make a function
# in javascript to show the log when an item is clicked.
fp.write("<script> \n")
fp.write("""
function ShowLog(row, col) {
var log = document.getElementById("log");
log.innerHTML = "<pre>" + data[row][col] + "</pre>";
}
""")
fp.write("var data = \n")
fp.write(json.dumps([[html.escape(x[1]["tf_log"], quote=True),
html.escape(x[1]["toco_log"], quote=True)]
for x in reports]))
fp.write(";</script>\n")
# Wr | ite the main table and use onclick on the items that have log items.
fp.write("""
<body>
<h1>TOCO Conversion</h1>
<h2>%s</h2>
"" | " % title)
# Get a list of keys that are in any of the records.
param_keys = {}
for params, _ in reports:
for k in params.keys():
param_keys[k] = True
fp.write("<table>\n")
fp.write("<tr><td class='horiz'>\n")
fp.write("<div style='height:1000px; overflow:auto'>\n")
fp.write("<table>\n")
fp.write("<tr>\n")
for p in param_keys:
fp.write("<th>%s</th>\n" % html.escape(p, quote=True))
fp.write("<th>TensorFlow</th>\n")
fp.write("<th>TOCO</th>\n")
fp.write("</tr>\n")
for idx, (params, vals) in enumerate(reports):
fp.write("<tr>\n")
for p in param_keys:
fp.write(" <td>%s</td>\n" % html.escape(repr(params[p]), quote=True))
result_cell(vals["tf"], idx, 0)
result_cell(vals["toco"], idx, 1)
fp.write("</tr>\n")
fp.write("</table>\n")
fp.write("</div>\n")
fp.write("</td>\n")
fp.write("<td class='horiz' id='log'></td></tr>\n")
fp.write("</table>\n")
fp.write("<script>\n")
fp.write("</script>\n")
fp.write("""
</body>
</html>
""")
|
supercheetah/diceroller | pyinstaller/buildtests/import/test_app_with_plugins.py | Python | artistic-2.0 | 2,260 | 0 | #
# Copyright (C) 2012, Daniel Hyams
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# This little sample application generates a plugin on the fly,
# and then tries to import it.
import os
import sys
# We first import a sta | tic plugin; the application might have
# | certain plugins that it always loads.
try:
print('Attempting to import static_plugin...')
mdl = __import__('static_plugin')
except ImportError:
raise SystemExit('Failed to import the static plugin.')
plugin_contents = """
print('DYNAMIC PLUGIN IMPORTED.')
print('This is some user-generated plugin that does not exist until')
print(' the application starts and other modules in the directory')
print(' are imported (like the static_plugin).')
"""
# Create the dynamic plugin in the same directory as the executable.
if hasattr(sys, 'frozen'):
program_dir = os.path.abspath(sys.prefix)
else:
program_dir = os.path.dirname(os.path.abspath(__file__))
plugin_filename = os.path.join(program_dir, 'dynamic_plugin.py')
fp = open(plugin_filename, 'w')
fp.write(plugin_contents)
fp.close()
# Try import dynamic plugin.
is_error = False
try:
print('Attempting to import dynamic_plugin...')
mdl = __import__('dynamic_plugin')
except ImportError:
is_error = True
# Clean up. Remove files dynamic_plugin.py[c]
for f in (plugin_filename, plugin_filename + 'c'):
try:
os.remove(plugin_filename)
except OSError:
pass
# Statement 'try except finally' is available since Python 2.5+.
if is_error:
# Raise exeption.
raise SystemExit('Failed to import the dynamic plugin.')
|
levkar/odoo-addons | sale_multiple_invoice/wizard/__init__.py | Python | agpl-3.0 | 124 | 0 | # -*- c | oding: utf-8 -*-
import sale_make_invoice_advance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftw | idth=4:
|
CivilNet/Gemfield | dockerfiles/py-faster-rcnn/files/gemfield/py-faster-rcnn/tools/train_svms.py | Python | gpl-3.0 | 13,475 | 0.000816 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(range(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in range(len(roidb)):
for j in range(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in range(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in range(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
| roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in range(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
| feat = self.net.blobs[self.layer].data
for j in range(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print('get_pos_examples: {:d}/{:d} {:.3f}s' \
.format(i + 1, len(roidb), _t.average_time))
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
# Initialize SVMs in a smart way. Not doing this because its such
# a good initialization that we might not learn something close to
# the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in range(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in range(1, self.imdb.num_classes):
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in range(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
|
Glottotopia/aagd | moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/action/fullsearch.py | Python | mit | 10,979 | 0.004099 | # -*- coding: iso-8859-1 -*-
"""
MoinMoin - fullsearch action
This is the backend of the search form. Search pages and print results.
@copyright: 2001 by Juergen Hermann <jh@web.de>
@license: GNU GPL, see COPYING for details.
"""
import re, time
from MoinMoin.Page import Page
from MoinMoin import wikiutil
from parsedatetime.parsedatetime import Calendar
from MoinMoin.web.utils import check_surge_protect
def checkTitleSearch(request):
""" Return 1 for title search, 0 for full text search, -1 for idiot spammer
who tries to press all buttons at once.
When used in FullSearch macro, we have 'titlesearch' parameter with
'0' or '1'. In standard search, we have either 'titlesearch' or
'fullsearch' with localized string. If both missing, default to
True (might happen with Safari) if this isn't an advanced search.
"""
form = request.values
if 'titlesearch' in form and 'fullsearch' in form:
ret = -1 # spammer / bot
else:
try:
ret = int(form['titlesearch'])
except ValueError:
ret = 1
except KeyError:
ret = ('fullsearch' not in form and not isAdvancedSearch(request)) and 1 or 0
return ret
def isAdvancedSearch(request):
""" Return True if advanced search is requested """
try:
return int(request.values['advancedsearch'])
except KeyError:
return False
def searchHints(f, hints):
""" Return a paragraph showing hints for a search
@param f: the formatter to use
@param hints: list of hints (as strings) to show
"""
return ''.join([
f.paragraph(1, attr={'class': 'searchhint'}),
# this is illegal formatter usage anyway, so we can directly use a literal
"<br>".join(hints),
f.paragraph(0),
])
def execute(pagename, request, fieldname='value', titlesearch=0, statistic=0):
_ = request.getText
titlesearch = checkTitleSearch(request)
if titlesearch < 0:
check_surge_protect(request, kick=True) # get rid of spammer
return
advancedsearch = isAdvancedSearch(request)
form = request.values
# context is relevant only for full search
if titlesearch:
context = 0
elif advancedsearch:
context = 180 # XXX: hardcoded context count for advancedsearch
else:
context = int(form.get('context', 0))
# Get other form parameters
needle = form.get(fieldname, '')
case = int(form.get('case', 0))
regex = int(form.get('regex', 0)) # no interface currently
hitsFrom = int(form.get('from', 0))
highlight_titles = int(form.get('highlight_titles', 1))
highlight_pages = int(form.get('highlight_pages', 1))
mtime = None
msg = ''
historysearch = 0
# if advanced search is enabled we construct our own search query
if advancedsearch:
and_terms = form.get('and_terms', '').strip()
or_terms = form.get('or_terms', '').strip()
not_terms = form.get('not_terms', '').strip()
#xor_terms = form.get('xor_terms', '').strip()
categories = form.getlist('categories') or ['']
timeframe = form.get('time', '').strip()
language = form.getlist('language') or ['']
mimetype = form.getlist('mimetype') or [0]
excludeunderlay = form.get('excludeunderlay', 0)
nosystemitems = form.get('nosystemitems', 0)
historysearch = form.get('historysearch', 0)
mtime = form.get('mtime', '')
if mtime:
mtime_parsed = None
# get mtime from known date/time formats
for fmt in (request.user.datetime_fmt,
request.cfg.datetime_fmt, request.user.date_fmt,
request.cfg.date_fmt):
try:
mtime_parsed = time.strptime(mtime, fmt)
except ValueError:
continue
else:
break
if mtime_parsed:
mtime = time.mktime(mtime_parsed)
else:
# didn't work, let's try parsedatetime
cal = Calendar()
mtime_parsed, parsed_what = cal.parse(mtime)
# XXX it is unclear if usage of localtime here and in parsedatetime module is correct.
# time.localtime is the SERVER's local time and of no relevance to the user (being
# somewhere in the world)
# mktime is reverse function for localtime, so this maybe fixes it again!?
if parsed_what > 0 and mtime_parsed <= time.localtime():
mtime = time.mktime(mtime_parsed)
else:
mtime_parsed = None # we don't use invalid stuff
# show info
if mtime_parsed:
# XXX mtime_msg is not shown in some cases
mtime_msg = _("(!) Only pages changed since '''%s''' are being displayed!",
wiki=True) % request.user.getFormattedDateTime(mtime)
else:
mtime_msg = _('/!\\ The modification date you entered was not '
'recognized and is therefore not considered for the '
'search results!', wiki=True)
else:
mtime_msg = None
word_re = re.compile(r'(\"[\w\s]+"|\w+)', re.UNICODE)
needle = ''
if categories[0]:
needle += 'category:%s ' % ','.join(categories)
if language[0]:
needle += 'language:%s ' % ','.join(language)
if mimetype[0]:
needle += 'mimetype:%s ' % ','.join(mimetype)
if excludeunderlay:
needle += '-domain:underlay '
if nosystemitems:
needle += '-domain:system '
if and_terms:
needle += '(%s) ' % and_terms
if not_terms:
needle += '(%s) ' % ' '.join(['-%s' % t for t in word_re.findall(not_terms)])
if or_terms:
needle += '(%s) ' % ' or '.join(word_re.findall(or_terms))
# check for sensible search term
stripped = needle.strip()
if len(stripped) == 0:
request.theme.add_msg(_('Please use a more selective search term instead '
'of {{{"%s"}}}', wiki=True) % wikiutil.escape(needle), "error")
Page(request, pagename).send_page()
return
needle = stripped
# Setup for type of search
if titlesearch:
title = _('Title Search: "%s"')
sort = 'page_name'
else:
if advancedsearch:
title = _('Advanced Search: "%s"')
else:
| title = _('Full Text Search: "%s"')
sort = 'w | eight'
# search the pages
from MoinMoin.search import searchPages, QueryParser, QueryError
try:
query = QueryParser(case=case, regex=regex,
titlesearch=titlesearch).parse_query(needle)
except QueryError: # catch errors in the search query
request.theme.add_msg(_('Your search query {{{"%s"}}} is invalid. Please refer to '
'HelpOnSearching for more information.', wiki=True, percent=True) % wikiutil.escape(needle), "error")
Page(request, pagename).send_page()
return
results = searchPages(request, query, sort, mtime, historysearch)
# directly show a single hit for title searches
# this is the "quick jump" functionality if you don't remember
# the pagename exactly, but just some parts of it
if titlesearch and len(results.hits) == 1:
page = results.hits[0]
if not page.attachment: # we did not find an attachment
page = Page(request, page.page_name)
querydict = {}
if highlight_pages:
highlight = query.highlight_re()
if highlight:
querydict.update({'highlight': highlight})
url = page.url(request, querystr=querydict)
request.http_redirect(url)
return
|
HBEE/accounting | ecoservice_financeinterface_datev/models/account_invoice.py | Python | gpl-3.0 | 3,072 | 0.00293 | # -*- coding: utf-8 -*-
from openerp import api, fields, models, _
from . import exceptions
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
enable_datev_checks = fields.Boolean('Perform Datev Checks', default=True)
@api.multi
def is_datev_validation_active(self):
self.ensure_one()
return self.enable_datev_checks and self.env['res.users'].browse(self._uid).company_id.enable_datev_checks
@api.multi
def perform_datev_validation(self, silent=False):
is_valid = True
errors = list()
for rec in self:
if rec.is_datev_validation_active():
if silent: # Shorter, more performant version w/o string and exception handling
for line in rec.invoice_line:
if not line.perform_datev_validation(silent=True):
return False
else:
for line_no, line in enumerate(rec.invoice_line, start=1):
try:
line.perform_datev_validation(line_no=line_no)
except exceptions.DatevWarning as dw:
is_valid = False
errors.append(dw.message)
if not (silent or is_valid):
raise exceptions.DatevWarnin | g(u'\n'.join(errors))
return is_valid
class AccountInvoiceLine(models.Model):
_inherit = 'account.invoice.line'
@api.multi
def perform_datev_validation(self, silent=False, line_no=None):
| """
Performs tests on an invoice line for whether the taxes are correctly set or not.
The major use of this method is in the condition of a workflow transition.
:param line_no: int Line number to be displayed in an error message.
:param silent: bool Specifies whether an exception in case of a failed test should be thrown
or if the checks should be performed silently.
:return: True if all checks were performed w/o errors or no datev checks are applicable. False otherwise.
:rtype: bool
"""
self.ensure_one()
if not self.is_datev_validation_applicable():
return True
is_valid = len(self.invoice_line_tax_id) == 1 and self.account_id.datev_steuer == self.invoice_line_tax_id
if not (silent or is_valid):
raise exceptions.DatevWarning(
_(u'Line {line}: The taxes specified in the invoice line ({tax_line}) and the corresponding account ({tax_account}) mismatch!').format(
line=line_no, tax_line=self.invoice_line_tax_id.description, tax_account=self.account_id.datev_steuer.description
)
)
return is_valid
@api.multi
def is_datev_validation_applicable(self):
"""
Tests if an invoice line is applicable to datev checks or not.
:return: True if it is applicable. Otherwise False.
:rtype: bool
"""
self.ensure_one()
return self.account_id.automatic
|
jorgebastida/gordon | gordon/resources/kinesis.py | Python | bsd-3-clause | 139 | 0 | from . import base
class | Kinesis(base.BaseStream) | :
"""Resource which consumes ``Kinesis``streams."""
grn_type = 'kinesis-stream'
|
pixlie/oxidizr | oxidizr/apps/twitter/management/commands/twitter_feed.py | Python | gpl-2.0 | 4,372 | 0.003202 | from twitter import OAuth, TwitterStream
from dateutil import parser
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db.utils import IntegrityError
from apps.keywords.models import BaseKeyword
from apps.twitter.models import Tweet, Account
class Command(BaseCommand):
def handle(self, *args, **options):
if not BaseKeyword.objects.count():
raise CommandError('No keywords found!')
keywords = ','.join([k['term'] for k in BaseKeyword.objects.values('term')])
twitter_stream = TwitterStream(auth=OAuth(
token=settings.TWITTER_TOKEN,
token_secret=settings.TWITTER_TOKEN_SECRET,
consumer_key=settings.TWITTER_CONSUMER_KEY,
consumer_secret=settings.TWITTER_CONSUMER_SECRET)
)
stream = twitter_stream.statuses.filter(track=keywords)
for tweet in stream:
if 'retweeted_status' in tweet:
# If this is a retweet of an earlier tweet, then we want to check only the original.
tweet = tweet['retweeted_status']
user = tweet['user']
author = Account(
twitter_id=user['id_str'],
screen_name=user['screen_name'],
name=user['name'],
url=user['url'] if 'url' in user else None,
status | _count=user['statu | ses_count'] if 'statuses_count' in user else 0,
follower_count=user['followers_count'] if 'followers_count' in user else 0,
following_count=user['friends_count'] if 'friends_count' in user else 0,
listed_in_count=user['listed_count'] if 'listed_count' in user else 0,
is_verified=user['verified'] if 'verified' in user else False
)
if (tweet['retweet_count'] and tweet['favorite_count'] and
(author.get_weight() > 1000 or tweet['entities']['urls'])):
# Some debug prints, visual confirmation :)
print '-=' * 45
print tweet['text'].encode('ascii', 'ignore')
print tweet['created_at'], tweet['favorite_count'], tweet['retweet_count'], author.get_weight()
try:
author.save()
except IntegrityError:
author = Account.objects.get(twitter_id=user['id_str'])
mentions = list()
if tweet['entities']['user_mentions']:
for user in tweet['entities']['user_mentions']:
try:
(mention, created) = Account.objects.get_or_create(
twitter_id=user['id_str'],
screen_name=user['screen_name'],
name=user['name'],
url=user['url'] if 'url' in user else None,
status_count=user['statuses_count'] if 'statuses_count' in user else 0,
follower_count=user['followers_count'] if 'followers_count' in user else 0,
following_count=user['friends_count'] if 'friends_count' in user else 0,
listed_in_count=user['listed_count'] if 'listed_count' in user else 0,
is_verified=user['verified'] if 'verified' in user else False
)
except IntegrityError:
mention = Account.objects.get(twitter_id=user['id_str'])
mentions.append(mention)
# try:
tw, created = Tweet.objects.get_or_create(
tweet_id=tweet['id_str'],
defaults=dict(
author=author,
text=tweet['text'],
created_at=parser.parse(tweet['created_at']),
favorite_count=tweet['favorite_count'],
retweet_count=tweet['retweet_count']
)
)
# except IntegrityError:
# tw = Tweet.objects.get(tweet_id=tweet['id_str'])
for user in mentions:
tw.mentions.add(user)
else:
continue |
MesserLab/SLiM | treerec/tests/test_specific_recipes.py | Python | gpl-3.0 | 6,364 | 0.004243 | """
Tests that look at the result of specific recipes in test_recipes
These may depend on the exact random seed used to run the SLiM simulation.
Individual recipes can be specified by decorating with
@pytest.mark.parametrize('recipe', ['test_____my_specific_recipe.slim'], indirect=True)
"""
import msprime
import numpy as np
import pyslim
import pytest
from recipe_specs import recipe_eq
class TestUnaryNodes:
def max_children_node(self, ts, exclude_roots=True, exclude_samples=True):
"""
Return a dict mapping nodes to their maximumn # children in the ts
"""
max_children = np.zeros(ts.num_nodes, dtype=int)
filter = np.zeros(ts.num_nodes, dtype=bool)
if exclude_samples:
filter[ts.samples()] = True
if exclude_roots:
filter[np.isin(np.arange(ts.num_nodes), ts.tables.edges.child) == False] = True
for tree in ts.trees():
for n in tree.nodes():
if not ts.node(n).is_sample():
max_children[n] = max(max_children[n], tree.num_children(n))
arr = {n:mc for n, (mc, rm) in enumerate(zip(max_children, filter)) if not rm}
return arr
@pytest.mark.parametrize('recipe', indirect=True, argvalues=[
"test_____retain_individuals_nonWF_unary.slim",
"test_____retain_individuals_unary.slim",
])
def test_contains_unary_nonsample_nodes(self, recipe):
for result in recipe["results"]:
for ts in result.get_ts():
assert np.any(np.array(list(self.max_children_node(ts).values())) == 1)
@pytest.mark.parametrize('recipe', indirect=True, argvalues=[
'test_____remember_individuals.slim',
])
def test_contains_unary_sample_nodes(self, recipe):
"""
Historical remembered individuals can have a node with a single descendant
"""
for result in recipe["results"]:
for ts in result.get_ts():
num_unary_nodes = 0
for tree in ts.trees():
for n in tree.nodes():
if tree.num_children(n) == 1 and tree.parent(n) >= 0:
assert | ts.node(n).individual >= 0 # has an individual
ind = ts.individual(ts.node(n).individual)
assert ts.node(n).is_sample()
assert (ind.flags & pyslim.INDIVIDUAL_REMEMBERED) != 0
num_unary_nodes += 1
assert num_unary_n | odes > 0
@pytest.mark.parametrize('recipe', indirect=True, argvalues=[
"test_____retain_and_remember_individuals.slim",
"test_____retain_individuals_nonWF.slim",
"test_____remember_individuals.slim",
])
def test_no_purely_unary_internal_nonsample_nodes(self, recipe):
for result in recipe["results"]:
for ts in result.get_ts():
max_children_per_node = self.max_children_node(ts)
assert np.all(np.array(list(max_children_per_node.values())) != 1)
class TestIndividualsInGeneration:
"""
Test that all the nodes on the ancestry at a fixed set of generations are present
"""
def num_lineages_at_time(self, ts, focal_time, pos):
edges = ts.tables.edges
times = ts.tables.nodes.time
return np.sum(np.logical_and.reduce((
times[edges.parent] > focal_time,
times[edges.child] <= focal_time,
edges.left <= pos,
edges.right > pos,
)))
@pytest.mark.parametrize('recipe, gens, final_gen', indirect=["recipe"], argvalues=[
('test_____retain_individuals_unary.slim', (50, 100), 200),
])
def test_all_lineages_covered(self, recipe, gens, final_gen):
"""
If all individuals in `gen` are retained with retainCoalescentOnly=F, we should
have unary nodes in individuals for all tree sequence lineages
"""
gens = final_gen - np.array(gens) # convert to ts times
for result in recipe["results"]:
for ts in result.get_ts():
# Check all initial generation nodes are roots
nodes_at_start = np.where(ts.tables.nodes.time == final_gen)[0]
nodes_at_gen = {g: np.where(ts.tables.nodes.time == g)[0] for g in gens}
for nodes in nodes_at_gen.values():
# All nodes in the target generations should have an individual
assert np.all(ts.tables.nodes.individual[nodes] >= 0)
for tree in ts.trees():
assert np.all(np.isin(tree.roots, nodes_at_start))
for gen, nodes in nodes_at_gen.items():
treenodes_at_gen = set(tree.nodes()) & set(nodes)
assert len(treenodes_at_gen) == self.num_lineages_at_time(
ts, gen, tree.interval.left)
@pytest.mark.parametrize('recipe, gens, final_gen', indirect=["recipe"], argvalues=[
('test_____retain_and_remember_individuals.slim', (50, 100), 200),
])
def test_not_all_lineages_covered(self, recipe, gens, final_gen):
"""
If all individuals in `gen` are retained with retainCoalescentOnly=T, we should
simplify out those with only unary nodes, and not all lineages will have a node
"""
gens = final_gen - np.array(gens) # convert to ts times
for result in recipe["results"]:
for ts in result.get_ts():
# Check all initial generation nodes are roots
nodes_at_start = np.where(ts.tables.nodes.time == final_gen)[0]
nodes_at_gen = {g: np.where(ts.tables.nodes.time == g)[0] for g in gens}
for nodes in nodes_at_gen.values():
# All nodes in the target generations should have an individual
assert np.all(ts.tables.nodes.individual[nodes] >= 0)
for tree in ts.trees():
assert np.all(np.isin(tree.roots, nodes_at_start))
for gen, nodes in nodes_at_gen.items():
treenodes_at_gen = set(tree.nodes()) & set(nodes)
assert len(treenodes_at_gen) < self.num_lineages_at_time(
ts, gen, tree.interval.left)
|
YunoHost/yunotest | jsonschema/validators.py | Python | gpl-3.0 | 14,604 | 0.003766 | from __future__ import division
import contextlib
import json
import numbers
try:
import requests
except ImportError:
requests = None
from jsonschema import _utils, _validators
from jsonschema.compat import (
Sequence, urljoin, urlsplit, urldefrag, unquote, urlopen,
str_types, int_types, iteritems,
)
from jsonschema.exceptions import ErrorTree # Backwards compatibility # noqa
from jsonschema.exceptions import RefResolutionError, SchemaError, UnknownType
_unset = _utils.Unset()
validators = {}
meta_schemas = _utils.URIDict()
def validates(version):
"""
Register the decorated validator for a ``version`` of the specification.
Registered validators and their meta schemas will be considered when
parsing ``$schema`` properties' URIs.
:argument str version: an identifier to use as the version's name
:returns: a class decorator to decorate the validator with the version
"""
def _validates(cls):
validators[version] = cls
if u"id" in cls.META_SCHEMA:
meta_schemas[cls.META_SCHEMA[u"id"]] = cls
return cls
return _validates
def create(meta_schema, validators=(), version=None, default_types=None): # noqa
if default_types is None:
default_types = {
u"array" : list, u"boolean" : bool, u"integer" : int_types,
u"null" : type(None), u"number" : numbers.Number, u"object" : dict,
u"string" : str_types,
}
class Validator(object):
VALIDATORS = dict(validators)
META_SCHEMA = dict(meta_schema)
DEFAULT_TYPES = dict(default_types)
def __init__(
self, schema, types=(), resolver=None, format_checker=None,
):
self._types = dict(self.DEFAULT_TYPES)
self._types.update(types)
if resolver is None:
resolver = RefResolver.from_schema(schema)
self.resolver = resolver
self.format_checker = format_checker
self.schema = schema
@classmethod
def check_schema(cls, schema):
for error in cls(cls.META_SCHEMA).iter_errors(schema):
raise SchemaError.create_from(error)
def iter_errors(self, instance, _schema=None):
if _schema is None:
_schema = self.schema
with self.resolver.in_scope(_schema.get(u"id", u"")):
ref = _schema.get(u"$ref")
if ref is not None:
validators = [(u"$ref", ref)]
else:
validators = iteritems(_schema)
for k, v in validators:
validator = self.VALIDATORS.get(k)
if validator is None:
continue
errors = validator(self, v, instance, _schema) or ()
for error in errors:
# set details if not already set by the called fn
error._set(
validator=k,
validator_value=v,
instance=instance,
schema=_schema,
)
if k != u"$ref":
error.schema_path.appendleft(k)
yield error
def descend(self, instance, schema, path=None, schema_path=None):
for error in self.iter_errors(instance, schema):
if path is not None:
error.path.appendleft(path)
if schema_path is not None:
error.schema_path.appendleft(schema_path)
yield error
def validate(self, *args, **kwargs):
for error in self.iter_errors(*args, **kwargs):
raise error
def is_type(self, instance, type):
if type not in self._types:
raise UnknownType(type, instance, self.schema)
pytypes = self._types[type]
# bool inherits from int, so ensure bools aren't reported as ints
if isinstance(instance, bool):
pytypes = _utils.flatten(pytypes)
is_number = any(
issubclass(pytype, numbers.Number) for pytype in pytypes
)
if is_number and bool not in pytypes:
return False
return isinstance(instance, pytypes)
def is_valid(self, instance, _schema=None):
error = next(self.iter_errors(instance, _schema), None)
return error is None
if version is not None:
Validator = validates(version)(Validator)
Validator.__name__ = version.title().replace(" ", "") + "Validator"
return Validator
def extend(validator, validators, version=None):
all_validators = dict(validator.VALIDATORS)
all_validators.update(validators)
return create(
meta_schema=validator.META_SCHEMA,
validators=all_validators,
version=version,
default_types=validator.DEFAULT_TYPES,
)
Draft3Validator = create(
meta_schema=_utils.load_schema("draft3"),
validators={
u"$ref" : _validators.ref,
u"additionalItems" : _validators.additionalItems,
u"additionalProperties" : _validators.additionalProperties,
u"dependencies" : _validators.dependencies,
u"disallow" : _validators.disallow_draft3,
u"divisibleBy" : _validators.multipleOf,
u"enum" : _validators.enum,
u"extends" : _validators.extends_draft3,
u"format" : _validators.format,
u"items" : _validators.items,
u"maxItems" : _validators.maxItems,
u"maxLength" : _validators.maxLength,
u"maximum" : _validators.maximum,
u"minItems" : _validators.minItems,
u"minLength" : _validators.minLength,
u"minimum" : _validators.minimum,
u"multipleOf" : _validators.multipleOf,
u"pattern" : _validators.pattern,
u"patternProperties" : _validators.patternProperties,
u"properties" : _validators.properties_draft3,
u"type" : _validators.type_draft3,
u"uniqueItems" : _validators.uniqueItems,
},
version="draft3",
)
Draft4Validator = create(
meta_schema=_utils.load_schema("draft4"),
validators={
u"$ref" : _validators.ref,
u"additionalItems" : _validators.additionalItems,
u"additionalProperties" : _validators.additionalProperties,
u"allOf" : _validators.allOf_draft4,
u"anyOf" : _validators.anyOf_draft4,
u"dependencies" : _validators.dependencies,
u"enum" : _validators.enum,
u"format" : _validators.format,
u"items" : _validators.items,
u"maxItems" : _validators.maxItems,
u"maxLength" : _validators.maxLength,
u"maxProperties" : _validators.maxProperties_draft4,
u"maximum" : _validators.maximum,
u"minItems" : _validators.minItems,
u"minLength" : _validators.minLength,
u"minProperties" : _validators.minProperties_draft4,
u"minimum" : _validators.minimum,
| u"multipleOf" : _validators.multipleOf,
u"not" : _validators.not_draft4,
u"oneOf" : _validators.oneOf_draft4,
u"pattern" : _validators.pattern,
u"patternProperties" : _validators.patternProperties,
u"properties" : _validators.properties_draft4,
u"required" : _validators.required_draft4,
u"type" : _validators.type_dr | aft4,
u"uniqueItems" : _validators.uniqueItems,
},
version="draft4",
)
class RefResolver(object):
"""
Resolve JSON References.
:argument str base_uri: URI of the referring document
:argument referrer: the actual referring document
:argument dict store: a mapping from URIs to documents to cache
:argument bool cache_remote: whether remote refs should be cached after
first resolution
:argument dict handlers: a mapping from URI schemes to functions that
should be used to retrieve them
"""
def __init__(
self, base_uri, referrer, store=(), cache_remote=True, handlers |
FireballDWF/cloud-custodian | c7n/resources/ecr.py | Python | apache-2.0 | 11,773 | 0.00034 | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from c7n.actions import RemovePolicyBase, Action
from c7n.exceptions import PolicyValidationError
from c7n.filters import CrossAccountAccessFilter, Filter, ValueFilter
from c7n.manager import resources
from c7n.query import QueryResourceManager, TypeInfo
from c7n import tags
from c7n.utils import local_session, type_schema
@resources.register('ecr')
class ECR(QueryResourceManager):
class resource_type(TypeInfo):
service = 'ecr'
enum_spec = ('describe_repositories', 'repositories', None)
name = "repositoryName"
arn = id = "repositoryArn"
arn_type = 'repository'
filter_name = 'repositoryNames'
filter_type = 'list'
def augment(self, resources):
client = local_session(self.session_factory).client('ecr')
results = []
for r in resources:
try:
r['Tags'] = client.list_tags_for_resource(
resourceArn=r['repositoryArn']).get('tags')
results.append(r)
except client.exceptions.RepositoryNotFoundException:
continue
return results
@ECR.action_registry.register('tag')
class ECRTag(tags.Tag):
permissions = ('ecr:TagResource',)
def process_resource_set(self, client, resources, tags):
for r in resources:
try:
client.tag_resource(resourceArn=r['repositoryArn'], tags=tags)
except client.exceptions.RepositoryNotFoundException:
pass
@ECR.action_registry.register('remove-tag')
class ECRRemoveTags(tags.RemoveTag):
permissions = ('ecr:UntagResource',)
def process_resource_set(self, client, resources, tags):
for r in resources:
try:
client.untag_resource(resourceArn=r['repositoryArn'], tagKeys=tags)
except client.exceptions.RepositoryNotFoundException:
pass
ECR.filter_registry.register('marked-for-op', tags.TagActionFilter)
ECR.action_registry.register('mark-for-op', tags.TagDelayedAction)
@ECR.filter_registry.register('cross-account')
class ECRCrossAccountAccessFilter(CrossAccountAccessFilter):
"""Filters all EC2 Container Registries (ECR) with cross-account access
:example:
.. code-block:: yaml
policies:
- name: ecr-cross-account
resource: ecr
filters:
- type: cross-account
whitelist_from:
expr: "accounts.*.accountNumber"
url: accounts_url
"""
permissions = ('ecr:GetRepositoryPolicy',)
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('ecr')
def _augment(r):
try:
r['Policy'] = client.get_repository_policy(
repositoryName=r['repositoryName'])['policyText']
except client.exceptions.RepositoryPolicyNotFoundException:
return None
return r
self.log.debug("fetching policy for %d repos" % len(resources))
with self.executor_factory(max_workers=2) as w:
resources = list(filter(None, w.map(_augment, resources)))
return super(ECRCrossAccountAccessFilter, self).process(resources, event)
LIFECYCLE_RULE_SCHEMA = {
'type': 'object',
'additionalProperties': False,
'required': ['rulePriority', 'action', 'selection'],
'properties': {
'rulePriority': {'type': 'integer'},
'description': {'type': 'string'},
'action': {
'type': 'object',
'required': ['type'],
'additionalProperties': False,
'properties': {'type': {'enum': ['expire']}}},
'selection': {
'type': 'object',
'addtionalProperties': False,
'required': ['countType', 'countUnit'],
'properties': {
'tagStatus': {'enum': ['tagged', 'untagged', 'any']},
'tagPrefixList': {'type': 'array', 'items': {'type': 'string'}},
'countNumber': {'type': 'integer'},
'countUnit': {'enum': ['hours', 'days']},
'countType': {
'enum': ['imageCountMoreThan', 'sinceImagePushed']},
}
}
}
}
def lifecycle_rule_validate(policy, rule):
# This is a non exhaustive list of lifecycle validation rules
# see this for a more comprehensive list
#
# https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html#lp_evaluation_rules
if (rule['selection']['tagStatus'] == 'tagged' and
'tagPrefixList' not in rule['selection']):
raise PolicyValidati | onError(
("{} has invalid lifecycle rule {} tagprefi | xlist "
"required for tagStatus: tagged").format(
policy.name, rule))
if (rule['selection']['countType'] == 'sinceImagePushed' and
'countUnit' not in rule['selection']):
raise PolicyValidationError(
("{} has invalid lifecycle rule {} countUnit "
"required for countType: sinceImagePushed").format(
policy.name, rule))
@ECR.filter_registry.register('lifecycle-rule')
class LifecycleRule(Filter):
"""Lifecycle rule filtering
:Example:
.. code-block:: yaml
policies:
- name: ecr-life
resource: aws.ecr
filters:
- type: lifecycle-rule
state: False
match:
- selection.tagStatus: untagged
- action.type: expire
- type: value
key: selection.countNumber
value: 30
op: less-than
"""
permissions = ('ecr:GetLifecyclePolicy',)
schema = type_schema(
'lifecycle-rule',
state={'type': 'boolean'},
match={'type': 'array', 'items': {
'oneOf': [
{'$ref': '#/definitions/filters/value'},
{'type': 'object', 'minProperties': 1, 'maxProperties': 1},
]}})
policy_annotation = 'c7n:lifecycle-policy'
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('ecr')
for r in resources:
if self.policy_annotation in r:
continue
try:
r[self.policy_annotation] = json.loads(
client.get_lifecycle_policy(
repositoryName=r['repositoryName']).get(
'lifecyclePolicyText', ''))
except client.exceptions.LifecyclePolicyNotFoundException:
r[self.policy_annotation] = {}
state = self.data.get('state', False)
matchers = []
for matcher in self.data.get('match', []):
vf = ValueFilter(matcher)
vf.annotate = False
matchers.append(vf)
results = []
for r in resources:
found = False
for rule in r[self.policy_annotation].get('rules', []):
found = True
for m in matchers:
if not m(rule):
found = False
if found and state:
results.append(r)
if not found and not state:
results.append(r)
return results
@ECR.action_registry.register('set-lifecycle')
class SetLifecycle(Action):
"""Set the lif |
mistercrunch/panoramix | superset/examples/country_map.py | Python | apache-2.0 | 3,977 | 0.000754 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import pandas as pd
from sqlalchemy import BigInteger, Date, String
from sqlalchemy.sql import column
from superset import db
from superset.connectors.sqla.models import SqlMetric
from superset.models.slice import Slice
from superset.utils import core as utils
from .helpers import (
get_example_data,
get_slice_json,
get_table_connector_registry,
merge_slice,
misc_dash_slices,
)
def load_country_map_data(only_metadata: bool = False, force: bool = False) -> None:
"""Loading data for map with country map"""
tbl_name = "birth_france_by_region"
database = utils.get_example_database()
table_exists = database.has_table_by_name(tbl_name)
if not only_metadata and (not table_exists or force):
csv_bytes = get_example_data(
"birth_france_data_for_country_map.csv", is_gzip=False, make_bytes=True
)
data = pd.read_csv(csv_bytes, encoding="utf-8")
data["dttm"] = datetime.datetime.now().date()
data.to_sql( # pylint: disable=no-member
tbl_name,
database.get_sqla_engine(),
if_exists="replace",
chunksize=500,
dtype={
"DEPT_ID": String(10),
"2003": BigInteger,
"2004": BigInteger,
"2005": BigInteger,
"2006": BigInteger,
"2007": BigInteger,
"2008": BigInteger,
"2009": BigInteger,
"2010": BigInteger,
"2011": BigInteger,
"2012": BigInteger,
"2013": BigInteger,
"2014": BigInteger,
"dttm": Date(),
},
index=False,
)
print("Done loading table!")
print("-" * 80)
print("Creating table reference")
table = get_table_connector_registry()
obj = db.session.query(table).filter_by(table_name=tbl_name).first()
if not obj:
obj = table(table_name=tbl_name)
obj.main_dttm_col = "dttm"
obj.database = database
obj.filter_select_enabled = True
if not any(col.metric_name == "avg__2004" for col in obj.metrics):
col = str(column("2004").compile(db.engine))
obj.metrics.append(SqlMetric(metric_name="avg__2004", expression=f"AVG({col})"))
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
"granularity_sqla": "",
"since": "",
"until": " | ",
| "viz_type": "country_map",
"entity": "DEPT_ID",
"metric": {
"expressionType": "SIMPLE",
"column": {"type": "INT", "column_name": "2004"},
"aggregate": "AVG",
"label": "Boys",
"optionName": "metric_112342",
},
"row_limit": 500000,
"select_country": "france",
}
print("Creating a slice")
slc = Slice(
slice_name="Birth in France by department in 2016",
viz_type="country_map",
datasource_type="table",
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
|
kubaszostak/gdal-dragndrop | osgeo/apps/Python27/Scripts/gdal2tiles.py | Python | mit | 123,604 | 0.001659 | #!C:\OSGEO4~1\bin\python.exe
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id: gdal2tiles.py 1f32f6fdbfd2fd352047db41d316d8afbe3c3319 2018-11-30 01:05:06 +0100 Even Rouault $
#
# Project: Google Summer of Code 2007, 2008 (http://code.google.com/soc/)
# Support: BRGM (http://www.brgm.fr)
# Purpose: Convert a raster into TMS (Tile Map Service) tiles in a directory.
# - generate Google Earth metadata (KML SuperOverlay)
# - generate simple HTML viewer based on Google Maps and OpenLayers
# - support of global tiles (Spherical Mercator) for compatibility
# with interactive web maps a la Google Maps
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
# GUI: http://www.maptiler.org/
#
###############################################################################
# Copyright (c) 2008, Klokan Petr Pridal
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
from __future__ import print_function, division
import math
from multiprocessing import Pipe, Pool, Process, Manager
import os
import tempfile
import threading
import shutil
import sys
from uuid import uuid4
from xml.etree import ElementTree
from osgeo import gdal
from osgeo import osr
try:
from PIL import Image
import numpy
import osgeo.gdal_array as gdalarray
numpy_available = True
except ImportError:
# 'antialias' resampling is not available
numpy_available = False
__version__ = "$Id: gdal2tiles.py 1f32f6fdbfd2fd352047db41d316d8afbe3c3319 2018-11-30 01:05:06 +0100 Even Rouault $"
resampling_list = ('average', 'near', 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'antialias')
profile_list = ('mercator', 'geodetic', 'raster')
webviewer_list = ('all', 'google', 'openlayers', 'leaflet', 'none')
threadLocal = threading.local()
# =============================================================================
# =============================================================================
# =============================================================================
__doc__globalmaptiles = """
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:3857)
for Google Maps, Yahoo Maps, Bing Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
MAXZOOMLEVEL = 32
class GlobalMercator(object):
r"""
TMS Global Mercator Profile
---------------------------
    Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:3857.
Such tiles are compatible with Google Maps, Bing Maps, Yahoo Maps,
    UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in meters XY pixels Z zoom XYZ from TMS
         EPSG:4326           EPSG:3857
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:3857?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
Constant 20037508.342789244 comes from the circumference of the Earth in meters,
which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:3857
    Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:3857?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yes?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
    they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
0.33 percent scale distortion in the Y direction, which is not visually
noticeable.
How do I create a raster in EPSG:3857 and convert coordinates with PROJ.4?
You can |
sciCloud/OLiMS | lims/browser/method.py | Python | agpl-3.0 | 3,775 | 0.002384 | from lims.browser import BrowserView
from dependencies.dependency import getToolByName
from dependencies.dependency import check as CheckAuthenticator
import json
class ajaxGetInstruments(BrowserView):
    """Return a JSON list describing the instruments assigned to a method.

    Each entry has the structure::

        {'uid': <instrument_uid>,
         'title': <instrument_title>,
         'url': <instrument_absolute_path>,
         'outofdate': True|False,
         'qcfail': True|False,
         'isvalid': True|False}
    """
    def __call__(self):
        result = []
        try:
            CheckAuthenticator(self.request)
        except Forbidden:
            # NOTE(review): ``Forbidden`` is not imported in this module;
            # confirm it resolves at runtime, otherwise this raises NameError.
            return json.dumps(result)
        catalog = getToolByName(self, 'portal_catalog')
        brains = catalog(portal_type='Method', UID=self.request.get("uid", '0'))
        # Only proceed when the UID matches exactly one Method object.
        if brains and len(brains) == 1:
            method = brains[0].getObject()
            for instr in method.getInstruments():
                result.append({'uid': instr.UID(),
                               'title': instr.Title(),
                               'url': instr.absolute_url_path(),
                               'outofdate': instr.isOutOfDate(),
                               'qcfail': not instr.isQCValid(),
                               'isvalid': instr.isValid()})
        return json.dumps(result)
class ajaxGetMethodServiceInstruments(BrowserView):
    """ Returns a json list with the instruments assigned to the method
        and to the analysis service with the following structure:
        [{'uid': <instrument_uid>,
          'title': <instrument_absolute_path>,
          'url': <instrument_url>,
          'outofdate': True|False,
          'qcfail': True|False,
          'isvalid': True|False},
        ]
        If no method assigned, returns the instruments assigned to the
        service that have no method assigned.
        If no service assigned, returns empty
    """
    def __call__(self):
        instruments = []
        try:
            CheckAuthenticator(self.request)
        except Forbidden:
            # NOTE(review): ``Forbidden`` is not imported in this module;
            # confirm it is available at runtime, otherwise this handler
            # itself raises NameError instead of returning the empty list.
            return json.dumps(instruments)
        uc = getToolByName(self, 'uid_catalog')
        # Resolve the analysis service from the "suid" request parameter.
        service = uc(portal_type='AnalysisService', UID=self.request.get("suid", '0'))
        if not service or len(service) != 1:
            return json.dumps(instruments)
        service = service[0].getObject()
        sinstr = service.getAvailableInstruments()
        if not sinstr:
            return json.dumps(instruments)
        # Resolve the method from the "muid" request parameter.
        method = uc(portal_type='Method', UID=self.request.get("muid", '0'))
        if not method or len(method) != 1:
            # No single method matched: fall back to the service instruments
            # that have no method assigned at all.
            for i in sinstr:
                if not i.getMethod():
                    instrument = { 'uid' : i.UID(),
                                   'title': i.Title(),
                                   'url': i.absolute_url_path(),
                                   'outofdate': i.isOutOfDate(),
                                   'qcfail': not i.isQCValid(),
                                   'isvalid': i.isValid()}
                    instruments.append(instrument)
            return json.dumps(instruments)
        method = method[0].getObject()
        # Keep only the method's instruments that the service also offers.
        iuids = [s.UID() for s in sinstr]
        for i in method.getInstruments():
            if i.UID() in iuids:
                instrument = { 'uid' : i.UID(),
                               'title': i.Title(),
                               'url': i.absolute_url_path(),
                               'outofdate': i.isOutOfDate(),
                               'qcfail': not i.isQCValid(),
                               'isvalid': i.isValid()}
                instruments.append(instrument)
        return json.dumps(instruments)
|
sbesson/zeroc-ice | rb/allTests.py | Python | gpl-2.0 | 1,545 | 0.003883 | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2013 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, re, getopt
# Candidate relative locations of the repository top level, tried in order.
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
    path = [os.path.join(head, p) for p in path]
# Keep only the candidates that actually contain scripts/TestUtil.py.
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
    raise RuntimeError("can't find toplevel directory!")
# Make the shared test driver importable from the first matching location.
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil
#
# List of all basic tests.
#
# Each entry is (test directory, list of tags passed to TestUtil.run;
# their semantics are defined in scripts/TestUtil.py).
tests = [
    ("Slice/keyword", ["once"]),
    ("Ice/binding", ["core"]),
    ("Ice/checksum", ["core"]),
    ("Ice/exceptions", ["core"]),
    ("Ice/facets", ["core"]),
    ("Ice/info", ["core", "noipv6", "nocompress"]),
    ("Ice/inheritance", ["core"]),
    ("Ice/location", ["core"]),
    ("Ice/objects", ["core"]),
    ("Ice/proxy", ["core"]),
    ("Ice/properties", ["once", "nowin32"]),
    ("Ice/operations", ["core"]),
    ("Ice/retry", ["core"]),
    ("Ice/timeout", ["core"]),
    ("Ice/slicing/exceptions", ["core"]),
    ("Ice/slicing/objects", ["core"]),
    ("Ice/defaultValue", ["core"]),
    ("Ice/optional", ["core"]),
    ("Ice/enums", ["core"])
]
if __name__ == "__main__":
    TestUtil.run(tests)
|
devs1991/test_edx_docmode | lms/djangoapps/discussion_api/tests/utils.py | Python | agpl-3.0 | 13,936 | 0.001435 | """
Discussion API test utilities
"""
import json
import re
import httpretty
def _get_thread_callback(thread_data):
    """
    Get a callback function that will return POST/PUT data overridden by
    response_overrides.
    """
    boolean_keys = ("anonymous", "anonymous_to_peers", "closed", "pinned")

    def callback(request, _uri, headers):
        """
        Simulate the thread creation or update endpoint: echo the request
        payload on top of a minimal thread, coercing known boolean flags.
        """
        response_data = make_minimal_cs_thread(thread_data)
        for key, val_list in request.parsed_body.items():
            # httpretty parses form bodies into {key: [values]}; take the first.
            raw = val_list[0]
            response_data[key] = (raw == "True") if key in boolean_keys else raw
        return (200, headers, json.dumps(response_data))
    return callback
def _get_comment_callback(comment_data, thread_id, parent_id):
    """
    Get a callback function that will return a comment containing the given data
    plus necessary dummy data, overridden by the content of the POST/PUT
    request.
    """
    def callback(request, _uri, headers):
        """
        Simulate the comment creation or update endpoint as described above.
        """
        response_data = make_minimal_cs_comment(comment_data)
        # thread_id and parent_id are not included in request payload but
        # are returned by the comments service
        response_data["thread_id"] = thread_id
        response_data["parent_id"] = parent_id
        for key, val_list in request.parsed_body.items():
            # httpretty parses form bodies into {key: [values]}; use the first.
            val = val_list[0]
            if key in ["anonymous", "anonymous_to_peers", "endorsed"]:
                # Form-encoded booleans arrive as strings; coerce known flags.
                response_data[key] = val == "True"
            else:
                response_data[key] = val
        return (200, headers, json.dumps(response_data))
    return callback
class CommentsServiceMockMixin(object):
"""Mixin with utility methods for mocking the comments service"""
def register_get_threads_response(self, threads, page, num_pages):
"""Register a mock response for GET on the CS thread list endpoint"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/threads",
body=json.dumps({
"collection": threads,
"page": page,
"num_pages": num_pages,
"thread_count": len(threads),
}),
status=200
)
def register_get_threads_search_response(self, threads, rewrite, num_pages=1):
"""Register a mock response for GET on the CS thread search endpoint"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/search/threads",
body=json.dumps({
"collection": threads,
"page": 1,
"num_pages": num_pages,
"corrected_text": rewrite,
"thread_count": len(threads),
}),
status=200
)
def register_post_thread_response(self, thread_data):
"""Register a mock response for POST on the CS commentable endpoint"""
httpretty.register_uri(
httpretty.POST,
re.compile(r"http://localhost:4567/api/v1/(\w+)/threads"),
body=_get_thread_callback(thread_data)
)
def register_put_thread_response(self, thread_data):
"""
Register a mock response for PUT on the CS endpoint for the given
thread_id.
"""
httpretty.register_uri(
httpretty.PUT,
"http://localhost:4567/api/v1/threads/{}".format(thread_data["id"]),
body=_get_thread_callback(thread_data)
)
def register_get_thread_error_response(self, thread_id, status_code):
"""Register a mock error response for GET on the CS thread endpoint."""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/threads/{id}".format(id=thread_id),
body="",
status=status_code
)
def register_get_thread_response(self, thread):
"""
Register a mock response for GET on the CS thread instance endpoint.
"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/threads/{id}".format(id=thread["id"]),
body=json.dumps(thread),
status=200
)
def register_post_comment_response(self, comment_data, thread_id, parent_id=None):
"""
Register a mock response for POST on the CS comments endpoint for the
given thread or parent; exactly one of thread_id and parent_id must be
specified.
"""
if parent_id:
url = "http://localhost:4567/api/v1/comments/{}".format(parent_id)
else:
url = "http://localhost:4567/api/v1/threads/{}/comments".format(thread_id)
httpretty.register_uri(
httpretty.POST,
url,
body=_get_comment_callback(comment_data, thread_id, parent_id)
)
def register_put_comment_response(self, comment_data):
"""
Register a mock response for PUT on the CS endpoint for the given
comment data (which must include the key "id").
"""
thread_id = comment_data["thread_id"]
parent_id = comment_data.get("parent_id")
httpretty.register_uri(
httpretty.PUT,
"http://localhost:4567/api/v1/comments/{}".format(comment_data["id"]),
body=_get_comment_callback(comment_data, thread_id, parent_id)
)
def register_get_comment_error_response(self, comment_id, status_code):
"""
Register a mock error response for GET on the CS comment instance
endpoint.
"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/comments/{id}".format(id=comment_id),
body="",
status=status_code
)
def register_get_comment_response(self, response_overrides):
"""
Register a mock response for GET on the CS comment instance endpoint.
"""
comment = make_minimal_cs_comment(response_overrides)
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/comments/{id}".format(id=comment["id"]),
body=json.dumps(comment),
status=200
)
def register_get_user_response(self, user, subscribed_thread_ids=None, upvoted_ids=None):
"""Register a mock response for GET on the CS user instance endpoint"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/users/{id}".format(id=user.id),
body=json.dumps({
"id": str(user.id),
"subscribed_thread_ids": subscribed_thread_ids or [],
"upvoted_ids": upvoted_ids or [],
}),
status=200
)
def register_subscribed_threads_response(self, user, threads, page, num_pages):
"""Register a mock response for GET on the CS user instance endpoint"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/users/{}/subscribed_threads".format(user.id),
body=json.dumps({
"collection": threads,
"page": page,
"num_pages": num_pages,
"thread_count": len(threads),
}),
status=200
)
def register_subscription_response(self, user):
"""
Register a mock response for POST and DELETE on the CS user subscription
endpoint
"""
for method in [httpretty.POST, httpretty.DELETE]:
httpretty.register_uri(
method,
"http://localhost:4567/api/v1/users/{id}/subscriptions".format(id=user.id),
body=json.dumps({}), # body is unused
status=200
)
def register_thread_votes_response(self, thread_id):
""" |
marcinn/restosaur | restosaur/context.py | Python | bsd-2-clause | 7,771 | 0 | import collections
import email
import types
import urllib
import urlparse
import responses
import times
# todo: implement own conversion utility
from django.utils.encoding import force_bytes
from .loading import load_resource
def parse_http_date(header, headers):
    """Parse the named RFC 2822 date header; return None if absent/invalid."""
    value = headers[header] if header in headers else None
    if not value:
        return None
    timetuple = email.utils.parsedate_tz(value)
    try:
        # parsedate_tz returns None on garbage; mktime_tz then raises TypeError.
        return times.from_unix(email.utils.mktime_tz(timetuple))
    except (TypeError, ValueError):
        return None
class QueryDict(collections.MutableMapping):
    """
    QueryDict acts like a plain `dict` type, but it handles
    automatially multiple values for same key.
    The most safest representation of URI query parameters is a list
    of tuples, because the parameter names aren't unique. Unfortunately
    accessing list of tuples is not so handy, so a mapping is
    required.
    In most cases query parameters looks like a mapping of simple
    key => value pairs, so we're expecting just one value per key. But when
    value is a list, we're expecting that accessing a key will return that
    list, not last nor first value.
    The problematic case is for keys, for which we're expecting always a list
    of values, but just one was passed in URI. Accessing the key will give
    just straight value instead of expected list with one item. In that cases
    you should use `QueryDict.getlist()` directly, which returns always a list.
    The values are stored internally as lists.
    `.items()` method returns a list of (key, value) tuples, where value is
    a single value from a key's values list. This means that key may not be
    unique. This representation is compatible with `urllib.urlencode()`.
    `.keys()` returns unique key names, same as for pure `dict`.
    `.values()` returns list of same values, which can be accessed by key,
    `.lists()` returns internal representation as list of lists.
    """
    def __init__(self, initial=None):
        # Internal store: {key: [value, ...]}; update() normalises input.
        self._data = {}
        self.update(initial)
    def update(self, data):
        if data is None:
            return
        else:
            try:
                # EAFP: accept mappings by flattening them to item pairs.
                data = data.items()
            except AttributeError:
                # Already a list of (key, value) tuples.
                pass
            finally:
                # NOTE: keys present in `data` are RESET here, so update()
                # replaces their previous values rather than extending them.
                keys = set([x[0] for x in data])
                for key in keys:
                    self._data[key] = []
                for key, value in data:
                    if isinstance(value, (types.ListType, types.TupleType)):
                        # Sequence values are unpacked into multiple entries.
                        for x in value:
                            self._data[key].append(x)
                    else:
                        self._data[key].append(value)
    def items(self):
        # Flatten to (key, single_value) pairs; keys may repeat.
        result = []
        for key, values in self._data.items():
            result += map(lambda x: (key, x), values)
        return result
    def getlist(self, key, default=None):
        # Always returns the underlying list (or `default` when missing).
        return self._data.get(key, default)
    def lists(self):
        # Raw internal representation: [(key, [values...]), ...].
        return self._data.items()
    def __setitem__(self, key, value):
        return self.update({key: value})
    def __getitem__(self, key):
        # Single stored value -> plain value; multiple -> the whole list.
        return self._data[key][-1]\
            if len(self._data[key]) < 2 else self._data[key]
    def __delitem__(self, key):
        del self._data[key]
    def __len__(self):
        return len(self._data)
    def __iter__(self):
        return iter(self._data)
    def __repr__(self):
        return repr(self._data)
class Context(object):
    """Per-request context handed to resource callbacks.

    Bundles the API object, the underlying Django request, parsed query
    parameters/body/files, and provides URI helpers plus factory methods
    for the standard response types. (Python 2 codebase: relies on
    ``urlparse``/``urllib.urlencode`` and list-returning ``map``.)
    """
    def __init__(
            self, api, request, resource, method, parameters=None,
            body=None, data=None, files=None, raw=None, extra=None,
            headers=None):
        self.method = method
        self.api = api
        self.headers = headers or {}
        self.request = request
        self.body = body
        self.raw = raw
        self.resource = resource
        self.parameters = QueryDict(parameters) # GET
        self.data = data or {} # POST
        self.files = files or {} # FILES
        self.deserializer = None
        self.content_type = None
        self.extra = extra or {}
    def build_absolute_uri(self, path=None, parameters=None):
        """
        Returns absolute uri to the specified `path` with optional
        query string `parameters`.
        If no `path` is provided, the current request full path
        (including query string) will be used and extended by
        optional `parameters`.
        """
        def build_uri(path):
            # Resolve `path` against the current scheme://host/path.
            current = 'http%s://%s%s' % (
                's' if self.request.is_secure() else '',
                self.request.get_host(), self.request.path)
            return urlparse.urljoin(current, path)
        params = QueryDict()
        if path:
            # Join the API mount point and `path`, dropping empty segments
            # but preserving a trailing slash when the caller supplied one.
            full_path = u'/'.join(
                filter(None, (self.api.path+path).split('/')))
            if path.endswith('/'):
                full_path += '/'
            uri = build_uri('/'+full_path)
        else:
            # No explicit path: reuse the current request's query string.
            params.update(self.parameters.items())
            uri = build_uri(self.request.path)
        # todo: change to internal restosaur settings
        enc = self.request.GET.encoding
        params.update(parameters or {})
        # Encode values as bytes in the request's encoding (py2 map -> list).
        params = map(
            lambda x: (x[0], force_bytes(x[1], enc)),
            params.items())
        if params:
            return '%s?%s' % (uri, urllib.urlencode(params))
        else:
            return uri
    def url_for(self, resource, **kwargs):
        """
        Shortcut wrapper of `resource.uri()`
        """
        if isinstance(resource, types.StringTypes):
            # Dotted-path string: resolve it to a resource object first.
            resource = load_resource(resource)
        return resource.uri(self, params=kwargs)
    def is_modified_since(self, dt):
        """
        Compares datetime `dt` with `If-Modified-Since` header value.
        Returns True if `dt` is newer than `If-Modified-Since`,
        False otherwise.
        """
        if_modified_since = parse_http_date('if-modified-since', self.headers)
        if if_modified_since:
            # HTTP dates have second resolution; drop microseconds to compare.
            return times.to_unix(
                dt.replace(microsecond=0)) > times.to_unix(if_modified_since)
        return True
    @property
    def deserialized(self):
        # Alias for the (already deserialized) request body.
        return self.body
    # response factories
    def Response(self, *args, **kwargs):
        return responses.Response(self, *args, **kwargs)
    def Created(self, *args, **kwargs):
        return responses.CreatedResponse(self, *args, **kwargs)
    def ValidationError(self, *args, **kwargs):
        return responses.ValidationErrorResponse(self, *args, **kwargs)
    def NotAcceptable(self, *args, **kwargs):
        return responses.NotAcceptableResponse(self, *args, **kwargs)
    def NotFound(self, *args, **kwargs):
        return responses.NotFoundResponse(self, *args, **kwargs)
    def SeeOther(self, *args, **kwargs):
        return responses.SeeOtherResponse(self, *args, **kwargs)
    def NotModified(self, *args, **kwargs):
        return responses.NotModifiedResponse(self, *args, **kwargs)
    def MethodNotAllowed(self, *args, **kwargs):
        return responses.MethodNotAllowedResponse(self, *args, **kwargs)
    def Forbidden(self, *args, **kwargs):
        return responses.ForbiddenResponse(self, *args, **kwargs)
    def BadRequest(self, *args, **kwargs):
        return responses.BadRequestResponse(self, *args, **kwargs)
    def Unauthorized(self, *args, **kwargs):
        return responses.UnauthorizedResponse(self, *args, **kwargs)
    def NoContent(self, *args, **kwargs):
        return responses.NoContentResponse(self, *args, **kwargs)
    def Entity(self, *args, **kwargs):
        return responses.EntityResponse(self, *args, **kwargs)
    def Collection(self, *args, **kwargs):
        return responses.CollectionResponse(self, *args, **kwargs)
|
viswimmer1/PythonGenerator | data/python_files/33000414/test_rendering.py | Python | gpl-2.0 | 1,936 | 0.001033 | import unittest
import warnings
from pyramid.tests.test_config import dummyfactory
cla | ss TestRenderingConfiguratorMixin(unittest.TestCase):
def _makeOne(self, *arg, **kw):
from pyramid.config import Configurator
config = Configurator(*arg, **kw)
return config
def test_set_renderer_globals_factory(self):
from pyramid.interfaces import IRendererGlobalsFactory
config = self._makeOne(autocommit=True)
factory = object()
with warnings.catch_warnings():
warnings.filterwarnin | gs('ignore')
config.set_renderer_globals_factory(factory)
self.assertEqual(
config.registry.getUtility(IRendererGlobalsFactory),
factory)
def test_set_renderer_globals_factory_dottedname(self):
from pyramid.interfaces import IRendererGlobalsFactory
config = self._makeOne(autocommit=True)
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
config.set_renderer_globals_factory(
'pyramid.tests.test_config.dummyfactory')
self.assertEqual(
config.registry.getUtility(IRendererGlobalsFactory),
dummyfactory)
def test_add_renderer(self):
from pyramid.interfaces import IRendererFactory
config = self._makeOne(autocommit=True)
renderer = object()
config.add_renderer('name', renderer)
self.assertEqual(config.registry.getUtility(IRendererFactory, 'name'),
renderer)
def test_add_renderer_dottedname_factory(self):
from pyramid.interfaces import IRendererFactory
config = self._makeOne(autocommit=True)
import pyramid.tests.test_config
config.add_renderer('name', 'pyramid.tests.test_config')
self.assertEqual(config.registry.getUtility(IRendererFactory, 'name'),
pyramid.tests.test_config)
|
revanthkolli/osf.io | website/addons/dataverse/views/hgrid.py | Python | apache-2.0 | 2,011 | 0.000497 | # -*- coding: utf-8 -*-
from website.addons.dataverse.client import get_dataset, get_files, \
get_dataverse, connect_from_settings
from website.project.decorators import must_be_contributor_or_public
from website.project.decorators import must_have_addon
from website.util import rubeus
def dataverse_hgrid_root(node_addon, auth, **kwargs):
    """Build the rubeus/HGrid root listing for the node's linked dataset.

    Returns a one-element list containing the addon root folder, or an
    empty list when no dataset is linked, the DOI does not resolve, or
    there is nothing the current viewer may see.
    """
    node = node_addon.owner
    user_settings = node_addon.user_settings

    # The original expression chose 'latest-published' on both branches of
    # its conditional, so every viewer starts on the published version.
    version = 'latest-published'

    # Quit if no dataset linked
    if not node_addon.complete:
        return []

    connection = connect_from_settings(user_settings)
    dataverse = get_dataverse(connection, node_addon.dataverse_alias)
    dataset = get_dataset(dataverse, node_addon.dataset_doi)

    # Quit if doi does not produce a dataset
    if dataset is None:
        return []

    published_files = get_files(dataset, published=True)
    can_edit = node.can_edit(auth)

    # No published version: editors see the draft, everyone else sees nothing.
    if not published_files:
        if not can_edit:
            return []
        version = 'latest'

    permissions = {
        'edit': can_edit and not node.is_registration,
        'view': node.can_view(auth),
    }
    urls = {
        'publish': node.api_url_for('dataverse_publish_dataset'),
        'publishBoth': node.api_url_for('dataverse_publish_both'),
    }
    return [rubeus.build_addon_root(
        node_addon,
        node_addon.dataset,
        urls=urls,
        permissions=permissions,
        dataset=node_addon.dataset,
        doi=dataset.doi,
        dataverse=dataverse.title,
        hasPublishedFiles=bool(published_files),
        dataverseIsPublished=dataverse.is_published,
        version=version,
    )]
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_root_folder_public(node_addon, auth, **kwargs):
    """Permission-checked view wrapper around :func:`dataverse_hgrid_root`."""
    return dataverse_hgrid_root(node_addon, auth=auth)
|
Anachronos/teslabot | teslabot/plugins/xdcc/__init__.py | Python | mit | 20 | 0 | _ | _author_ | _ = 'Xach'
|
hasteur/advent_of_code | 2015/puzzle16.py | Python | gpl-2.0 | 2,024 | 0.027668 | import re
class Aunt:
    """One candidate "Aunt Sue" with the compound counts known about her.

    A count of -1 means the compound was never mentioned for this aunt and
    therefore cannot rule her out.
    """

    def __init__(self, name):
        # Fix: the original also declared a class-level ``name = ''`` that
        # was always shadowed by this instance attribute; it has been removed.
        self.name = name
        # -1 == "unknown"; only explicitly recorded compounds can disqualify.
        self.count = {'children': -1,
                      'cats': -1,
                      'samoyeds': -1,
                      'pomeranians': -1,
                      'akitas': -1,
                      'vizslas': -1,
                      'goldfish': -1,
                      'trees': -1,
                      'cars': -1,
                      'perfumes': -1}

    def setCount(self, key, value):
        """Record a known compound count; stray whitespace in *key* is stripped."""
        self.count[key.strip()] = value

    def filterKey(self, key, value):
        """Part-1 rule: unknown counts pass; known counts must match exactly."""
        if self.count[key] == -1:
            return True
        return self.count[key] == value

    def filterCalib(self, key, value):
        """Part-2 (calibrated) rule.

        'cats'/'trees' readings are lower bounds (stored count must exceed
        *value*), 'pomeranians'/'goldfish' are upper bounds (stored count
        must be below *value*); everything else must match exactly.
        Unknown counts (-1) always pass.
        """
        if self.count[key] == -1:
            return True
        if key in ('cats', 'trees'):
            return self.count[key] > value
        if key in ('pomeranians', 'goldfish'):
            return self.count[key] < value
        return self.count[key] == value
def filterRule(AuList, key, val):
    """Return the aunts in *AuList* whose calibrated rule accepts key == val.

    Idiom fix: the original manual accumulator loop is replaced by an
    equivalent list comprehension; order and contents are unchanged.
    """
    return [aunt for aunt in AuList if aunt.filterCalib(key, val)]
# Driver (Python 2 syntax): parse the puzzle input and narrow the candidates.
AuntList = []
fh = open('puzzle16.txt','r')
# NOTE(review): fh is never closed; harmless for a one-shot script.
match_rule = re.compile('(?P<Name>Sue [0-9]{1,3}): (?P<attrib>.+)$')
for line in fh:
    matches = match_rule.match(line)
    a = Aunt(matches.group('Name'))
    attrib_list = matches.group('attrib').split(',')
    for attri in attrib_list:
        # Each attribute is "name: count"; setCount strips whitespace keys.
        key = attri.split(': ')[0]
        val = int(attri.split(': ')[1])
        a.setCount(key, val)
    AuntList.append(a)
    a = None
print len(AuntList)
# Apply the calibrated rules one compound at a time, printing how many
# candidates survive each filter; the last survivor is the answer.
F1 = filterRule(AuntList, 'children', 3)
print len(F1)
F2 = filterRule(F1, 'cats', 7)
print len(F2)
F3 = filterRule(F2, 'samoyeds', 2)
print len(F3)
F4 = filterRule(F3, 'pomeranians', 3)
print len(F4)
F5 = filterRule(F4, 'akitas', 0)
print len(F5)
F6 = filterRule(F5, 'vizslas', 0)
print len(F6)
F7 = filterRule(F6, 'goldfish', 5)
print len(F7)
F8 = filterRule(F7, 'trees', 3)
print len(F8)
F9 = filterRule(F8, 'cars', 2)
print len(F9)
F10 = filterRule(F9, 'perfumes', 1)
print len(F10)
|
sam-m888/gprime | gprime/proxy/referencedbyselection.py | Python | gpl-2.0 | 23,926 | 0.000293 | #
# gPrime - A web-based genealogy program
#
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Proxy class for the Gramps databases. Returns objects which are
referenced by a person, or through a chain of references starting with
a person.
"""
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .proxybase import ProxyDbBase
from ..lib import (Person, Family, Source, Citation, Event, Media,
Place, Repository, Note, Tag)
class ReferencedBySelectionProxyDb(ProxyDbBase):
"""
A proxy to a Gramps database. This proxy will act like a Gramps
database, but returning all objects which are referenced by a
selection, or by an object that is referenced by an object which
is eventually referenced by one of the selected objects.
"""
    def __init__(self, dbase, all_people=False):
        """
        Create a new ReferencedByPeopleProxyDb instance.
        :param all_people: if True, get all people, and the items they link to;
                           if False, get all people that are connected to
                           something, and all of items they link to.
        :type all_people: boolean
        """
        ProxyDbBase.__init__(self, dbase)
        self.reset_references()
        # If restricted_to["Person"] is a set, restrict process to
        # them, and do not process others outside of them
        self.restricted_to = {"Person": None}
        # Build lists of referenced objects
        # iter through whatever object(s) you want to start
        # the trace.
        self.queue = []
        if all_people:
            # Do not add references to those not already included
            self.restricted_to["Person"] = [x for x in
                                            self.db.iter_person_handles()]
            # Spread activation to all other items:
            for handle in self.restricted_to["Person"]:
                if handle:
                    self.queue_object("Person", handle)
        else:
            # get rid of orphaned people:
            # first, get all of the links from people:
            for person in self.db.iter_people():
                self.queue_object("Person", person.handle, False)
            # save those people:
            self.restricted_to["Person"] = self.referenced["Person"]
            # reset, and just follow those people
            self.reset_references()
            for handle in self.restricted_to["Person"]:
                if handle:
                    self.queue_object("Person", handle)
        # process:
        # Breadth of the reference trace: drain the queue, marking each
        # object and enqueuing whatever it references in turn.
        while len(self.queue):
            obj_type, handle, reference = self.queue.pop()
            self.process_object(obj_type, handle, reference)
        # Dispatch table mapping each object type to its accessor methods;
        # consumed by get_table_func() below.
        self.__tables = {
            'Person':
            {
                "handle_func": self.get_person_from_handle,
                "gid_func": self.get_person_from_gid,
                "class_func": Person,
                "cursor_func": self.get_person_cursor,
                "handles_func": self.get_person_handles,
                "iter_func": self.iter_people,
                "count_func": self.get_number_of_people,
            },
            'Family':
            {
                "handle_func": self.get_family_from_handle,
                "gid_func": self.get_family_from_gid,
                "class_func": Family,
                "cursor_func": self.get_family_cursor,
                "handles_func": self.get_family_handles,
                "iter_func": self.iter_families,
                "count_func": self.get_number_of_families,
            },
            'Source':
            {
                "handle_func": self.get_source_from_handle,
                "gid_func": self.get_source_from_gid,
                "class_func": Source,
                "cursor_func": self.get_source_cursor,
                "handles_func": self.get_source_handles,
                "iter_func": self.iter_sources,
                "count_func": self.get_number_of_sources,
            },
            'Citation':
            {
                "handle_func": self.get_citation_from_handle,
                "gid_func": self.get_citation_from_gid,
                "class_func": Citation,
                "cursor_func": self.get_citation_cursor,
                "handles_func": self.get_citation_handles,
                "iter_func": self.iter_citations,
                "count_func": self.get_number_of_citations,
            },
            'Event':
            {
                "handle_func": self.get_event_from_handle,
                "gid_func": self.get_event_from_gid,
                "class_func": Event,
                "cursor_func": self.get_event_cursor,
                "handles_func": self.get_event_handles,
                "iter_func": self.iter_events,
                "count_func": self.get_number_of_events,
            },
            'Media':
            {
                "handle_func": self.get_media_from_handle,
                "gid_func": self.get_media_from_gid,
                "class_func": Media,
                "cursor_func": self.get_media_cursor,
                "handles_func": self.get_media_handles,
                "iter_func": self.iter_media,
                "count_func": self.get_number_of_media,
            },
            'Place':
            {
                "handle_func": self.get_place_from_handle,
                "gid_func": self.get_place_from_gid,
                "class_func": Place,
                "cursor_func": self.get_place_cursor,
                "handles_func": self.get_place_handles,
                "iter_func": self.iter_places,
                "count_func": self.get_number_of_places,
            },
            'Repository':
            {
                "handle_func": self.get_repository_from_handle,
                "gid_func": self.get_repository_from_gid,
                "class_func": Repository,
                "cursor_func": self.get_repository_cursor,
                "handles_func": self.get_repository_handles,
                "iter_func": self.iter_repositories,
                "count_func": self.get_number_of_repositories,
            },
            'Note':
            {
                "handle_func": self.get_note_from_handle,
                "gid_func": self.get_note_from_gid,
                "class_func": Note,
                "cursor_func": self.get_note_cursor,
                "handles_func": self.get_note_handles,
                "iter_func": self.iter_notes,
                "count_func": self.get_number_of_notes,
            },
            'Tag':
            {
                "handle_func": self.get_tag_from_handle,
                "gid_func": None,
                "class_func": Tag,
                "cursor_func": self.get_tag_cursor,
                "handles_func": self.get_tag_handles,
                "iter_func": self.iter_tags,
                "count_func": self.get_number_of_tags,
            }
        }
def get_table_func(self, table=None, func=None):
"""
Private implementation of get_table_func.
"""
if table is None:
return list(self.__tables.keys())
elif func is None:
return self.__tables[table]
elif func in self.__tables[table].keys():
return self.__tables[table][func]
else:
|
dennisobrien/bokeh | examples/app/taylor.py | Python | bsd-3-clause | 2,101 | 0.004284 | import numpy as np
import sympy as sy
from bokeh.core.properties import value
from bokeh.io import curdoc
from bokeh.layouts import column, widgetbox
from bokeh.models import ColumnDataSource, Legend, LegendItem, Slider, TextInput, PreText
from bokeh.plotting import figure
xs = sy.Symbol('x')
expr = sy.exp(-xs)*sy.sin(xs)
def taylor(fx, xs, order, x_range=(0, 1), n=200):
    """Sample *fx* and its Taylor expansion of the given order on a grid.

    Returns ``(x, fy, ty)``: the *n* sample points spanning *x_range*, the
    exact values of *fx*, and the values of its order-*order* Taylor
    polynomial (about 0) at those points.
    """
    lo, hi = x_range
    x = np.linspace(float(lo), float(hi), n)
    # Exact function values, evaluated numerically through numpy.
    fy = sy.lambdify(xs, fx, modules=['numpy'])(x)
    # Truncated Taylor series with the big-O remainder term stripped.
    poly = fx.series(xs, n=order).removeO()
    if poly.is_Number:
        # A constant polynomial: lambdify would yield a scalar, so
        # broadcast the constant over the whole sample grid instead.
        ty = np.full_like(x, float(poly))
    else:
        ty = sy.lambdify(xs, poly, modules=['numpy'])(x)
    return x, fy, ty
source = ColumnDataSource(data=dict(x=[], fy=[], ty=[]))
p = figure(x_range=(-7,7), y_range=(-100, 200), plot_width=800, plot_height=400)
line_f = p.line(x="x", y="fy", line_color="navy", line_width=2, source=source)
line_t = p.line(x="x", y="ty", line_color="firebrick", line_width=2, source=source)
p.background_fill_color = "lightgrey"
legend = Legend(location="top_right")
legend.items = [
LegendItem(label=value("%s" % expr), renderers=[line_f]),
LegendItem(label=value("taylor(%s)" % e | xpr), renderers=[line_t]),
]
p.add_layout(legend)
def update():
    """Re-sample both curves from the current expression and order widgets.

    Reads the module-level ``text``/``slider`` widgets, writes the result
    into ``source``/``legend``/``p`` and reports parse errors in ``errbox``.
    """
    try:
        # Parse the user's input, binding the name 'x' to our symbol.
        expr = sy.sympify(text.value, dict(x=xs))
    except Exception as exception:
        # Show the parse error instead of touching the plot.
        errbox.text = str(exception)
    else:
        errbox.text = ""
        x, fy, ty = taylor(expr, xs, slider.value, (-2*sy.pi, 2*sy.pi), 200)
        p.title.text = "Taylor (n=%d) expansion comparison for: %s" % (slider.value, expr)
        legend.items[0].label = value("%s" % expr)
        legend.items[1].label = value("taylor(%s)" % expr)
        source.data = dict(x=x, fy=fy, ty=ty)
# Widgets: Taylor order slider and free-form expression input, both of
# which trigger a full recompute on change.
slider = Slider(start=1, end=20, value=1, step=1, title="Order")
slider.on_change('value', lambda attr, old, new: update())
text = TextInput(value=str(expr), title="Expression:")
text.on_change('value', lambda attr, old, new: update())
# Displays sympify() parse errors raised inside update().
errbox = PreText()
# Populate the plot once before the first user interaction.
update()
inputs = widgetbox(text, slider, errbox, width=400)
curdoc().add_root(column(inputs, p))
|
andysim/psi4 | psi4/driver/procrouting/proc_table.py | Python | gpl-2.0 | 10,293 | 0.014864 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with a *procedures* dictionary specifying available quantum
chemical methods.
"""
from __future__ import print_function
from __future__ import absolute_import
from . import proc
from . import interface_cfour
# never import wrappers or aliases into this file
# Procedure lookup tables
procedures = {
'energy': {
'hf' : proc.run_scf,
'scf' : proc.run_scf,
'mcscf' : proc.run_mcscf,
'dcft' : proc.run_dcft,
'mp3' : proc.select_mp3,
'mp2.5' : proc.select_mp2p5,
'mp2' : proc.select_mp2,
'omp2' : proc.select_omp2,
'scs-omp2' : proc.run_occ,
'scs(n)-omp2' : proc.run_occ,
'scs-omp2-vdw' : proc.run_occ,
'sos-omp2' : proc.run_occ,
'sos-pi-omp2' : proc.run_occ,
'omp3' : proc.select_omp3,
'scs-omp3' : proc.run_occ,
'scs(n)-omp3' : proc.run_occ,
'scs-omp3-vdw' : proc.run_occ,
'sos-omp3' : proc.run_occ,
'sos-pi-omp3' : proc.run_occ,
'olccd' : proc.select_olccd,
'omp2.5' : proc.select_omp2p5,
'dfocc' : proc.run_dfocc, # full control over dfocc
'qchf' : proc.run_qchf,
'ccd' : proc.run_dfocc,
'sapt0' : proc.run_sapt,
'ssapt0' : proc.run_sapt,
'sapt2' : proc.run_sapt,
'sapt2+' : proc.run_sapt,
'sapt2+(3)' : proc.run_sapt,
'sapt2+3' : proc.run_sapt,
| 'sapt2+(ccd)' : proc.run_sapt,
'sapt2+(3)(ccd)': proc.run_sapt,
'sapt2+3(ccd)' : proc.run_sapt,
'sapt2+dmp2' : proc.run_sapt,
'sapt2+(3)dmp2' : proc.run_sapt,
'sapt2+3dmp2' : proc | .run_sapt,
'sapt2+(ccd)dmp2' : proc.run_sapt,
'sapt2+(3)(ccd)dmp2' : proc.run_sapt,
'sapt2+3(ccd)dmp2' : proc.run_sapt,
'sapt0-ct' : proc.run_sapt_ct,
'sapt2-ct' : proc.run_sapt_ct,
'sapt2+-ct' : proc.run_sapt_ct,
'sapt2+(3)-ct' : proc.run_sapt_ct,
'sapt2+3-ct' : proc.run_sapt_ct,
'sapt2+(ccd)-ct' : proc.run_sapt_ct,
'sapt2+(3)(ccd)-ct' : proc.run_sapt_ct,
'sapt2+3(ccd)-ct' : proc.run_sapt_ct,
'fisapt0' : proc.run_fisapt,
'ccenergy' : proc.run_ccenergy, # full control over ccenergy
'ccsd' : proc.select_ccsd,
'ccsd(t)' : proc.select_ccsd_t_,
'ccsd(at)' : proc.select_ccsd_at_,
'cc2' : proc.run_ccenergy,
'cc3' : proc.run_ccenergy,
'mrcc' : proc.run_mrcc, # interface to Kallay's MRCC program
'bccd' : proc.run_bccd,
'bccd(t)' : proc.run_bccd,
'eom-ccsd' : proc.run_eom_cc,
'eom-cc2' : proc.run_eom_cc,
'eom-cc3' : proc.run_eom_cc,
'detci' : proc.run_detci, # full control over detci
'mp' : proc.run_detci, # arbitrary order mp(n)
'zapt' : proc.run_detci, # arbitrary order zapt(n)
'cisd' : proc.select_cisd,
'cisdt' : proc.run_detci,
'cisdtq' : proc.run_detci,
'ci' : proc.run_detci, # arbitrary order ci(n)
'fci' : proc.run_detci,
'casscf' : proc.run_detcas,
'rasscf' : proc.run_detcas,
'adc' : proc.run_adc,
# 'cphf' : proc.run_libfock,
# 'cis' : proc.run_libfock,
# 'tdhf' : proc.run_libfock,
# 'cpks' : proc.run_libfock,
# 'tda' : proc.run_libfock,
# 'tddft' : proc.run_libfock,
'psimrcc' : proc.run_psimrcc,
'psimrcc_scf' : proc.run_psimrcc_scf,
'qcisd' : proc.run_fnocc,
'qcisd(t)' : proc.run_fnocc,
'mp4' : proc.select_mp4,
'mp4(sdq)' : proc.run_fnocc,
'fno-ccsd' : proc.select_fnoccsd,
'fno-ccsd(t)' : proc.select_fnoccsd_t_,
'fno-qcisd' : proc.run_fnocc,
'fno-qcisd(t)' : proc.run_fnocc,
'fno-mp3' : proc.run_fnocc,
'fno-mp4(sdq)' : proc.run_fnocc,
'fno-mp4' : proc.run_fnocc,
'fno-lccd' : proc.run_cepa,
'fno-lccsd' : proc.run_cepa,
'fno-cepa(0)' : proc.run_cepa,
'fno-cepa(1)' : proc.run_cepa,
'fno-cepa(3)' : proc.run_cepa,
'fno-acpf' : proc.run_cepa,
'fno-aqcc' : proc.run_cepa,
'fno-cisd' : proc.run_cepa,
'lccd' : proc.select_lccd,
'lccsd' : proc.run_cepa,
'cepa(0)' : proc.run_cepa,
'cepa(1)' : proc.run_cepa,
'cepa(3)' : proc.run_cepa,
'acpf' : proc.run_cepa,
'aqcc' : proc.run_cepa,
'efp' : proc.run_efp,
'dmrg-scf' : proc.run_dmrgscf,
'dmrg-caspt2' : proc.run_dmrgscf,
'dmrg-ci' : proc.run_dmrgci,
# Upon adding a method to this list, add it to the docstring in energy() below
# Aliases are discouraged. If you must add an alias to this list (e.g.,
# lccsd/cepa(0)), please search the whole driver to find uses of
# name in return values and psi variables and extend the logic to
# encompass the new alias.
},
'gradient' : {
'hf' : proc.run_scf_gradient,
'scf' : proc.run_scf_gradient,
'cc2' : proc.run_ccenergy_gradient,
'ccsd' : proc.select_ccsd_gradient,
'ccsd(t)' : proc.select_ccsd_t__gradient,
'mp2' : proc.select_mp2_gradient,
'eom-ccsd' : proc.run_eom_cc_gradient,
'dcft' : proc.run_dcft_gradient,
'omp2' : proc.select_omp2_gradient,
'omp3' : proc.select_omp3_gradient,
'mp3' : proc.select_mp3_gradient,
'mp2.5' : proc.select_mp2p5_gradient,
'omp2.5' : proc.select_omp2p5_gradient,
'lccd' : proc.select_lccd_gradient,
'olccd' : proc.select_olccd_gradient,
'ccd' : proc.run_dfocc_gradient,
# Upon adding a method to this list, add it to the docstring in optimize() below
},
'hessian' : {
# Upon adding a method to this list, add it to the docstring in frequency() below
'hf' : proc.run_scf_hessian,
'scf' |
cutoffthetop/zeitnow | bootstrap.py | Python | bsd-2-clause | 4,121 | 0.003155 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
$Id$
"""
import os, shutil, sys, tempfile, urllib2
from optparse import OptionParser
# Eggs fetched while bootstrapping land in a throwaway directory that is
# deleted again at the very bottom of this script.  (Python 2 only.)
tmpeggs = tempfile.mkdtemp()
is_jython = sys.platform.startswith('java')
# parsing arguments
parser = OptionParser(
    'This is a custom version of the zc.buildout %prog script. It is '
    'intended to meet a temporary need if you encounter problems with '
    'the zc.buildout 1.5 release.')
parser.add_option("-v", "--version", dest="version", default='1.4.4',
                  help='Use a specific zc.buildout version. *This '
                  'bootstrap script defaults to '
                  '1.4.4, unlike usual buildpout bootstrap scripts.*')
parser.add_option("-d", "--distribute",
                  action="store_true", dest="distribute", default=False,
                  help="Use Disribute rather than Setuptools.")
parser.add_option("-c", None, action="store", dest="config_file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
options, args = parser.parse_args()
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
    args += ['-c', options.config_file]
if options.version is not None:
    VERSION = '==%s' % options.version
else:
    VERSION = ''
USE_DISTRIBUTE = options.distribute
args = args + ['bootstrap']
# Make sure setuptools (or distribute) is importable.  The _distribute
# attribute marks a distribute-provided pkg_resources; if it is missing we
# force a (re)install by raising ImportError ourselves.
to_reload = False
try:
    import pkg_resources
    if not hasattr(pkg_resources, '_distribute'):
        to_reload = True
        raise ImportError
except ImportError:
    ez = {}
    # Download and run the appropriate installer script, collecting its
    # globals (including use_setuptools) into the ez dict.
    if USE_DISTRIBUTE:
        exec urllib2.urlopen('http://python-distribute.org/distribute_setup.py'
                         ).read() in ez
        ez['use_setuptools'](to_dir=tmpeggs, download_delay=0, no_fake=True)
    else:
        exec urllib2.urlopen('http://peak.telecommunity.com/dist/ez_setup.py'
                         ).read() in ez
        ez['use_setuptools'](to_dir=tmpeggs, download_delay=0)
    if to_reload:
        reload(pkg_resources)
    else:
        import pkg_resources
# os.spawn on Windows does not quote arguments itself, so wrap any
# argument containing a space in double quotes by hand.
if sys.platform == 'win32':
    def quote(c):
        if ' ' in c:
            return '"%s"' % c # work around spawn lamosity on windows
        else:
            return c
else:
    def quote (c):
        return c
ws = pkg_resources.working_set
if USE_DISTRIBUTE:
    requirement = 'distribute'
else:
    requirement = 'setuptools'
# Child processes need setuptools/distribute on their PYTHONPATH.
env = dict(os.environ,
           PYTHONPATH=
           ws.find(pkg_resources.Requirement.parse(requirement)).location
           )
# Run easy_install in a subprocess to download zc.buildout into tmpeggs.
cmd = [quote(sys.executable),
       '-c',
       quote('from setuptools.command.easy_install import main; main()'),
       '-mqNxd',
       quote(tmpeggs)]
if 'bootstrap-testing-find-links' in os.environ:
    cmd.extend(['-f', os.environ['bootstrap-testing-find-links']])
cmd.append('zc.buildout' + VERSION)
if is_jython:
    import subprocess
    exitcode = subprocess.Popen(cmd, env=env).wait()
else: # Windows prefers this, apparently; otherwise we would prefer subprocess
    exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
assert exitcode == 0
# Put the freshly downloaded egg on the working set and hand control to
# buildout's own bootstrap, then clean up the scratch directory.
ws.add_entry(tmpeggs)
ws.require('zc.buildout' + VERSION)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
|
webbers/pyhammer | pyhammer/tasks/helpers/makenugetpackagetask.py | Python | mit | 1,540 | 0.012338 | # -*- coding: utf-8 -*-
import os
import ntpath
import glob
from | pyhammer.tasks.taskbase import TaskBase
from pyhammer.utils import execProg
class MakeNugetPackageTask(TaskBase):
    """Build a .nupkg from a C# project and optionally push it to a feed.

    The task rebuilds the project with msbuild, packs it with nuget into
    ``tempDir`` and, when ``publishDir`` is given, pushes the resulting
    package there and deletes the local copy.
    """

    def __init__(self, csProjectPath, solutionDir, tempDir, publishDir, visualStudioVersion="12.0"):
        super(MakeNugetPackageTask, self).__init__()
        self.csProjectPath = csProjectPath
        self.solutionDir = solutionDir
        self.tempDir = tempDir
        self.publishDir = publishDir
        self.visualStudioVersion = visualStudioVersion

    def do(self):
        """Run the build/pack/push pipeline; returns False on first failure."""
        self.reporter.message("MAKING NUGET PACKAGE FROM: %s" % ntpath.basename(self.csProjectPath))
        # Step 1: rebuild the project in Release configuration.
        result = execProg("msbuild.exe \"%s\" /t:Rebuild /p:Configuration=Release;VisualStudioVersion=%s" % (self.csProjectPath, self.visualStudioVersion), self.reporter, self.solutionDir)
        if result != 0:
            return False
        # Step 2: pack the freshly built binaries into the temp directory.
        result = execProg("nuget.exe pack \"%s\" -IncludeReferencedProjects -prop Configuration=Release -OutputDirectory \"%s\"" % (self.csProjectPath, self.tempDir), self.reporter)
        if result != 0:
            return False
        # Step 3 (optional): push the package to the publish feed.  The
        # push runs from the temp dir so the package can be referenced by
        # its bare file name.
        if self.publishDir is not None:
            os.chdir(self.tempDir)
            packages = glob.glob("*.nupkg")
            result = execProg("nuget.exe push \"%s\" -Source \"%s\"" % (packages[0], self.publishDir), self.reporter)
            os.remove(packages[0])
            if result != 0:
                return False
        return True
|
rwl/PyCIM | CIM14/IEC61970/Dynamics/MetaBlockState.py | Python | mit | 2,325 | 0.00172 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permissio | n is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall | be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Dynamics.MetaBlockConnectable import MetaBlockConnectable
class MetaBlockState(MetaBlockConnectable):
    """Dynamics meta-block state variable.

    Maintains a two-way association with its owning MetaBlock: assigning
    ``MemberOf_MetaBlock`` also keeps the block's ``MetaBlockState``
    back-reference list in sync.
    """

    def __init__(self, MemberOf_MetaBlock=None, *args, **kw_args):
        """Initialises a new 'MetaBlockState' instance.

        @param MemberOf_MetaBlock: owning MetaBlock, if any
        """
        # Start detached, then route the argument through the property so
        # the reverse reference on the MetaBlock gets established.
        self._MemberOf_MetaBlock = None
        self.MemberOf_MetaBlock = MemberOf_MetaBlock
        super(MetaBlockState, self).__init__(*args, **kw_args)

    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["MemberOf_MetaBlock"]
    _many_refs = []

    def getMemberOf_MetaBlock(self):
        return self._MemberOf_MetaBlock

    def setMemberOf_MetaBlock(self, value):
        # Detach from the current owner's back-reference list first.
        current = self._MemberOf_MetaBlock
        if current is not None:
            current._MetaBlockState = [
                x for x in current.MetaBlockState if x != self]
        self._MemberOf_MetaBlock = value
        # Then register with the new owner, avoiding duplicates.
        if value is not None and self not in value._MetaBlockState:
            value._MetaBlockState.append(self)

    MemberOf_MetaBlock = property(getMemberOf_MetaBlock, setMemberOf_MetaBlock)
|
bankonme/www.freedomsponsors.org | djangoproject/core/migrations/0046_auto__add_field_issue_status.py | Python | agpl-3.0 | 20,741 | 0.007907 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the 'status' column to core_issue."""
        # Adding field 'Issue.status'
        db.add_column('core_issue', 'status',
                      self.gf('django.db.models.fields.CharField')(default='EMPTY', max_length=40),
                      keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the 'status' column again."""
        # Deleting field 'Issue.status'
        db.delete_column('core_issue', 'status')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'bitcoin_frespo.moneysent': {
'Meta': {'object_name': 'MoneySent'},
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'from_address': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'to_address': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'transaction_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '8'})
},
'bitcoin_frespo.receiveaddress': {
'Meta': {'object_name': 'ReceiveAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'available': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.actionlog': {
'Meta': {'object_name': 'ActionLog'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'entity': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']", 'null': 'True'}),
'issue_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.IssueComment']", 'null': 'True'}),
'new_json': ('django.db.models.fields.TextField', [], {}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']", 'null': 'True'}),
'old_json': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Project']", 'null': 'True'}),
'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Solution']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'core.issue': {
'Meta': {'object_name': 'Issue'},
'createdByUser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('djan | go.db.models.fields.TextField', [], {'nul | l': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_feedback': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public_suggestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Project']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'trackerURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'trackerURL_noprotocol': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updatedDate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'core.issuecomment': {
'Meta': {'object_name': 'IssueComment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"})
},
|
stackforge/monasca-api | monasca_api/tests/policy/base.py | Python | apache-2.0 | 3,063 | 0 | # Copyright 2017 OP5 AB
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law o | r agreed to in writing, software
# distributed under the License i | s distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for policy unit tests."""
import os
import fixtures
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_policy import opts as policy_opts
from oslo_serialization import jsonutils
from oslotest import base
from monasca_api.common.policy import policy_engine
CONF = cfg.CONF
class FakePolicy(object):
    """Stand-in policy object that declares no default rules at all."""

    def list_rules(self):
        # An empty rule list keeps the generated policy file empty.
        return []
class ConfigFixture(config_fixture.Config):
    """Config fixture that initialises CONF for monasca-api tests."""

    def setUp(self):
        super(ConfigFixture, self).setUp()
        # Parse an empty argument list so CONF is fully initialised with
        # project defaults before any option is read or overridden.
        CONF(args=[],
             prog='api',
             project='monasca',
             version=0,
             description='Testing monasca-api.common')
        # Register oslo.policy's own options (policy_file etc.) on CONF.
        policy_opts.set_defaults(CONF)
class BaseTestCase(base.BaseTestCase):
    """Common base for policy tests: wires up config and an empty policy."""

    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.useFixture(ConfigFixture(CONF))
        self.useFixture(EmptyPolicyFixture())

    @staticmethod
    def conf_override(**kw):
        """Override flag variables for a test."""
        # An optional 'group' keyword selects the option group; everything
        # else is treated as option-name/value pairs.
        group = kw.pop('group', None)
        for name, value in kw.items():
            CONF.set_override(name, value, group)
class EmptyPolicyFixture(fixtures.Fixture):
    """Override the policy with an empty policy file.

    This overrides the policy with a completely fake and synthetic
    policy file.
    """

    def setUp(self):
        super(EmptyPolicyFixture, self).setUp()
        self._prepare_policy()
        # Point the engine at the fake (rule-less) policy and rebuild it.
        policy_engine.POLICIES = FakePolicy()
        policy_engine.reset()
        policy_engine.init()
        self.addCleanup(policy_engine.reset)

    def _prepare_policy(self):
        # Write an (almost) empty policy.yaml into a throwaway directory
        # and point oslo.policy's config options at it.
        tmp_dir = self.useFixture(fixtures.TempDir())
        policy_file = os.path.join(tmp_dir.path, 'policy.yaml')
        rules = jsonutils.loads('{}')
        self.add_missing_default_rules(rules)
        with open(policy_file, 'w') as out:
            jsonutils.dump(rules, out)
        BaseTestCase.conf_override(policy_file=policy_file,
                                   group='oslo_policy')
        BaseTestCase.conf_override(policy_dirs=[], group='oslo_policy')

    def add_missing_default_rules(self, rules):
        """Fill *rules* with any default rules it does not define yet."""
        for rule in FakePolicy().list_rules():
            rules.setdefault(rule.name, rule.check_str)
|
katakumpo/nicedjango | tests/test_compact_csv_reader.py | Python | mit | 1,175 | 0 | from __future__ import unicode_literals
from django.utils import six
import pytest
from nicedjango.utils.compact_csv import CsvReader
@pytest.fixture
def stream():
    # One CSV record exercising every escape case at once: an escaped
    # quote, escaped \r and \n, a comma inside quotes, a trailing escaped
    # backslash, a bare digit, a NULL marker and an empty quoted field.
    # The bytes include \xc2\x96 so utf-8 decoding is exercised too.
    csv = b'''"a\xc2\x96b\\"c'd\\re\\nf,g\\\\",1,NULL,""\n'''.decode('utf-8')
    return six.StringIO(csv)
def test_reader_raw(stream):
    # With every feature disabled the reader must hand back the raw
    # tokens: quotes kept, escapes untouched, NULL and digits literal.
    r = CsvReader(stream, replacements=(), preserve_quotes=True, symbols=(),
                  replace_digits=False)
    assert list(r) == [['''"a\x96b\\"c'd\\re\\nf,g\\\\"''', '1', 'NULL', '""']]
def test_reader_none(stream):
    # Default symbols enabled: the NULL token becomes Python None while
    # everything else stays raw.
    r = CsvReader(stream, replacements=(), preserve_quotes=True,
                  replace_digits=False)
    assert list(r) == [['''"a\x96b\\"c'd\\re\\nf,g\\\\"''', '1', None, '""']]
def test_reader_quotes(stream):
    # Quote stripping enabled: surrounding quotes are removed and the
    # empty quoted field collapses to an empty string.
    r = CsvReader(stream, replacements=(), replace_digits=False)
    assert list(r) == [['''a\x96b\\"c'd\\re\\nf,g\\\\''', '1', None, '']]
def test_reader_replace(stream):
    # Default replacements enabled: backslash escapes are resolved into
    # the real characters (", \r, \n, \).
    r = CsvReader(stream, replace_digits=False)
    assert list(r) == [['''a\x96b"c'd\re\nf,g\\''', '1', None, '']]
def test_reader_replace_digit(stream):
    # All defaults: additionally, purely numeric fields become ints.
    r = CsvReader(stream)
    assert list(r) == [['''a\x96b"c'd\re\nf,g\\''', 1, None, '']]
|
opmuse/opmuse | opmuse/test/test_security.py | Python | agpl-3.0 | 1,210 | 0 | # Copyright 2012-2015 Mattias Fliesberg
#
# This file is part of opmuse.
#
# opmuse is free so | ftware: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# opmuse is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public | License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with opmuse. If not, see <http://www.gnu.org/licenses/>.
from . import setup_db, teardown_db
from opmuse.security import User, hash_password
class TestSecurity:
    """Checks for the salted password hashing used at login."""
    def setup_method(self):
        # Fresh database (seeded with the default admin user) per test.
        setup_db(self)
    def teardown_method(self):
        teardown_db(self)
    def test_login(self):
        user = self.session.query(User).filter_by(login="admin").one()
        # The correct password must hash (with the stored per-user salt)
        # to the stored password hash...
        hashed = hash_password("admin", user.salt)
        assert hashed == user.password
        # ...and a wrong password must not.
        hashed = hash_password("wrong", user.salt)
        assert hashed != user.password
|
Ksynko/django-crm | sample_project/external_apps/crm/views.py | Python | bsd-3-clause | 23,073 | 0.00234 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# $Id: views.py 425 2009-07-14 03:43:01Z tobias $
# ----------------------------------------------------------------------------
#
# Copyright (C) 2008-2009 Caktus Consulting Group, LLC
#
# This file is part of django-crm and was originally extracted from minibooks.
#
# django-crm is published under a BSD-style license.
#
# You should have received a copy of the BSD License along with django-crm.
# If not, see <http://www.opensource.org/licenses/bsd-license.php>.
#
import datetime
import difflib
import simplejson as json
from django.template import RequestContext, Context, loader
from django.shortcuts import get_object_or_404, render_to_response
from django.contrib.auth.decorators import login_required, permission_required
from django.http import HttpResponseRedirect
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponse, Http404
from django.db.models import Q
from django.db import transaction
from django.contrib.auth.models import User, Group
from django.contrib.auth import authenticate, login
from django.core.mail import send_mass_mail, send_mail
from django.views.decorators.csrf import csrf_exempt
from contactinfo.helpers import create_edit_location
from contactinfo import models as contactinfo
from crm import models as crm
from crm import forms as crm_forms
from crm.decorators import render_with
@login_required
@render_with('crm/dashboard.html')
def dashboard(request):
    """Landing page: the user's interactions, projects and recent exchanges.

    Everything shown is derived from request.contact (attached by
    middleware); users without an associated contact get empty lists.
    """
    if request.contact:
        # soonest first
        upcoming_interactions = request.contact.interactions.select_related(
            'cdr',
            'contacts',
            'project',
            'project__business',
        ).filter(completed=False).order_by('date')
        # most recent first
        recent_interactions = request.contact.interactions.select_related(
            'cdr',
            'contacts',
            'project',
            'project__business',
        ).filter(completed=True)[:6]
        if hasattr(request.contact, 'contact_projects'):
            # Open projects only, ordered for grouped display.
            projects = request.contact.contact_projects.order_by(
                'status__sort_order',
                'status__label',
                'type__sort_order',
                'type__label',
                'name',
            ).select_related(
                'business',
                'status',
                'type',
            ).exclude(status__label__in=('Closed', 'Complete'))
            from timepiece import models as timepiece
            # Trac environments this contact can reach through an svn-*
            # relationship; used to flag each project below.
            svn_accessible = timepiece.Project.objects.filter(
                contacts=request.contact,
                project_relationships__types__slug__startswith='svn-'
            ).values_list('trac_environment', flat=True).distinct()
            for project in projects:
                project.svn_accessible = project.trac_environment in svn_accessible
        else:
            projects = []
    else:
        upcoming_interactions = []
        recent_interactions = []
        projects = []
    context = {
        'recent_interactions': recent_interactions,
        'upcoming_interactions': upcoming_interactions,
        'projects': projects,
    }
    try:
        # minibooks is an optional dependency; skip exchanges when absent.
        from minibooks.ledger.models import Exchange
        # there are no permissions on this view, so all DB access
        # must filter by request.user
        context['recent_exchanges'] = Exchange.objects.filter(
            business__type='business',
            business__contacts=request.contact,
        ).select_related('type', 'business')[:10]
    except ImportError:
        pass
    return context
@login_required
def quick_search(request):
    """Redirect to whatever the quick-search form resolves the query to.

    Any request without a valid GET search (including POSTs) ends in 404.
    """
    if not request.GET:
        raise Http404
    form = crm_forms.QuickSearchForm(request.GET)
    if form.is_valid():
        # form.save() returns the URL of the matched object.
        return HttpResponseRedirect(form.save())
    raise Http404
@permission_required('crm.view_profile')
@render_with('crm/person/list.html')
def list_people(request):
    """List individual contacts, optionally filtered by a name search.

    A search matching exactly one person redirects straight to that
    person's detail page instead of rendering the list.
    """
    form = crm_forms.SearchForm(request.GET)
    if form.is_valid() and 'search' in request.GET:
        search = form.cleaned_data['search']
        # Case-insensitive match on either name part.
        people = crm.Contact.objects.filter(type='individual').filter(
            Q(first_name__icontains=search) |
            Q(last_name__icontains=search)
        )
        if people.count() == 1:
            # Single hit: skip the list and go straight to the profile.
            return HttpResponseRedirect(
                reverse(
                    'view_person',
                    kwargs={'person_id':people[0].id}
                )
            )
    else:
        people = crm.Contact.objects.filter(type='individual')
    context = {
        'form': form,
        # it'd be nice if we could grab 'phones' too, but that isn't supported:
        # http://code.djangoproject.com/ticket/6432
        'people': people.select_related('user').order_by('sort_name'),
        'phone_types': contactinfo.Phone.PHONE_TYPES,
    }
    return context
@login_required
@render_with('crm/person/view.html')
def view_person(request, person_id):
    """Show an individual contact plus their ten most recent interactions."""
    try:
        # Only 'individual' contacts are viewable here; businesses 404.
        person = crm.Contact.objects.filter(
            type='individual'
        ).select_related().get(pk=person_id)
    except crm.Contact.DoesNotExist:
        raise Http404
    # Newest first, capped at ten.
    interactions = person.interactions.order_by('-date').select_related(
        'contacts',
        'project',
        'project__business',
    )[0:10]
    context = {
        'contact': person,
        'interactions': interactions,
        'can_edit': person.is_editable_by(request.user),
    }
    return context
@render_with('crm/contact/email.html')
def email_contact(request, contact_slug):
    """Render and process the "e-mail this contact" form.

    NOTE(review): unlike the other views in this module there is no
    @login_required decorator here -- presumably intentional (public
    contact form), but worth confirming.
    """
    try:
        contact = crm.Contact.objects.select_related().get(slug=contact_slug)
    except crm.Contact.DoesNotExist:
        raise Http404
    if request.POST:
        form = crm_forms.EmailContactForm(
            request.POST,
            recipients=[contact.email],
        )
        if form.is_valid():
            # Send the message, flash a confirmation, and return to the
            # contact's detail page.
            form.save()
            request.notifications.add(
                'Message sent successfully to %s.' % contact
            )
            view_person_url = reverse('view_person', args=[contact.id])
            return HttpResponseRedirect(view_person_url)
    else:
        form = crm_forms.EmailContactForm(recipients=[contact.email])
    return {
        'form': form,
        'contact': contact,
    }
@login_required
@transaction.commit_on_success
@render_with('crm/person/create_edit.html')
def create_edit_person(request, person_id=None):
if person_id:
profile = get_object_or_404(crm.Contact, pk=person_id)
try:
location = profile.locations.all()[0]
except IndexError:
location = None
else:
profile = None
location = None
new_location = not location
if profile and not profile.is_editable_by(request.user):
return HttpResponseRedirect(reverse('auth_login'))
if request.POST:
pre_save = ''
if profile:
pre_save = profile.as_text_block()
profile_form = crm_forms.ProfileForm(
request.POST,
instance=profile,
request=request,
)
location, location_saved, location_context = create_edit_location(
request,
location,
profile_form.is_valid(),
)
if location_saved:
# no e-mail will be sent if dict is empty or None
email = {
# 'template': 'path/to/email/template.txt',
# 'subject': 'Welcome!',
# 'extra_context': { 'somekey': 'someval' },
}
saved_profile = profile_form.save()
if new_location:
saved_profile.locations.add(location)
if saved_profile:
message = 'Person updated successfully'
else:
message = 'New person created successfully'
request.notifications.add(message)
post_save = saved_profile.as_text_block()
try:
group = Group.objects.get(name='Contact Notifications')
|
andrewklau/openshift-tools | ansible/roles/lib_openshift_3.2/library/oc_version.py | Python | apache-2.0 | 33,153 | 0.002624 | #!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
    """Raised when an `oc`/`oadm` wrapper operation fails."""
    pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI

        namespace: project/namespace passed as `-n` to most commands
        kubeconfig: kubeconfig path exported as KUBECONFIG for every call
        verbose: when True, echo each command line before running it
        all_namespaces: when True, `_get` queries use --all-namespaces
        '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = kubeconfig
        self.all_namespaces = all_namespaces
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content

        Fetches the live object, applies each key/value from `content`
        via Yedit, and only calls `oc replace` if something changed.
        Returns {'returncode': 0, 'updated': False} when nothing changed.
        '''
        res = self._get(resource, rname)
        if not res['results']:
            # Nothing to replace; surface the lookup result as-is.
            return res
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))
        # yed.put returns a (changed, ...) tuple; replace only on change.
        if any([change[0] for change in changes]):
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)
        return {'returncode': 0, 'updated': False}
    def _replace(self, fname, force=False):
        '''replace an object from a definition file via `oc replace -f` '''
        cmd = ['-n', self.namespace, 'replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)
    def _create_from_content(self, rname, content):
        '''create an object from an in-memory definition by writing it
        to /tmp/<rname> and running `oc create -f` on it '''
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, content=content)
        yed.write()
        # Remove the temp file on interpreter exit.
        atexit.register(Utils.cleanup, [fname])
        return self._create(fname)
    def _create(self, fname):
        '''create an object from a definition file via `oc create -f` '''
        return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
    def _delete(self, resource, rname, selector=None):
        '''delete the named resource, optionally narrowed by a label selector '''
        cmd = ['delete', resource, rname, '-n', self.namespace]
        if selector:
            cmd.append('--selector=%s' % selector)
        return self.openshift_cmd(cmd)
def _process(self, template_name, crea | te=False, params=None, template_data=None):
'''return all pods '''
cmd = ['process', '-n', self.namespace]
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = '/tmp/%s' % template_name
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
if self.all_namespaces:
cmd.extend(['--all-namespaces'])
elif self.namespace:
cmd.extend(['-n', self.namespace])
cmd.extend(['-o', 'json'])
if rname:
cmd.append(rname)
rval = self.openshift_cmd(cmd, output=True)
# Ensure results are retuned in an array
if rval.has_key('items'):
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' mark nodes schedulable/unschedulable via `oadm manage-node`

        node: list of node names (extends the command), or None to use selector
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        cmd.append('--schedulable=%s' % schedulable)
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' list pods on nodes via `oadm manage-node --list-pods`

        node: list of node names, or None to select nodes by label selector
        pod_selector: optional label selector applied to pods
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
#pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node --evacuate

        node: list of node names, or None to select nodes by label selector
        pod_selector: optional label selector restricting which pods move
        dry_run: show what would be evacuated without doing it
        grace_period: pod termination grace period in seconds
        force: force deletion of pods that cannot be evacuated cleanly
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if dry_run:
            cmd.append('--dry-run')
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))
        if force:
            cmd.append('--force')
        cmd.append('--evacuate')
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
#pylint: disable=too-many-arguments
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = []
if oadm:
cmds = ['/usr/bin/oadm']
else:
cmds = ['/usr/bin/oc']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={'KUBECONFIG': self.kubeconfig})
stdout, stderr = proc.communicate(input_data)
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
|
googleapis/python-bigquery | samples/tests/test_client_query_w_array_params.py | Python | apache-2.0 | 792 | 0 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Versio | n 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See t | he License for the specific language governing permissions and
# limitations under the License.
from .. import client_query_w_array_params
def test_client_query_w_array_params(capsys,):
    """Run the array-parameter sample and check its printed output."""
    client_query_w_array_params.client_query_w_array_params()
    out, err = capsys.readouterr()
    # The sample queries popular names; "James" must appear in stdout.
    assert "James" in out
|
Petraea/jsonbot | jsb/lib/periodical.py | Python | mit | 8,994 | 0.005003 | # gozerbot/periodical.py
#
#
""" provide a periodic structure. """
__author__ = "Wijnand 'tehmaze' Modderman - http://tehmaze.com"
__license__ = "BSD License"
## jsb imports
from jsb.utils.exception import handle_exception
from jsb.utils.trace import calledfrom, whichmodule
from jsb.utils.locking import lockdec
from jsb.utils.timeutils import strtotime
from jsb.lib.callbacks import callbacks
import jsb.lib.threads as thr
## basic imorts
import datetime
import sys
import time
import thread
import types
import logging
## locks
plock = thread.allocate_lock()
locked = lockdec(plock)
## defines
pidcount = 0
## JobError class
class JobError(Exception):
    """Raised when a job cannot be constructed (e.g. invalid start time)."""
    pass
## Job class
class Job(object):
    """ job to be scheduled.

    Base class: subclasses are expected to set self.func/self.args/self.kw
    before do() is called.
    """
    group = ''
    pid = -1
    def __init__(self):
        # Hand out a process-wide unique job id from the module counter.
        # NOTE(review): the global increment is not guarded by a lock --
        # confirm single-threaded job creation.
        global pidcount
        pidcount += 1
        self.pid = pidcount
    def id(self):
        """ return job id. """
        return self.pid
    def member(self, group):
        """ check for group membership. """
        return self.group == group
    def do(self):
        """ try the callback, routing any exception to handle_exception. """
        try: self.func(*self.args, **self.kw)
        except Exception, ex: handle_exception()
class JobAt(Job):
    """ job to run at a specific time/interval/repeat.

    start: epoch number, or a date/time string parsed by strtotime
    interval: number of days when an int, otherwise seconds
    repeat: how many runs before the job expires (0 = run forever)
    """
    def __init__(self, start, interval, repeat, func, *args, **kw):
        Job.__init__(self)
        self.func = func
        self.args = args
        self.kw = kw
        self.repeat = repeat
        self.description = ""
        self.counts = 0
        if type(start) in [types.IntType, types.FloatType]: self.next = float(start)
        elif type(start) in [types.StringType, types.UnicodeType]:
            d = strtotime(start)
            if d and d > time.time(): self.next = d
            else: raise JobError("invalid date/time")
        if type(interval) in [types.IntType]:
            d = datetime.timedelta(days=interval)
            # BUG FIX: timedelta.seconds is only the sub-day remainder and
            # is 0 for a whole number of days, which made integer intervals
            # fire on every check; use the full span in seconds instead.
            self.delta = d.days * 24 * 60 * 60 + d.seconds
        else: self.delta = interval
    def __repr__(self):
        """ return a string representation of the JobAt object. """
        return '<JobAt instance next=%s, interval=%s, repeat=%d, function=%s>' % (str(self.next), str(self.delta), self.repeat, str(self.func))
    def check(self):
        """ run check to see if job needs to be scheduled.

        Returns False when the job has exhausted its repeat budget and
        should be removed by the scheduler.
        """
        if self.next <= time.time():
            logging.info('running %s - %s' % (str(self.func), self.description))
            self.func(*self.args, **self.kw)
            self.next += self.delta
            self.counts += 1
            if self.repeat > 0 and self.counts >= self.repeat: return False
        return True
class JobInterval(Job):
    """ job to be scheduled at certain interval.

    interval: seconds between runs
    repeat: number of runs before the job expires (0 = run forever)
    """
    def __init__(self, interval, repeat, func, *args, **kw):
        Job.__init__(self)
        self.func = func
        self.args = args
        self.kw = kw
        self.repeat = int(repeat)
        self.counts = 0
        self.interval = float(interval)
        self.description = ""
        self.next = time.time() + self.interval
        self.group = None
        logging.warn('scheduled next run of %s in %d seconds' % (str(self.func), self.interval))
    def __repr__(self):
        """ return a string representation of the JobInterval object. """
        return '<JobInterval instance next=%s, interval=%s, repeat=%d, group=%s, function=%s>' % (str(self.next), str(self.interval), self.repeat, self.group, str(self.func))
    def check(self):
        """ run check to see if job needs to be scheduled.

        Unlike JobAt, the callback runs on a fresh thread via self.do().
        Returns False once the repeat budget is exhausted.
        """
        if self.next <= time.time():
            logging.info('running %s - %s' % (str(self.func), self.description))
            self.next = time.time() + self.interval
            thr.start_new_thread(self.do, ())
            self.counts += 1
            if self.repeat > 0 and self.counts >= self.repeat: return False
        return True
class Periodical(object):
    """ periodical scheduler.

    Holds the list of pending jobs and the subset currently running;
    looponce() is the poll entry point that fires due jobs.
    """
    def __init__(self):
        self.jobs = []      # all scheduled jobs
        self.running = []   # jobs whose last check() fired them
        self.run = True
    def size(self):
        """ return the number of scheduled jobs. """
        return len(self.jobs)
    def addjob(self, sleeptime, repeat, function, description="" , *args, **kw):
        """ add a periodical job; returns the new job's pid. """
        job = JobInterval(sleeptime, repeat, function, *args, **kw)
        # Group jobs by the module that created them so kill() can
        # remove them wholesale on unload.
        job.group = calledfrom(sys._getframe())
        job.description = str(description) or whichmodule()
        self.jobs.append(job)
        return job.pid
    def changeinterval(self, pid, interval):
        """ change interval of a periodical job identified by pid. """
        # NOTE(review): iterates the module-global `periodical`, not
        # self.jobs -- confirm this is intended for non-global instances.
        for i in periodical.jobs:
            if i.pid == pid:
                i.interval = interval
                i.next = time.time() + interval
    def looponce(self, bot, event):
        """ loop over the jobs, firing any that are due.

        bot/event are unused here; presumably required by the callback
        signature of the event loop -- confirm against callers.
        """
        for job in self.jobs:
            if job.next <= time.time():
                self.runjob(job)
    def runjob(self, job):
        """ run a periodical job, removing it once its repeats are spent. """
        if not job.check(): self.killjob(job.id())
        else: self.running.append(job)
    def kill(self):
        """ kill all jobs invoked by the calling module. """
        group = calledfrom(sys._getframe())
        self.killgroup(group)
    def killgroup(self, group):
        """ kill all jobs with the same group. """
        def shoot():
            """ knock down all jobs belonging to group. """
            deljobs = [job for job in self.jobs if job.member(group)]
            for job in deljobs:
                self.jobs.remove(job)
                # A job may not be in self.running; that's fine.
                try: self.running.remove(job)
                except ValueError: pass
            logging.warn('killed %d jobs for %s' % (len(deljobs), group))
            del deljobs
        return shoot()
    def killjob(self, jobId):
        """ kill one job by its id; returns the number of jobs removed. """
        def shoot():
            deljobs = [x for x in self.jobs if x.id() == jobId]
            numjobs = len(deljobs)
            for job in deljobs:
                self.jobs.remove(job)
                try: self.running.remove(job)
                except ValueError: pass
            del deljobs
            return numjobs
        return shoot()
def interval(sleeptime, repeat=0):
    """ interval decorator.

    The decorated function is NOT called directly; invoking the wrapper
    schedules a JobInterval that will run the original function every
    `sleeptime` seconds, `repeat` times (0 = forever).  The wrapper
    itself returns None.
    """
    group = calledfrom(sys._getframe())
    def decorator(function):
        decorator.func_dict = function.func_dict
        def wrapper(*args, **kw):
            job = JobInterval(sleeptime, repeat, function, *args, **kw)
            job.group = group
            job.description = whichmodule()
            periodical.jobs.append(job)
            logging.warn('new interval job %d with sleeptime %d' % (job.id(), sleeptime))
        # NOTE(review): unlike at(), func_dict is not copied onto the
        # wrapper here -- confirm whether callers rely on it.
        return wrapper
    return decorator
def at(start, interval=1, repeat=1):
    """ at decorator.

    Invoking the wrapper schedules a JobAt that runs the original
    function at `start` (epoch or date string), every `interval`,
    `repeat` times.  The wrapper itself returns None.
    """
    group = calledfrom(sys._getframe())
    def decorator(function):
        decorator.func_dict = function.func_dict
        def wrapper(*args, **kw):
            job = JobAt(start, interval, repeat, function, *args, **kw)
            job.group = group
            job.description = whichmodule()
            periodical.jobs.append(job)
        wrapper.func_dict = function.func_dict
        return wrapper
    return decorator
def persecond(function):
    """ per second decorator.

    Invoking the wrapper schedules a JobInterval that runs the original
    function once per second, forever.  The wrapper returns None.
    """
    # BUG FIX: this previously assigned to minutely.func_dict (copy/paste
    # from the decorator below), clobbering the wrong decorator's dict;
    # each decorator in this module keeps its own func_dict.
    persecond.func_dict = function.func_dict
    group = calledfrom(sys._getframe())
    def wrapper(*args, **kw):
        job = JobInterval(1, 0, function, *args, **kw)
        job.group = group
        job.description = whichmodule()
        periodical.jobs.append(job)
        logging.debug('new interval job %d running per second' % job.id())
    return wrapper
def minutely(function):
    """ minute decorator.

    Invoking the wrapper schedules a JobInterval that runs the original
    function every 60 seconds, forever.  The wrapper returns None.
    """
    minutely.func_dict = function.func_dict
    group = calledfrom(sys._getframe())
    def wrapper(*args, **kw):
        job = JobInterval(60, 0, function, *args, **kw)
        job.group = group
        job.description = whichmodule()
        periodical.jobs.append(job)
        logging.warn('new interval job %d running minutely' % job.id())
    return wrapper
def hourly(function):
""" hour decorator. """
logging.warn('@hourly(%s)' % str(function))
hourly.func_dict = function.func_dict
group = calledfrom(sys._getframe())
def wrapper(*args, **kw):
|
JoeJasinski/evesch | evesch/euser/urls.py | Python | gpl-2.0 | 878 | 0.018223 | from django.conf.urls import patterns, include, url
from evesch.euser.views import *
urlpatterns = patterns('',
url('^(?P<username>\w{1,30})/$', 'evesch.euser.views.user_view', {'template_name':'euser/user_view.html'}, name='euser_user_view'),
url('^ajax/lookup_users/$', 'evesch.euser.views.lookup_users', {'template_name':'euser/ajax/lookup_users.html'}, name='euser_ajax_lookup_users'),
url('^ajax/lookup_org_users/(?P<org_short_name>[\w_-]{1,20})/$', 'evesch.euser.views.lookup_users', {'template_name':'euser/ajax/lookup_users.html'}, name='euser_ajax_lookup_org_users'),
url('^(?P<username>\w{1,30})/ical_(?P<user | _feed_hash>\w{1,24}).ics$', 'evesch.core.feed.views.user_ics', {}, name='core_feed_user_ics'),
url('^(?P<username>\w{1,30})/feed_(?P<user_feed_hash>\w{1,24}).rss$', 'e | vesch.core.feed.views.user_rss', {}, name='core_feed_user_rss'),
)
|
lewisodriscoll/sasview | src/sas/sasgui/perspectives/calculator/resolution_calculator_panel.py | Python | bsd-3-clause | 58,547 | 0.002238 | # pylint: disable=attribute-defined-outside-init
"""
This software was developed by the University of Tennessee as part of the
Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
project funded by the US National Science Foundation.
See the license text in license.txt
copyright 2008, 2009, 2010 University of Tennessee
"""
import wx
import sys
import os
import matplotlib
import math
import logging
#Use the WxAgg back end. The Wx one takes too long to render
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2Wx as Toolbar
from matplotlib.backend_bases import FigureManagerBase
# Wx-Pylab magic for displaying plots within an application's window.
from matplotlib import _pylab_helpers
# The Figure object is used to create backend-independent plot representations.
from matplotlib.figure import Figure
#from sas.guicomm.events import StatusEvent
from sas.sascalc.calculator.resolution_calculator import ResolutionCalculator
from sas.sasgui.guiframe.events import StatusEvent
from sas.sasgui.perspectives.calculator.calculator_widgets import OutputTextCtrl
from sas.sasgui.perspectives.calculator.calculator_widgets import InputTextCtrl
from wx.lib.scrolledpanel import ScrolledPanel
from math import fabs
from sas.sasgui.perspectives.calculator import calculator_widgets as widget
from sas.sasgui.guiframe.documentation_window import DocumentationWindow
logger = logging.getLogger(__name__)
_BOX_WIDTH = 100
_Q_DEFAULT = 0.0
# Slit length panel size; native widget metrics differ per platform.
if sys.platform.count("win32") > 0:
    PANEL_TOP = 0
    PANEL_WIDTH = 525
    PANEL_HEIGHT = 653
    FONT_VARIANT = 0
    IS_WIN = True
else:
    PANEL_TOP = 60
    PANEL_WIDTH = 540
    PANEL_HEIGHT = 662
    FONT_VARIANT = 1
    IS_WIN = False
# Particle rest masses in grams (cf. the "[g]" tooltips in _layout_mass);
# Photon is massless.
_SOURCE_MASS = {'Alpha':6.64465620E-24,
                'Deuteron':3.34358320E-24,
                'Neutron':1.67492729E-24,
                'Photon': 0.0,
                'Proton':1.67262137E-24,
                'Triton':5.00826667E-24}
class ResolutionCalculatorPanel(ScrolledPanel):
"""
Provides the Resolution calculator GUI.
"""
## Internal nickname for the window, used by the AUI manager
window_name = "Q Resolution Estimator"
## Name to appear on the window title bar
window_caption = ""
## Flag to tell the AUI manager to put this panel in the center pane
CENTER_PANE = True
    def __init__(self, parent, *args, **kwds):
        """Build the scrolled panel and initialize calculator state."""
        kwds["size"] = (PANEL_WIDTH * 2, PANEL_HEIGHT)
        kwds["style"] = wx.FULL_REPAINT_ON_RESIZE
        ScrolledPanel.__init__(self, parent, *args, **kwds)
        self.SetupScrolling()
        self.parent = parent
        # input defaults
        self.qx = []
        self.qy = []
        # dQ defaults
        self.sigma_r = None
        self.sigma_phi = None
        self.sigma_1d = None
        # monochromatic or polychromatic source
        self.wave_color = 'mono'
        self.num_wave = 10
        self.spectrum_dic = {}
        # dQ 2d image
        self.image = None
        # results of sigmas
        self.sigma_strings = ' '
        # Font size
        self.SetWindowVariant(variant=FONT_VARIANT)
        # Backend that performs the actual resolution computation.
        self.resolution = ResolutionCalculator()
        # Source selection dict (particle name -> mass).
        self.source_mass = _SOURCE_MASS
        # layout attribute
        self.hint_sizer = None
        # detector coordinate system used when estimating sigmas
        self.det_coordinate = 'cartesian'
        self.source_cb = None
        self._do_layout()
    def _define_structure(self):
        """
        Define the main sizers building to build this application.

        Creates every sizer up front; the _layout_* methods populate them.
        """
        self.main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.vertical_l_sizer = wx.BoxSizer(wx.VERTICAL)
        self.vertical_r_spacer = wx.BoxSizer(wx.VERTICAL)
        self.vertical_r_frame = wx.StaticBox(self, -1, '')
        self.vertical_r_sizer = wx.StaticBoxSizer(self.vertical_r_frame,
                                                  wx.VERTICAL)
        self.box_source = wx.StaticBox(self, -1, str(self.window_caption))
        self.boxsizer_source = wx.StaticBoxSizer(self.box_source, wx.VERTICAL)
        # One horizontal row per input field on the left-hand side.
        self.mass_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.intensity_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.wavelength_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.wavelength_spread_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.source_aperture_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.sample_aperture_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.source2sample_distance_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.sample2sample_distance_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.sample2detector_distance_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.detector_size_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.detector_pix_size_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.input_sizer = wx.BoxSizer(wx.VERTICAL)
        self.output_sizer = wx.BoxSizer(wx.VERTICAL)
        self.hint_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.button_sizer = wx.BoxSizer(wx.HORIZONTAL)
    def _layout_mass(self):
        """
        Fill the sizer containing mass: the particle-source combo box and
        the monochromatic/TOF color combo box.
        """
        # get the mass
        mass_value = str(self.resolution.mass)
        self.mass_txt = wx.StaticText(self, -1, 'Source: ')
        self.mass_hint = "Mass of Neutrons m = %s [g]" % str(self.resolution.mass)
        self.source_cb = wx.ComboBox(self, -1,
                                     style=wx.CB_READONLY,
                                     name='%s' % mass_value)
        # Sort source name because wx2.9 on Mac does not support CB_SORT
        # Custom sorting
        source_list = []
        for key, _ in self.source_mass.iteritems():
            name_source = str(key)
            source_list.append(name_source)
        source_list.sort()
        for idx in range(len(source_list)):
            self.source_cb.Append(source_list[idx], idx)
        self.source_cb.SetStringSelection("Neutron")
        wx.EVT_COMBOBOX(self.source_cb, -1, self._on_source_selection)
        # combo box for color (monochromatic vs time-of-flight)
        self.wave_color_cb = wx.ComboBox(self, -1,
                                         style=wx.CB_READONLY,
                                         name='color')
        # two choices
        self.wave_color_cb.Append('Monochromatic')
        self.wave_color_cb.Append('TOF')
        self.wave_color_cb.SetStringSelection("Monochromatic")
        wx.EVT_COMBOBOX(self.wave_color_cb, -1, self._on_source_color)
        # Tooltip explaining why the source matters (gravity term).
        source_hint = "Source Selection: Affect on"
        source_hint += " the gravitational contribution.\n"
        source_hint += "Mass of %s: m = %s [g]" % \
            ('Neutron', str(self.resolution.mass))
        self.mass_txt.SetToolTipString(source_hint)
        self.mass_sizer.AddMany([(self.mass_txt, 0, wx.LEFT, 15),
                                 (self.source_cb, 0, wx.LEFT, 15),
                                 (self.wave_color_cb, 0, wx.LEFT, 15)])
    def _layout_intensity(self):
        """
        Fill the sizer containing the intensity input [counts/s].
        """
        # get the intensity
        intensity_value = str(self.resolution.intensity)
        intensity_unit_txt = wx.StaticText(self, -1, '[counts/s]')
        intensity_txt = wx.StaticText(self, -1, 'Intensity: ')
        self.intensity_tcl = InputTextCtrl(self, -1,
                                           size=(_BOX_WIDTH, -1))
        intensity_hint = "Intensity of Neutrons"
        self.intensity_tcl.SetValue(intensity_value)
        self.intensity_tcl.SetToolTipString(intensity_hint)
        self.intensity_sizer.AddMany([(intensity_txt, 0, wx.LEFT, 15),
                                      (self.intensity_tcl, 0, wx.LEFT, 15),
                                      (intensity_unit_txt, 0, wx.LEFT, 10)])
def _layout_wavelength(self):
"""
Fill the sizer containing wavelength
"""
# get the wavelength
wavelength_value = str(self.resolution.get_wavelength())
wavelength_unit_txt = wx.StaticText(self, -1, '[A]')
wavelength_txt = wx. |
Lorquas/subscription-manager | src/rhsmlib/facts/collector.py | Python | gpl-2.0 | 4,009 | 0.000499 | from __future__ import print_function, division, absolute_import
# Copyright (c) 2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import logging
import os
import platform
from rhsmlib.facts import collection
log = logging.getLogger(__name__)
def get_arch(prefix=None):
    """Return the system architecture string (e.g. x86_64, ppc64).

    Normally delegates to platform.machine().  A non-default *prefix*
    overrides the result with the stripped contents of '<prefix>/arch';
    this hook is intended only for test purposes.  A prefix with no
    'arch' file is fatal: the IOError is logged and re-raised.
    """
    default_prefix = '/'
    effective_prefix = prefix or default_prefix
    if effective_prefix == default_prefix:
        return platform.machine()
    override_path = os.path.join(effective_prefix, 'arch')
    try:
        with open(override_path, 'r') as override_fd:
            return override_fd.read().strip()
    except IOError as exc:
        # If we specify a prefix and there is no 'arch' file,
        # consider that fatal.
        log.exception(exc)
        raise
# An empty FactsCollector should just return an empty dict on get_all()
class FactsCollector(object):
    """Base class for facts-collecting classes.

    An empty FactsCollector simply returns an empty dict from get_all().
    """

    def __init__(self, arch=None, prefix=None, testing=None,
                 hardware_methods=None, collected_hw_info=None):
        """Base class for facts collecting classes.

        self._collected_hw_info references the facts collected so far in
        this run when provided; some collection methods alter behavior
        based on facts from other modules/classes.  It is not meant to be
        altered as a side effect, but no promises.
        """
        self.allhw = {}
        self.prefix = prefix or ''
        self.testing = testing or False
        self._collected_hw_info = collected_hw_info
        # Needed to decide which arch-specific code path to follow.
        self.arch = arch or get_arch(prefix=self.prefix)
        self.hardware_methods = hardware_methods or []

    def collect(self):
        """Return a FactsCollection built from everything we gathered."""
        gathered = collection.FactsDict()
        gathered.update(self.get_all())
        return collection.FactsCollection(facts_dict=gathered)

    def get_all(self):
        """Run each hardware method and merge the results.

        Hardware probes tend to be fragile, so each one is guarded; a
        failing method is logged and skipped rather than aborting.
        """
        merged = {}
        for probe in self.hardware_methods:
            probe_result = {}
            try:
                probe_result = probe()
            except Exception as e:
                log.warn("Hardware detection [%s] failed: %s" % (probe.__name__, e))
            merged.update(probe_result)
        return merged
class StaticFactsCollector(FactsCollector):
    """Collector whose facts are a fixed dict supplied at construction."""

    def __init__(self, static_facts=None, **kwargs):
        # BUG FIX: super() was previously called with FactsCollector as the
        # class argument, which resolved to object and silently skipped
        # FactsCollector.__init__, leaving attributes like .arch/.prefix
        # unset (and raising TypeError if kwargs were passed).
        super(StaticFactsCollector, self).__init__(**kwargs)
        if static_facts is None:
            static_facts = {}
        self.static_facts = static_facts
        # Default certificate version unless the caller overrides it.
        self.static_facts.setdefault("system.certificate_version", "3.2")

    def get_all(self):
        """Return the static facts dict itself (not a copy)."""
        return self.static_facts
|
GoogleCloudPlatform/guest-test-infra | container_images/pytest/example/src/application/main_test.py | Python | apache-2.0 | 688 | 0.001453 | # Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# | You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitation | s under the License.
from main import three_plus_four
def test_three_plus_four():
    """three_plus_four() should compute 3 + 4."""
    expected = 7
    assert three_plus_four() == expected
|
tankywoo/simiki | simiki/generators.py | Python | mit | 15,605 | 0.000064 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert Markdown file to html, which is embeded in html template.
"""
from __future__ import (print_function, with_statement, unicode_literals,
absolute_import)
import os
import os.path
import io
import copy
import re
import traceback
import warnings
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import markdown
import yaml
from jinja2 import (Environment, FileSystemLoader, TemplateError)
from simiki import jinja_exts
from simiki.utils import import_string
from simiki.compat import is_py2, is_py3, basestring
if is_py3:
from functools import cmp_to_key
PLAT_LINE_SEP = '\n'
class BaseGenerator(object):
    """Base generator class: owns the jinja2 Environment and the common
    template variables shared by all concrete generators."""
    def __init__(self, site_config, base_path):
        """
        :site_config: site global configuration parsed from _config.yml
        :base_path: root path of wiki directory
        :raises Exception: when the configured theme directory is missing
        """
        self.site_config = copy.deepcopy(site_config)
        self.base_path = base_path
        self._templates = {}  # templates cache, keyed by layout name
        self._template_vars = self._get_template_vars()
        _template_path = os.path.join(
            self.base_path,
            site_config["themes_dir"],
            site_config["theme"]
        )
        if not os.path.exists(_template_path):
            raise Exception("Theme `{0}' not exists".format(_template_path))
        self.env = Environment(
            loader=FileSystemLoader(_template_path)
        )
        self._jinja_load_exts()
    def _jinja_load_exts(self):
        """Load jinja custom filters and extensions"""
        for _filter in jinja_exts.filters:
            self.env.filters[_filter] = getattr(jinja_exts, _filter)
    def get_template(self, name):
        """Return the template by layout name (cached after first load)."""
        if name not in self._templates:
            try:
                self._templates[name] = self.env.get_template(name + '.html')
            except TemplateError:
                # jinja2.exceptions.TemplateNotFound will get blocked
                # in multiprocessing?
                exc_msg = "unable to load template '{0}.html'\n{1}" \
                    .format(name, traceback.format_exc())
                raise Exception(exc_msg)
        return self._templates[name]
    def _get_template_vars(self):
        """Return the common template variables"""
        template_vars = {
            'site': self.site_config,
        }
        # if site.root endswith '/', remove it.
        site_root = template_vars['site']['root']
        if site_root.endswith('/'):
            template_vars['site']['root'] = site_root[:-1]
        return template_vars
class PageGenerator(BaseGenerator):
    def __init__(self, site_config, base_path, tags=None):
        """Create a page generator; `tags` is optional site tag data."""
        super(PageGenerator, self).__init__(site_config, base_path)
        self._tags = tags
        self._reset()
    def _reset(self):
        """Reset the per-page state before processing a new source file."""
        self._src_file = None  # source file path relative to base_path
        self.meta = None
        self.content = None
    def to_html(self, src_file, include_draft=False):
        """Load template, and generate html

        :src_file: the filename of the source file. This can either be an
                   absolute filename or a filename relative to the base path.
        :include_draft: True/False, include draft pages or not to generate
        :returns: rendered html string, or None for a skipped draft
        """
        self._reset()
        self._src_file = os.path.relpath(src_file, self.base_path)
        self.meta, self.content = self.get_meta_and_content()
        # Page set `draft: True' mark current page as draft, and will
        # be ignored if not forced generate include draft pages
        if not include_draft and self.meta.get('draft', False):
            return None
        layout = self.get_layout(self.meta)
        template_vars = self.get_template_vars(self.meta, self.content)
        template = self.get_template(layout)
        html = template.render(template_vars)
        return html
    @property
    def src_file(self):
        """Source file path relative to the wiki base path."""
        return self._src_file
    @src_file.setter
    def src_file(self, filename):
        """Store *filename* relative to base_path (absolute paths accepted)."""
        self._src_file = os.path.relpath(filename, self.base_path)
    def get_meta_and_content(self, do_render=True):
        """Return (meta dict, content) for the current source file.

        Content is parsed markup unless rendering is disabled, either by
        the `do_render` argument or a `render: false` page meta entry.
        """
        meta_str, content_str = self.extract_page(self._src_file)
        meta = self.parse_meta(meta_str)
        # This is the most time consuming part
        if do_render and meta.get('render', True):
            content = self._parse_markup(content_str)
        else:
            content = content_str
        return meta, content
def get_layout(self, meta):
    """Return the template layout named in *meta*, defaulting to `page'.

    The legacy layout name "post" is still accepted but emits a
    DeprecationWarning and is mapped to "page".
    """
    if "layout" in meta:
        # Compatible with previous version, which default layout is "post"
        # XXX Will remove this checker in v2.0
        if meta["layout"] == "post":
            warn_msg = "{0}: layout `post' is deprecated, use `page'" \
                .format(self._src_file)
            if is_py2:
                # XXX: warnings message require str, no matter whether
                # py2 or py3; but in py3, bytes message is ok in simple
                # test, but failed in unittest with py3.3, ok with py3.4?
                warn_msg = warn_msg.encode('utf-8')
            warnings.warn(warn_msg, DeprecationWarning)
            layout = "page"
        else:
            layout = meta["layout"]
    else:
        layout = "page"
    return layout
def get_template_vars(self, meta, content):
    """Get template variables, include site config and page config"""
    # Deep-copy the shared site vars so per-page additions below don't
    # leak into other pages.
    template_vars = copy.deepcopy(self._template_vars)
    page = {"content": content}
    page.update(meta)
    page.update({'relation': self.get_relation()})
    template_vars.update({'page': page})
    return template_vars
def get_category_and_file(self):
    """Get the name of category and file(with extension).

    The category is the page's directory inside the configured source
    tree, e.g. "docs/intro.md" -> ("docs", "intro.md").
    """
    src_file_relpath_to_source = \
        os.path.relpath(self._src_file, self.site_config['source'])
    category, filename = os.path.split(src_file_relpath_to_source)
    return (category, filename)
@staticmethod
def extract_page(filename):
    """Split a page file into its front-matter meta and its body.

    The file must start with a block delimited by two lines of ``---``;
    the text between them is the meta string and everything after the
    second delimiter is the content string.

    :param filename: path of the markup page to read
    :returns: ``(meta_str, content_str)`` tuple of unicode strings
    :raises Exception: if the file does not match the expected layout
    """
    with io.open(filename, "rt", encoding="utf-8") as fd:
        text = fd.read()
    pattern = re.compile('(?sm)^---(?P<meta>.*?)^---(?P<body>.*)')
    matched = re.match(pattern, text)
    if not matched:
        raise Exception('extracting page with format error, '
                        'see <http://simiki.org/docs/metadata.html>')
    return matched.group('meta'), matched.group('body')
def parse_meta(self, yaml_str):
    """Parse meta from yaml string, and validate yaml filed, return dict"""
    try:
        meta = yaml.load(yaml_str, Loader=yaml.FullLoader)
    except yaml.YAMLError as e:
        # Attach context for the caller's error reporting, then re-raise.
        e.extra_msg = 'yaml format error'
        raise
    category, src_fname = self.get_category_and_file()
    # Destination keeps the source name but swaps the configured markup
    # extension for .html.
    dst_fname = src_fname.replace(
        ".{0}".format(self.site_config['default_ext']), '.html')
    meta.update({'category': category, 'filename': dst_fname})
    if 'tag' in meta:
        # Accept a comma-separated string as well as a real list.
        # (`basestring' — this code path still supports Python 2.)
        if isinstance(meta['tag'], basestring):
            _tags = [t.strip() for t in meta['tag'].split(',')]
            meta.update({'tag': _tags})
    if "title" not in meta:
        raise Exception("no 'title' in meta")
    return meta
def _parse_markup(self, markup_text):
"""Parse markup text to html
Only support Markdown for now.
"""
markdown_extensions = self._set_markdown_extensio |
fuziontech/svb | svb/test/test_requestor.py | Python | mit | 15,897 | 0.000126 | import datetime
import unittest2
import urlparse
from mock import Mock, ANY
import svb
from svb.six.moves.urllib import parse
from svb.test.helper import SvbUnitTestCase
VALID_API_METHODS = ('get', 'post', 'delete', 'patch')
class GMT1(datetime.tzinfo):
    """Fixed GMT+1 timezone (no DST) used to build aware datetimes in tests."""

    _OFFSET = datetime.timedelta(hours=1)

    def utcoffset(self, dt):
        # Constant one-hour offset from UTC, independent of the date.
        return self._OFFSET

    def dst(self, dt):
        # This zone never observes daylight saving time.
        return datetime.timedelta(0)

    def tzname(self, dt):
        return "Europe/Prague"
class APIHeaderMatcher(object):
    """Equality helper asserting that an outgoing request carries exactly
    the expected header set (used as an argument matcher on mocked HTTP
    clients).  Python 2 code: relies on list-returning dict.keys() and
    on iteritems().
    """
    # Headers every request must carry.
    EXP_KEYS = [
        'Authorization',
        'SVB-Version',
        'User-Agent',
        'X-SVB-Client-User-Agent',
        'X-Timestamp',
        'X-Signature',
    ]
    # Additional headers required only for body-carrying methods.
    METHOD_EXTRA_KEYS = {
        "post": ["Content-Type"],
        "patch": ["Content-Type"],
    }
    def __init__(self, api_key=None, extra={}, request_method=None,
                 user_agent=None, app_info=None):
        # The mutable default for *extra* is safe here: it is only read.
        self.request_method = request_method
        self.api_key = api_key or svb.api_key
        self.extra = extra
        self.user_agent = user_agent
        self.app_info = app_info
    def __eq__(self, other):
        # *other* is the actual headers dict captured by the mock.
        return (self._keys_match(other) and
                self._auth_match(other) and
                self._user_agent_match(other) and
                self._x_svb_ua_contains_app_info(other) and
                self._extra_match(other))
    def _keys_match(self, other):
        # Exact key-set match: nothing missing, nothing unexpected.
        expected_keys = list(set(self.EXP_KEYS + self.extra.keys()))
        if self.request_method is not None and self.request_method in \
                self.METHOD_EXTRA_KEYS:
            expected_keys.extend(self.METHOD_EXTRA_KEYS[self.request_method])
        return (sorted(other.keys()) == sorted(expected_keys))
    def _auth_match(self, other):
        return other['Authorization'] == "Bearer %s" % (self.api_key,)
    def _user_agent_match(self, other):
        # Only checked when an explicit user agent was requested.
        if self.user_agent is not None:
            return other['User-Agent'] == self.user_agent
        return True
    def _x_svb_ua_contains_app_info(self, other):
        # The client UA header is JSON; app_info must round-trip intact.
        if self.app_info:
            ua = svb.util.json.loads(other['X-SVB-Client-User-Agent'])
            if 'application' not in ua:
                return False
            return ua['application'] == self.app_info
        return True
    def _extra_match(self, other):
        for k, v in self.extra.iteritems():
            if other[k] != v:
                return False
        return True
class JSONMatcher(object):
    """Equality helper comparing JSON documents irrespective of key or
    element ordering."""
    def ordered(self, obj):
        # Canonical form: dicts become sorted (key, value) lists and lists
        # are sorted.  NOTE(review): nested values are passed through str()
        # before recursing, so deep structures are effectively compared by
        # their string form — confirm that is intended.
        if isinstance(obj, dict):
            return sorted((k, self.ordered(str(v))) for k, v in obj.items())
        if isinstance(obj, list):
            return sorted(self.ordered(str(x)) for x in obj)
        else:
            return obj
    def __init__(self, expected):
        # NOTE(review): for inputs that are neither dict nor text,
        # self.expected is never assigned and __eq__ would raise
        # AttributeError — confirm all call sites pass one of the two.
        if isinstance(expected, dict):
            self.expected = self.ordered(expected)
        elif isinstance(expected, svb.six.text_type):
            self.expected = self.ordered(svb.util.json.loads(expected))
    def __eq__(self, other):
        # *other* is a JSON string (e.g. a captured request body).
        return self.expected == self.ordered(svb.util.json.loads(other))
class QueryMatcher(object):
    """Equality helper matching a querystring without regard to the order
    of its parameters."""
    def __init__(self, expected):
        # *expected* is an iterable of (key, value) pairs.
        self.expected = sorted(expected)
    def __eq__(self, other):
        # Accept either a full URL (use its query part) or a bare querystring.
        query = parse.urlsplit(other).query or other
        parsed = svb.util.parse_qsl(query)
        return self.expected == sorted(parsed)
class UrlMatcher(object):
    """Equality helper comparing URLs component-wise (scheme, netloc, path,
    fragment), with order-insensitive comparison of the query part."""
    def __init__(self, expected):
        self.exp_parts = parse.urlsplit(expected)
    def __eq__(self, other):
        other_parts = parse.urlsplit(other)
        for part in ('scheme', 'netloc', 'path', 'fragment'):
            expected = getattr(self.exp_parts, part)
            actual = getattr(other_parts, part)
            if expected != actual:
                # Python 2 print statement: console diagnostic to help
                # debug a failing assertion.
                print 'Expected %s "%s" but got "%s"' % (
                    part, expected, actual)
                return False
        # Query parameters may appear in any order.
        q_matcher = QueryMatcher(svb.util.parse_qsl(self.exp_parts.query))
        return q_matcher == other
class APIRequestorRequestTests(SvbUnitTestCase):
ENCODE_INPUTS = {
'dict': {
'astring': 'bar',
'anint': 5,
'anull': None,
'adatetime': datetime.datetime(2013, 1, 1, tzinfo=GMT1()),
'atuple': (1, 2),
'adict': {'foo': 'bar', 'boz': 5},
'alist': ['foo', 'bar'],
},
'list': [1, 'foo', 'baz'],
'string': 'boo',
'unicode': u'\u1234',
'datetime': datetime.datetime(2013, 1, 1, second=1, tzinfo=GMT1()),
'none': None,
}
ENCODE_EXPECTATIONS = {
'dict': [
('%s[astring]', 'bar'),
('%s[anint]', 5),
('%s[adatetime]', 1356994800),
('%s[adict][foo]', 'bar'),
('%s[adict][boz]', 5),
('%s[alist][]', 'foo'),
('%s[alist][]', 'bar'),
('%s[atuple][]', 1),
('%s[atuple][]', 2),
],
'list': [
('%s[]', 1),
('%s[]', 'foo'),
('%s[]', 'baz'),
],
'string': [('%s', 'boo')],
'unicode': [('%s', svb.util.utf8(u'\u1234'))],
'datetime': [('%s', 1356994801)],
'none': [],
}
def setUp(self):
super(APIRequestorRequestTests, self).setUp()
self.http_client = Mock(svb.http_client.HTTPClient)
self.http_client._verify_ssl_certs = True
self.http_client.name = 'mockclient'
self.requestor = svb.api_requestor.APIRequestor(
client=self.http_client)
def mock_response(self, return_body, return_code, requestor=None,
headers=None):
if not requestor:
requestor = self.requestor
self.http_client.request = Mock(
return_value=(return_body, return_code, headers or {}))
def check_call(self, meth, abs_url=None, headers=None,
post_data=None, requestor=None):
if not abs_url:
abs_url = 'https://api.svb.com%s' % (self.valid_path,)
if not requestor:
requestor = self.requestor
if not headers:
headers = APIHeaderMatcher(request_method=meth)
self.http_client.request.assert_called_with(
meth, abs_url, headers, post_data)
@property
def valid_path(self):
return '/foo'
def encoder_check(self, key):
stk_key = "my%s" % (key,)
value = self.ENCODE_INPUTS[key]
expectation = [(k % (stk_key,), v) for k, v in
self.ENCODE_EXPECTATIONS[key]]
stk = []
fn = getattr(svb.api_requestor.APIRequestor, "encode_%s" % (key,))
fn(stk, stk_key, value)
if isinstance(value, dict):
expectation.sort()
stk.sort()
self.assertEqual(expectation, stk)
def _test_encode_naive_datetime(self):
stk = []
svb.api_requestor.APIRequestor.encode_datetime(
stk, 'test', datetime.datetime(2013, 1, 1))
# Naive datetimes will encode differently depending on your system
        # local time. Since we don't know the local time of your system,
# we just check that naive encodings are within 24 hours of correct.
self.assertTrue(60 * 60 * 24 > abs(stk[0][1] - 1356994800))
def test_param_encoding(self):
self.mock_response('{}', 200)
self.requestor.request('get', '', | self.ENCODE_INPUTS)
expectation = []
for type_, values in self.ENCODE_EXPECTATIONS.iteritems():
expectation.extend([(k % (type_,), str(v)) for k, v in values])
self.check_call('get', QueryMatcher(expectation))
def test_dictionary_list_encoding(self):
params = {
'foo': {
'0': {
'bar': 'bat',
}
}
}
encoded = list(svb.api_requestor._api_encode(params))
key, value = encoded[0]
self.assertEqual('foo[0][bar]', key)
self.assertEqual('bat', value)
def test_url_construction(self):
CASES = (
('https://api.svb.com?foo=bar', '', {'foo': 'bar'}),
|
skyfsza/Logos | resources/lib/plugin_content.py | Python | gpl-2.0 | 15,952 | 0.002696 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
script.skin.helper.service
Helper service and scripts for Kodi skins
plugin_content.py
Hidden plugin entry point providing some helper features
'''
import xbmc
import xbmcplugin
import xbmcgui
import xbmcaddon
from simplecache import SimpleCache
from utils import log_msg, KODI_VERSION, log_exception, urlencode, getCondVisibility
from metadatautils import KodiDb, Tmdb, get_clean_image, process_method_on_list
import urlparse
import sys
import os
class PluginContent:
'''Hidden plugin entry point providing some helper features'''
params = {}
win = None
def __init__(self):
    """Parse the plugin-call parameters and dispatch to the requested action."""
    self.cache = SimpleCache()
    self.kodi_db = KodiDb()
    self.win = xbmcgui.Window(10000)  # home window, used for global properties
    try:
        # sys.argv[2] holds the plugin querystring; keys and values are
        # lowercased before parsing.
        self.params = dict(urlparse.parse_qsl(sys.argv[2].replace('?', '').lower().decode("utf-8")))
        log_msg("plugin called with parameters: %s" % self.params)
        self.main()
    except Exception as exc:
        log_exception(__name__, exc)
        xbmcplugin.endOfDirectory(handle=int(sys.argv[1]))
    # cleanup when done processing
    self.close()
def close(self):
    '''Cleanup Kodi Cpython instances'''
    # Release the cache connection and the Window handle explicitly so no
    # Kodi-side references are leaked when the plugin call finishes.
    self.cache.close()
    del self.win
def main(self):
    '''main action: look up the requested action and invoke the matching
    method, falling back to the legacy widget redirect.'''
    action = self.params.get("action", "")
    if self.win.getProperty("SkinHelperShutdownRequested"):
        # do not proceed if kodi wants to exit
        log_msg("%s --> Not forfilling request: Kodi is exiting" % __name__, xbmc.LOGWARNING)
        xbmcplugin.endOfDirectory(handle=int(sys.argv[1]))
    else:
        try:
            # NOTE(review): hasattr on the class means any attribute name
            # supplied via the "action" param can be invoked — confirm the
            # parameter source is trusted.
            if hasattr(self.__class__, action):
                # launch module for action provided by this plugin
                getattr(self, action)()
            else:
                # legacy (widget) path called !!!
                self.load_widget()
        except Exception as exc:
            log_exception(__name__, exc)
def load_widget(self):
    '''legacy entrypoint called (widgets are moved to seperate addon), start redirect...'''
    action = self.params.get("action", "")
    newaddon = "script.skin.helper.widgets"
    log_msg("Deprecated method: %s. Please reassign your widgets to get rid of this message. -"
            "This automatic redirect will be removed in the future" % (action), xbmc.LOGWARNING)
    # NOTE(review): paramstring is built but never used below — confirm
    # whether it should be forwarded to the widgets addon.
    paramstring = ""
    for key, value in self.params.iteritems():
        paramstring += ",%s=%s" % (key, value)
    if getCondVisibility("System.HasAddon(%s)" % newaddon):
        # TEMP !!! for backwards compatability reasons only - to be removed in the near future!!
        # Load the widgets addon's plugin.py in-process and run its Main()
        # so the old widget path keeps working.
        import imp
        addon = xbmcaddon.Addon(newaddon)
        addon_path = addon.getAddonInfo('path').decode("utf-8")
        imp.load_source('plugin', os.path.join(addon_path, "plugin.py"))
        from plugin import main
        main.Main()
        del addon
    else:
        # trigger install of the addon
        if KODI_VERSION > 16:
            xbmc.executebuiltin("InstallAddon(%s)" % newaddon)
        else:
            xbmc.executebuiltin("RunPlugin(plugin://%s)" % newaddon)
def playchannel(self):
    '''play channel from widget helper'''
    # Open the PVR channel through JSON-RPC Player.Open.
    params = {"item": {"channelid": int(self.params["channelid"])}}
    self.kodi_db.set_json("Player.Open", params)
def playrecording(self):
    '''retrieve the recording and play to get resume working'''
    recording = self.kodi_db.recording(self.params["recordingid"])
    params = {"item": {"recordingid": recording["recordingid"]}}
    self.kodi_db.set_json("Player.Open", params)
    # manually seek because passing resume to the player json cmd doesn't seem to work
    if recording["resume"].get("position"):
        # Poll up to ~2.5s (50 x 50ms) for playback to actually start
        # before issuing the seek.
        for i in range(50):
            if getCondVisibility("Player.HasVideo"):
                break
            xbmc.sleep(50)
        xbmc.Player().seekTime(recording["resume"].get("position"))
def launch(self):
    '''launch any builtin action using a plugin listitem'''
    if "runscript" in self.params["path"]:
        # NOTE(review): '?' appears to stand in for the ',' argument
        # separator inside plugin urls — confirm against callers.
        self.params["path"] = self.params["path"].replace("?", ",")
    xbmc.executebuiltin(self.params["path"])
def playalbum(self):
    '''helper to play an entire album'''
    # Raw JSON-RPC call; albumid comes from the plugin parameters.
    xbmc.executeJSONRPC(
        '{ "jsonrpc": "2.0", "method": "Player.Open", "params": { "item": { "albumid": %d } }, "id": 1 }' %
        int(self.params["albumid"]))
def smartshortcuts(self):
    '''called from skinshortcuts to retrieve listing of all smart shortcuts'''
    # Imported lazily: only needed when this action is requested.
    import skinshortcuts
    skinshortcuts.get_smartshortcuts(self.params.get("path", ""))
@staticmethod
def backgrounds():
    '''called from skinshortcuts to retrieve listing of all backgrounds'''
    # Imported lazily: only needed when this action is requested.
    import skinshortcuts
    skinshortcuts.get_backgrounds()
def widgets(self):
    '''called from skinshortcuts to retrieve listing of all widgetss'''
    # Imported lazily: only needed when this action is requested.
    import skinshortcuts
    skinshortcuts.get_widgets(self.params.get("path", ""), self.params.get("sublevel", ""))
def resourceimages(self):
    '''retrieve listing of specific resource addon images'''
    from resourceaddons import get_resourceimages
    addontype = self.params.get("addontype", "")
    # Each item is indexed as (label, path, label2, icon) below.
    for item in get_resourceimages(addontype, True):
        listitem = xbmcgui.ListItem(item[0], label2=item[2], path=item[1], iconImage=item[3])
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                    url=item[1], listitem=listitem, isFolder=False)
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]))
def extrafanart(self):
    '''helper to display extrafanart in multiimage control in the skin'''
    # NOTE(review): eval() on a plugin parameter — acceptable only if the
    # "fanarts" param is always produced by the skin itself; confirm it
    # cannot carry untrusted input.
    fanarts = eval(self.params["fanarts"])
    # process extrafanarts
    for count, item in enumerate(fanarts):
        listitem = xbmcgui.ListItem("fanart%s" % count, path=item)
        listitem.setProperty('mimetype', 'image/jpeg')
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=item, listitem=listitem)
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]))
def genrebackground(self):
    '''helper to display images for a specific genre in multiimage control in the skin'''
    # Default to "" so a missing "genre" parameter can't crash on .split().
    genre = self.params.get("genre", "").split(".")[0]
    arttype = self.params.get("arttype", "fanart")
    randomize = self.params.get("random", "false") == "true"
    mediatype = self.params.get("mediatype", "movies")
    # ".." is the parent-folder placeholder, never a real genre.
    if genre and genre != "..":
        filters = [{"operator": "is", "field": "genre", "value": genre}]
        if randomize:
            sort = {"method": "random", "order": "descending"}
        else:
            sort = {"method": "sorttitle", "order": "ascending"}
        # Dispatch to e.g. self.kodi_db.movies(...) based on the mediatype param.
        items = getattr(self.kodi_db, mediatype)(
            sort=sort,
            filters=filters, limits=(0, 50))
        for item in items:
            image = get_clean_image(item["art"].get(arttype, ""))
            if image:
                # (previously get_clean_image was computed a second time
                # here with an identical result; once is enough)
                listitem = xbmcgui.ListItem(image, path=image)
                listitem.setProperty('mimetype', 'image/jpeg')
                xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=image, listitem=listitem)
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]))
def getcastmedia(self):
'''helper to display get all media for a specific actor'''
name = self.params.get("name")
if name:
all_items = self.kodi_db.castmedia(name)
all_items = process_method_on_list(self.kodi_db.prepare_listitem, all_items)
all_items = process_method_on_list(self.kodi_db.create_listitem, all_items)
xbmcplugin.addDirectoryItems(int(sys.argv[1]), all_items, len(all_items))
xbmcplugin.endOfDirectory(ha |
sosilent/euca | clc/eucadmin/eucadmin/utils.py | Python | gpl-3.0 | 2,194 | 0.002279 | # Copyright (c) 2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Mitch Garnaat mgarnaat@eucalyptus.com
import os
def _walk_recursive(paths, fn, *params):
symlinks = []
for path in pa | ths:
fn(path, *params)
for dirpath, dirs, files in os.walk(path):
for d in dirs:
fullpath = os.path.join(dirpath, d)
if os.path.islink(fullpath):
symlinks.append(fullpath)
else:
fn(fullpath, *params)
for f in files:
fn(os.path.join(dirpath, f), *params)
return symlinks
def chown_recursive(path, uid, gid):
    """Recursively chown *path* to uid:gid, walking symlinked directories
    pass by pass until none remain."""
    path = [path]
    while path:
        path = _walk_recursive(path, os.chown, uid, gid)
def chmod_recursive(path, mode):
    """Recursively chmod *path* to *mode*, walking symlinked directories
    pass by pass until none remain."""
    path = [path]
    while path:
        path = _walk_recursive(path, os.chmod, mode)
|
LaetitiaPapaxanthos/UnionCom | test.py | Python | mit | 1,911 | 0.028781 | import os
import random
import torch
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier
def align_fraction(data1, data2, params):
    """Average, over rows of *data1*, of the fraction of *data2* samples
    lying strictly closer to data1[i] than data2[i] does.

    The indexing of sq_dist[i] implies the two datasets are paired by
    row index.  *params* is unused but kept for interface compatibility.
    """
    n_rows1, _ = np.shape(data1)
    n_rows2, _ = np.shape(data2)
    total = 0
    for i in range(n_rows1):
        # Squared Euclidean distances from data1[i] to every row of data2.
        sq_dist = ((data2 - data1[i]) ** 2).sum(axis=1)
        # How many data2 rows beat the paired sample's distance.
        closer = int(np.count_nonzero(sq_dist < sq_dist[i]))
        total += closer / n_rows2
    return total / n_rows1
def transfer_accuracy(domain1, domain2, type1, type2):
    """Label-transfer accuracy: fit a k-NN classifier (sklearn defaults)
    on domain2/type2, predict labels for domain1 and score against type1.

    Side effect: writes the predicted labels to "type1_predict.txt" in
    the current working directory.
    """
    knn = KNeighborsClassifier()
    knn.fit(domain2, type2)
    type1_predict = knn.predict(domain1)
    np.savetxt("type1_predict.txt", type1_predict)
    count = 0
    for label1, label2 in zip(type1_predict, type1):
        if label1 == label2:
            count += 1
    return count / len(type1)
def test_UnionCom(Project, dataset, datatype, change, params, device, test):
    """Apply the *Project* mapping to every dataset and return the
    projected embeddings reordered by np.argsort(change).

    When *test* is truthy, also prints the label-transfer accuracy of
    each dataset against the last one.
    """
    ########## test
    dataset_test = []
    for i in range(len(dataset)):
        dataset_test.append(torch.from_numpy(dataset[i]).float().to(device))
    # print("saving integrated data...")
    data = []
    integrated_data = []
    for i in range(len(dataset_test)):
        data.append(Project(dataset_test[i], i))
        # Detach from the autograd graph and move back to host memory.
        data[i] = data[i].detach().cpu().numpy()
    permutation = np.argsort(change)
    for i in permutation:
        integrated_data.append(data[i])
        # np.savetxt('integrated_data{}.txt'.format(change[i]),data[i])
    if test:
        for i in range(len(dataset_test)-1):
            # fraction = align_fraction(data[i], data[-1], params)
            # print("average fraction:")
            # print(fraction)
            acc = transfer_accuracy(data[i], data[-1], datatype[i], datatype[-1])
            print("label transfer accuracy:")
            print(acc)
    print("unionCom Done!")
    return integrated_data
return integrated_data |
appu1232/Discord-Selfbot | cogs/utils/menu.py | Python | gpl-3.0 | 5,866 | 0.00733 | import asyncio
class Menu:
    """An interactive menu class for Discord.

    A menu is a tree of submenu nodes rooted at ``main``; ``start`` walks
    the tree by repeatedly editing a single Discord message and reading
    the user's numbered/typed replies.
    """
    class Submenu:
        """A metaclass of the Menu class.

        Plain node: shows its content, then offers its children as a
        numbered list for the user to pick from.
        """
        def __init__(self, name, content):
            self.content = content
            self.leads_to = []  # child nodes, in display order
            self.name = name
        def get_text(self):
            # Numbered list of children, 1-based for the user.
            text = ""
            for idx, menu in enumerate(self.leads_to):
                text += "[{}] {}\n".format(idx+1, menu.name)
            return text
        def get_child(self, child_idx):
            try:
                return self.leads_to[child_idx]
            except IndexError:
                raise IndexError("child index out of range")
        def add_child(self, child):
            self.leads_to.append(child)
    class InputSubmenu:
        """A metaclass of the Menu class for submenu options that take input, instead of prompting the user to pick an option."""
        def __init__(self, name, content, input_function, leads_to):
            self.content = content  # one question (str) or a list of questions
            self.name = name
            self.input_function = input_function  # called with the reply message(s)
            self.leads_to = leads_to  # single follow-up node, or falsy to stop
        def next_child(self):
            return self.leads_to
    class ChoiceSubmenu:
        """A metaclass of the Menu class for submenu options for choosing an option from a list."""
        def __init__(self, name, content, options, input_function, leads_to):
            self.content = content
            self.name = name
            self.options = options  # dict or sequence of selectable options
            self.input_function = input_function  # called with (reply, chosen option)
            self.leads_to = leads_to
        def next_child(self):
            return self.leads_to
    def __init__(self, main_page):
        self.children = []
        self.main = self.Submenu("main", main_page)
    def add_child(self, child):
        # Children are attached directly to the root node.
        self.main.add_child(child)
    async def start(self, ctx):
        """Run the menu in ctx's channel until a leaf node is reached.

        One message is created and re-edited for the whole interaction;
        every user reply is deleted after being read.  Replies are only
        accepted from ctx.bot.user itself (selfbot-style operation).
        """
        current = self.main
        menu_msg = None
        while True:
            output = ""
            if type(current) == self.Submenu:
                if type(current.content) == str:
                    output += current.content + "\n"
                elif callable(current.content):
                    current.content()
                else:
                    raise TypeError("submenu body is not a str or function")
                # Leaf node: show the final content and stop.
                if not current.leads_to:
                    if not menu_msg:
                        menu_msg = await ctx.send("```" + output + "```")
                    else:
                        await menu_msg.edit(content="```" + output + "```")
                    break
                output += "\n" + current.get_text() + "\n"
                output += "Enter a number."
                if not menu_msg:
                    menu_msg = await ctx.send("```" + output + "```")
                else:
                    await menu_msg.edit(content="```" + output + "```")
                reply = await ctx.bot.wait_for("message", check=lambda m: m.author == ctx.bot.user and m.content.isdigit() and m.channel == ctx.message.channel)
                await reply.delete()
                try:
                    current = current.get_child(int(reply.content) - 1)
                except IndexError:
                    # Console-only feedback; the menu message is left as-is.
                    print("Invalid number.")
                    break
            elif type(current) == self.InputSubmenu:
                if type(current.content) == list:
                    # Ask each question in turn, then hand over all replies.
                    answers = []
                    for question in current.content:
                        await menu_msg.edit(content="```" + question + "\n\nEnter a value." + "```")
                        reply = await ctx.bot.wait_for("message", check=lambda m: m.author == ctx.bot.user and m.channel == ctx.message.channel)
                        await reply.delete()
                        answers.append(reply)
                    current.input_function(*answers)
                else:
                    await menu_msg.edit(content="```" + current.content + "\n\nEnter a value." + "```")
                    reply = await ctx.bot.wait_for("message", check=lambda m: m.author == ctx.bot.user and m.channel == ctx.message.channel)
                    await reply.delete()
                    current.input_function(reply)
                if not current.leads_to:
                    break
                current = current.leads_to
            elif type(current) == self.ChoiceSubmenu:
                result = "```" + current.content + "\n\n"
                if type(current.options) == dict:
                    indexes = {}
                    for idx, option in enumerate(current.options):
                        result += "[{}] {}: {}\n".format(idx+1, option, current.options[option])
                        indexes[idx] = option
                else:
                    # NOTE(review): this looks like it was meant to be
                    # enumerate(current.options) as in the dict branch; as
                    # written each element must itself be an (idx, option)
                    # pair — confirm intended.
                    for idx, option in current.options:
                        result += "[{}] {}\n".format(idx+1, option)
                await menu_msg.edit(content=result + "\nPick an option.```")
                reply = await ctx.bot.wait_for("message", check=lambda m: m.author == ctx.bot.user and m.content.isdigit() and m.channel == ctx.message.channel)
                await reply.delete()
                if type(current.options) == dict:
                    current.input_function(reply, indexes[int(reply.content)-1])
                else:
                    current.input_function(reply, current.options[int(reply.content)-1])
                if not current.leads_to:
                    break
                current = current.leads_to
|
varenius/salsa | USRP/usrp_gnuradio_dev/testold.py | Python | mit | 2,198 | 0.015469 | import matplotlib.pyplot as plt
import numpy as np
import sys
import time
import scipy.interpolate as ip
# Python 2 exploration script: load a saved spectrum (np.save'd tuple of
# spectrum, sample rate, FFT size), replace known RFI frequency ranges by
# linear spline interpolation over neighbouring channels, and plot the
# before/after data.  Usage: python testold.py <infile.npy>
infile = sys.argv[1]
indata = np.load(infile)
spec = indata[0]
samp_rate = indata[1]
fftsize = indata[2]
center_freq = 1419.4 # MHz
halffft = int(0.5*fftsize)
freqs = 0.5*samp_rate*np.array(range(-halffft,halffft))/(halffft)
#plt.plot(spec)
delta_nu = samp_rate/fftsize
plt.plot(freqs,spec)
plt.xlabel('relative to center [Mhz]')
# RFI list entries are [frequency in MHz, width in MHz] to be flagged.
RFI = [[1419.4-0.210, 0.02],
       #[1419.4-1.937, 0.015],
       #[1419.4-4.4, 0.015],
       #[1419.4+3.0, 0.01],
       #[center_freq, 8*delta_nu] # remove dip in the center of band, always about 4 fft points wide. Use 8, else errors
       ]
#plt.figure()
#plt.plot(spec)
# DEFINE FLAGS in HZ
for item in RFI:
    print item
    RFI_freq = item[0]
    RFI_width = item[1]
    ch0_freq = center_freq - 0.5*samp_rate
    # Channel indices covering the flagged band.
    ind_low = int(np.floor((RFI_freq-0.5*RFI_width - ch0_freq)/delta_nu))
    ind_high = int(np.ceil((RFI_freq+0.5*RFI_width - ch0_freq)/delta_nu))
    # Use equally wide clean regions on both sides for the fit.
    margin = min((ind_high-ind_low), ind_low, len(spec)-ind_high)
    RFI_org = np.array([spec[ind_low-margin:ind_low], spec[ind_high:ind_high+margin]])
    RFI_part = RFI_org.flatten()
    xdata = range(ind_low-margin, ind_low) + range(ind_high, ind_high+margin)
    print np.size(xdata), np.size(RFI_part)
    # Linear spline (k=1) through the clean neighbours, evaluated inside
    # the flagged band, then written back over the RFI channels.
    spl = ip.UnivariateSpline(xdata,RFI_part, k=1, s=0)
    interpdata = spl(range(ind_low, ind_high))
    print interpdata
    spec[ind_low:ind_high] = interpdata[:]
    plt.figure()
    plt.plot(RFI_part)
    plt.plot(interpdata)
#plt.figure()
#plt.plot(freqs, spec)
#for flag in flags:
#
#   Calculate flag indices
#   For each flag, interpolate flagged values (splines)
#   when all flaggs are applied and interpolated, proceed with convolve!
#plt.figure()
#convspec = np.convolve(spec, [1,1,1,1], mode='same')
#w = sig.boxcar(4)
#convspec=np.convolve(w/w.sum(),spec,mode='valid')
##convspec = sig.decimate(spec, 2)
#fftsize = fftsize/2
#halffft = int(0.5*fftsize)
#convfreqs = 0.5*samp_rate*np.array(range(-halffft,halffft))/(halffft)
#print np.shape(convspec)
#print np.shape(convfreqs)
#plt.plot(convfreqs,convspec)
#plt.xlabel('relative to center [Mhz]')
plt.show()
|
jlesquembre/autopilot | tests/pypi_test.py | Python | gpl-2.0 | 1,650 | 0.001818 | import os
from urllib.error import URLError
import pytest
from distlib.index import PackageIndex
from autopilot import pypi
def test_restore_cwd(fake_project_dir):
    """generate_dist must restore the working directory it changed into."""
    cwd = os.getcwd()
    with pypi.generate_dist(fake_project_dir.as_posix()) as (metadata, dist_files):
        pass
    assert cwd == os.getcwd()
def test_generate_dist(fake_project_dir):
    """Building the fixture project yields its metadata plus two dist
    files that both exist on disk."""
    with pypi.generate_dist(fake_project_dir.as_posix()) as (metadata, dist_files):
        assert metadata.name == 'fake_project'
        assert len(dist_files) == 2
        for dist_file in dist_files:
            assert dist_file.exists()
            assert dist_file.is_file()
def test_https_default(pypi_server, fake_project_dir):
    """Uploads default to HTTPS: hitting the plain-http test server must fail."""
    url, local_dir = pypi_server
    with pytest.raises(URLError) as excinfo:
        pypi.pypi_upload({'url': url,
                          'user': 'user',
                          'passeval': 'echo pass'
                          }, dist_dir=fake_project_dir.as_posix())
    assert 'Unexpected HTTP request on what should be a secure connection' in str(excinfo.value)
def test_upload(pypi_server, fake_project_dir):
    """With HTTPS disabled, both dist files land in the test server's dir."""
    url, local_dir = pypi_server
    # Server storage starts empty.
    assert local_dir.is_dir()
    assert local_dir.exists()
    assert len(list(local_dir.iterdir())) == 0
    pypi.pypi_upload({'url': url,
                      'user': 'user',
                      'passeval': 'echo pass'
                      }, use_https=False, dist_dir=fake_project_dir.as_posix())
    files = [f.name for f in local_dir.iterdir()]
    assert len(files) == 2
    assert 'fake_project-1.0.0-py2.py3-none-any.whl' in files
    assert 'fake_project-1.0.0.tar.gz' in files
|
gencer/python-phonenumbers | python/phonenumbers/shortdata/region_SX.py | Python | apache-2.0 | 548 | 0.009124 | """Auto-ge | nerated file, do not edit by hand. SX metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_SX = PhoneMetadata(id='SX', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='9\\d{2}', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='919', example_number='919', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='919', e | xample_number='919', possible_length=(3,)),
short_data=True)
|
cglewis/wharf | wharf/favicon.py | Python | apache-2.0 | 274 | 0.007299 | from wharf import app
from flask import send_from_directory
from os import path
@app.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the static folder with the proper MIME type."""
    return send_from_directory(path.join(app.root_path, 'static'),
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')
|
henriquegemignani/randovania | randovania/game_description/item/item_database.py | Python | gpl-3.0 | 2,456 | 0.000407 | from dataclasses import dataclass
from typing import Dict, Tuple, Union
from randovania.game_description.item import migrations
from randovania.game_description.item.ammo import Ammo
from randovania.game_description.item.item_category import ItemCategory
from randovania.game_description.item.major_item import MajorItem
from randovania.games.game import RandovaniaGame
@dataclass(frozen=True)
class ItemDatabase:
    """Immutable lookup tables for every pickup defined for a game."""
    # name -> category descriptor
    item_categories: Dict[str, ItemCategory]
    # name -> major item definition
    major_items: Dict[str, MajorItem]
    # name -> ammo definition
    ammo: Dict[str, Ammo]
    # category -> items granted by default for that category
    default_items: Dict[ItemCategory, Tuple[MajorItem, ...]]
    def get_item_with_name(self, name: str) -> Union[MajorItem, Ammo]:
        """Look up *name* among major items first, then ammo; None when
        it exists in neither table."""
        return self.major_items.get(name) or self.ammo.get(name)
def read_database(database_data: Dict, game: RandovaniaGame) -> ItemDatabase:
    """Deserialize a JSON-compatible dict into an ItemDatabase.

    :param database_data: raw database dict; migrated *in place* to the
        current schema version before parsing.
    :param game: the game the items belong to.
    :return: the populated, immutable ItemDatabase.
    """
    migrations.migrate_current(database_data)
    # Categories must be parsed first: items and ammo reference them.
    item_categories = {
        name: ItemCategory.from_json(name, value)
        for name, value in database_data["item_categories"].items()
    }
    major_items = {
        name: MajorItem.from_json(name, value, game, item_categories)
        for name, value in database_data["items"].items()
    }
    ammo = {
        name: Ammo.from_json(name, value, game, item_categories)
        for name, value in database_data["ammo"].items()
    }
    # Maps each category to the tuple of items granted by default.
    default_items = {
        item_categories[category_name]: tuple(major_items[item_name] for item_name in value)
        for category_name, value in database_data["default_items"].items()
    }
    return ItemDatabase(item_categories, major_items, ammo, default_items)
def write_database(database: ItemDatabase) -> Dict:
    """Serialize an ItemDatabase back to its JSON-compatible dict form.

    Inverse of read_database: every entry is converted through its own
    ``as_json`` property and the current schema version is stamped onto
    the result.
    """
    def _serialize(mapping):
        # Shared name -> as_json conversion for all three entry tables.
        return {name: entry.as_json for name, entry in mapping.items()}

    return {
        "schema_version": migrations.CURRENT_VERSION,
        "item_categories": _serialize(database.item_categories),
        "items": _serialize(database.major_items),
        "ammo": _serialize(database.ammo),
        "default_items": {
            category.name: [item.name for item in items]
            for category, items in database.default_items.items()
        },
    }
|
ContinuumIO/odo | odo/backends/bcolz.py | Python | bsd-3-clause | 3,424 | 0.000876 | from __future__ import absolute_import, division, print_function
import os
from bcolz import ctable, carray
import numpy as np
from toolz import keyfilter
import datashape
from datashape import discover
import shutil
from ..numpy_dtype import dshape_to_numpy
from ..append import append
from ..convert import convert, ooc_types
from ..resource import resource
from ..drop import drop
from ..chunks import chunks
keywords = ['cparams', 'dflt', 'expectedlen', 'chunklen', 'rootdir']
@discover.register((ctable, carray))
def discover_bcolz(c, **kwargs):
    """Discover the datashape of a bcolz container from its shape and dtype."""
    return datashape.from_numpy(c.shape, c.dtype)
@append.register((ctable, carray), np.ndarray)
def numpy_append_to_bcolz(a, b, **kwargs):
    """Append a numpy array to a bcolz container and flush it to disk."""
    a.append(b)
    a.flush()
    return a
@append.register((ctable, carray), object)
def object_append_to_bcolz(a, b, **kwargs):
    """Append an arbitrary object to bcolz by first converting it into
    chunked numpy arrays, then delegating to the ndarray path above.

    Renamed from ``numpy_append_to_bcolz``: the duplicate definition
    shadowed the ndarray variant at module level (dispatch still worked
    because @append.register captured each function, but the reused name
    made the module namespace misleading).
    """
    return append(a, convert(chunks(np.ndarray), b, **kwargs), **kwargs)
@convert.register(ctable, np.ndarray, cost=2.0)
def convert_numpy_to_bcolz_ctable(x, **kwargs):
    """Build a bcolz ctable from a numpy array, forwarding only the
    bcolz-recognized keyword arguments."""
    return ctable(x, **keyfilter(keywords.__contains__, kwargs))
@convert.register(carray, np.ndarray, cost=2.0)
def convert_numpy_to_bcolz_carray(x, **kwargs):
    """Build a bcolz carray from a numpy array, forwarding only the
    bcolz-recognized keyword arguments."""
    return carray(x, **keyfilter(keywords.__contains__, kwargs))
@convert.register(np.ndarray, (carray, ctable), cost=1.0)
def convert_bcolz_to_numpy(x, **kwargs):
    """Materialize an entire bcolz container as an in-memory numpy array."""
    return x[:]
@append.register((carray, ctable), chunks(np.ndarray))
def append_carray_with_chunks(a, c, **kwargs):
    """Append a stream of numpy chunks to a bcolz container, flushing once
    at the end rather than per chunk."""
    for chunk in c:
        append(a, chunk)
    a.flush()
    return a
@convert.register(chunks(np.ndarray), (ctable, carray), cost=1.2)
def bcolz_to_numpy_chunks(x, chunksize=2**20, **kwargs):
    """Lazily stream a bcolz container as numpy chunks of *chunksize* rows.

    NOTE(review): the first chunk is capped at 1000 rows — presumably so
    consumers can peek at a small sample cheaply; confirm.
    """
    def load():
        first_n = min(1000, chunksize)
        first = x[:first_n]
        yield first
        for i in range(first_n, x.shape[0], chunksize):
            yield x[i: i + chunksize]
    return chunks(np.ndarray)(load)
@resource.register(r'.*\.bcolz/?')  # raw string: '\.' is an invalid escape in a plain str
def resource_bcolz(uri, dshape=None, expected_dshape=None, **kwargs):
    """Open an existing bcolz dataset at ``uri`` or create a new one.

    If ``uri`` exists, try to open it as a ctable and fall back to a carray.
    Otherwise ``dshape`` is required; an empty container of the right
    shape/dtype is created on disk (a ctable for record measures, else a
    carray).  ``expected_dshape`` with a fixed leading dimension is used to
    preallocate storage via bcolz's ``expectedlen``.

    Raises ValueError when the path does not exist and no datashape is given.
    """
    if os.path.exists(uri):
        try:
            return ctable(rootdir=uri)
        except IOError:  # __rootdirs__ doesn't exist because we aren't a ctable
            return carray(rootdir=uri)
    else:
        if not dshape:
            raise ValueError("Must specify either existing bcolz directory or"
                             " valid datashape")
        dshape = datashape.dshape(dshape)
        dt = dshape_to_numpy(dshape)
        shape_tail = tuple(map(int, dshape.shape[1:]))  # tail of shape
        if dshape.shape[0] == datashape.var:
            # variable-length leading dimension: start empty, append later
            shape = (0,) + shape_tail
        else:
            shape = (int(dshape.shape[0]),) + shape_tail
        x = np.empty(shape=shape, dtype=dt)
        kwargs = keyfilter(keywords.__contains__, kwargs)
        # explicit expectedlen wins; otherwise derive it from a fixed-length
        # expected_dshape, if one was provided
        expectedlen = kwargs.pop('expectedlen',
                                 int(expected_dshape[0])
                                 if expected_dshape is not None and
                                 isinstance(expected_dshape[0], datashape.Fixed)
                                 else None)
        if datashape.predicates.isrecord(dshape.measure):
            return ctable(x, rootdir=uri, expectedlen=expectedlen, **kwargs)
        else:
            return carray(x, rootdir=uri, expectedlen=expectedlen, **kwargs)
@drop.register((carray, ctable))
def drop_bcolz(b, **kwargs):
    """Delete an on-disk bcolz container: flush pending data, then remove its
    root directory from disk."""
    b.flush()
    shutil.rmtree(b.rootdir)
# Register bcolz containers as out-of-core-capable types.
ooc_types |= set((carray, ctable))
|
yvaucher/vertical-ngo | __unported__/transport_plan/__openerp__.py | Python | agpl-3.0 | 1,727 | 0.001738 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joël Grand-Guillaume
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{"name": "Transportation Plan",
"version": "0.1",
"author": "Camptocamp",
"category": "Transportation",
"license": 'AGPL-3',
'complexity': " | normal",
"images" : [],
"website": "http://www.camptocamp.com",
"description": """
This module allows you to manage your transport
===============================================
A transport plan represent a kind of "contract" with your carrier.
""",
"depends" : ["sale",
"purchase",
"stock",
],
"demo": [],
"data": ["data/tranport_plan_sequence.xml",
"data/tranport_mode_data.xml",
"view/transport_plan.xml",
"view/transport_mode.xml",
"security/ir.model.access.csv",
],
"auto_install": False,
"test": [],
'installable': False,
}
|
theodox/mGui | mGui/events.py | Python | mit | 8,231 | 0.001701 | """
events.py
Defines a simple event handler system similar to that used in C#. Events allow
multicast delegates and arbitrary message passing. They use weak references so
they don't keep their handlers alive if they are otherwise out of scope.
"""
import weakref
import maya.utils
from functools import partial, wraps
import inspect
class Event(object):
    """
    Simple event handler, similar to the standard C# event pattern.  The
    object raising an event calls this Event as a callable; the Event in
    turn fires every callable stored in its handler set, passing along the
    caller's args/kwargs merged with the Event's own metadata.

    sample usage::

        test = Event()
        def a(*args, **kwargs):
            print "A", args, kwargs
        test += a
        test('arg1', 'arg2', e="fred")
        # A ('arg1', 'arg2') {'e': 'fred', 'event': <Event object ...>}

    Handlers are stored as weak references, so the Event does not keep them
    alive once every other reference is gone.  When a handler must outlive
    its defining scope, bind it together with a 'stash' object that will
    hold a hard reference for as long as the stash lives::

        x += handler, stash     # stash keeps handler alive
        x -= handler, stash     # removes both handler and the stash ref

    Handlers must accept (*args, **kwargs); it's the handler's job to decide
    what to do with them.  Keyword metadata supplied at construction time
    (e.g. Event(name='test_event')) is merged into the kwargs of every
    firing, together with 'event' -> this Event instance.
    """

    def __init__(self, **data):
        # A set of weakref wrappers: each handler registers at most once and
        # dead references can be pruned after firing.
        # (The original stored this note as a bare string statement, which is
        # not a comment; converted here.)
        self._handlers = set()
        self.data = data
        self.data['event'] = self

    def _add_handler(self, handler):
        """
        Add a handler callable, or a (callable, stash) tuple; in the latter
        case the callable is also stored as an attribute on `stash` so the
        stash keeps it alive.  Raises ValueError if not callable.
        """
        stash = None
        if isinstance(handler, tuple):
            handler, stash = handler
        if not callable(handler):
            # fix: the original passed the handler as a second argument to
            # ValueError instead of formatting it into the message
            raise ValueError("%s is not callable" % (handler,))
        if stash is not None:
            setattr(stash, '_sh_{}'.format(id(handler)), handler)
        self._handlers.add(get_weak_reference(handler))
        return self

    def _remove_handler(self, handler):
        """
        Remove a handler (or (handler, stash) tuple).  Ignores handlers that
        are not present.
        """
        stash = None
        if isinstance(handler, tuple):
            handler, stash = handler
        try:
            # drop the hard reference kept on the stash object, if any;
            # delattr(None, ...) simply raises AttributeError and is ignored
            delattr(stash, '_sh_{}'.format(id(handler)))
        except AttributeError:
            pass
        wr = get_weak_reference(handler)
        delenda = [h for h in self._handlers if h == wr]
        self._handlers = self._handlers.difference(set(delenda))
        return self

    def metadata(self, kwargs):
        """
        Return the firing-time kwargs merged over this Event's own metadata
        (firing-time values win on key collisions).
        """
        md = {}
        md.update(self.data)
        md.update(kwargs)
        return md

    def _fire(self, *args, **kwargs):
        """
        Call all handlers.  Any decayed references will be purged.
        """
        delenda = []
        for handler in self._handlers:
            try:
                handler(*args, **self.metadata(kwargs))
            except DeadReferenceError:
                delenda.append(handler)
        self._handlers = self._handlers.difference(set(delenda))

    def _handler_count(self):
        """
        Returns the number of registered handlers.
        """
        # fix: was len([i for i in self._handlers]) -- a pointless O(n) copy
        return len(self._handlers)

    # hook up the instance methods to the base methods
    # doing it this way allows you to override more neatly
    # in derived classes
    __call__ = _fire
    __len__ = _handler_count
    __iadd__ = _add_handler
    __isub__ = _remove_handler
    # fix: removed the debug __del__ that printed 'event expired' on every
    # garbage collection (Python 2 print statement, debugging leftover)
class MayaEvent(Event):
    """
    Subclass of Event that dispatches handlers through
    maya.utils.executeDeferred, so they run on Maya's main/idle loop.
    """
    def _fire(self, *args, **kwargs):
        """
        Call all handlers. Any decayed references will be purged.
        """
        # NOTE(review): executeDeferred only *schedules* the partial; the
        # handler runs later on Maya's idle loop, so a DeadReferenceError from
        # a dead weakref would be raised at that deferred time, not here.
        # This except clause therefore probably never fires -- confirm
        # against actual Maya usage.
        delenda = []
        for handler in self._handlers:
            try:
                maya.utils.executeDeferred(partial(handler, *args, **self.metadata(kwargs)))
            except DeadReferenceError:
                delenda.append(handler)
        self._handlers = self._handlers.difference(set(delenda))
    __call__ = _fire
class DeadReferenceError(TypeError):
    """
    Raised when a WeakMethodBound or WeakMethodFree is invoked after its
    target has been garbage collected.  Event catches this during firing to
    know which stale handlers to drop.
    """
# # create weak references to both bound and unbound methods
# # hat tip to Frederic Jolliton on ActiveState
class WeakMethodBound(object):
    """
    Encapsulates a weak reference to a bound method on an object. Has a
    hashable ID so that Events can identify multiple references to the same
    method and not duplicate them.

    Python 2 only: relies on ``im_func``/``im_self`` and ``apply``.
    The underlying function is held strongly; only the instance is weak,
    so the wrapper does not keep the instance alive.
    """
    __slots__ = ('function', 'referent', 'ID', '_ref_name')
    def __init__(self, f):
        self.function = f.im_func
        self.referent = weakref.ref(f.im_self)
        self._ref_name = f.im_func.__name__
        # Identity key: instance id XOR id of the (interned) method-name
        # string, so re-wrapping the same bound method hashes/compares equal.
        self.ID = id(f.im_self) ^ id(f.im_func.__name__)
    def __call__(self, *args, **kwargs):
        # weakref() yields the instance while alive, None once collected;
        # the `is False` test is defensive -- a weakref call never returns False.
        ref = self.referent()
        if not ref is False and not ref is None:
            return apply(self.function, (self.referent(),) + args, kwargs)
        else:
            raise DeadReferenceError("Reference to the bound method {0} no longer exists".format(self._ref_name))
    def __eq__(self, other):
        # Equal iff the other wrapper targets the same instance+method.
        if not hasattr(other, 'ID'):
            return False
        return self.ID == other.ID
    def __hash__(self):
        return self.ID
class WeakMethodFree(object):
    """
    Encapsulates a weak reference to a free (unbound) function or other
    callable.  Python 2 only (uses ``apply``).
    """
    __slots__ = ('function', 'ID', '_ref_name')
    def __init__(self, f):
        # Weakly reference the callable itself; remember its id and name so
        # equality, hashing and error messages still work after it dies.
        self.function = weakref.ref(f)
        self.ID = id(f)
        self._ref_name = getattr(f, '__name__', "'unnamed'")
    def __call__(self, *args, **kwargs):
        # weakref() returns the callable while alive, None once collected
        # (function objects are always truthy, so the truth test is safe).
        if self.function():
            return apply(self.function(), args, kwargs)
        else:
            raise DeadReferenceError("Reference to unbound method {0} no longer exists".format(self._ref_name))
    def __eq__(self, other):
        if not hasattr(other, 'ID'):
            return False
        return self.ID == other.ID
    def __hash__(self):
        return self.ID
def get_weak_reference(f):
    """
    Returns a WeakMethodBound or a WeakMethodFree for the supplied callable,
    as appropriate (Python 2 bound methods expose ``im_func``).
    """
    if hasattr(f, 'im_func'):
        return WeakMethodBound(f)
    return WeakMethodFree(f)
def event_handler(fn):
"""
decorator for making event handlers out of functions with no arguments
"""
if inspect.getargspec(fn).varargs and inspect.getargspec(fn).keywords:
return fn
@wraps(fn)
def wrapper( |
felixmatt/shyft | shyft/api/__init__.py | Python | lgpl-3.0 | 15,456 | 0.00744 | import inspect
import traceback
import warnings
import functools
from shyft.api._api import *
import numpy as np
from math import sqrt
def deprecated(message: str = ''):
    """
    Decorator factory that marks a function or method as deprecated.

    The wrapped callable emits a DeprecationWarning the first time it is
    invoked from any given call site (identified by its formatted stack);
    later calls from the same site stay silent.  ``message`` is appended
    to the warning text.
    """
    def _decorate(func):
        @functools.wraps(func)
        def _wrapped(*args, **kwargs):
            # Fingerprint the call site so each distinct caller warns once.
            call_site = '|'.join(traceback.format_stack(inspect.currentframe()))
            if call_site in _wrapped.last_call_source:
                return func(*args, **kwargs)
            warnings.warn("Class.method {} is now deprecated! {}".format(func, message),
                          category=DeprecationWarning, stacklevel=2)
            _wrapped.last_call_source.add(call_site)
            return func(*args, **kwargs)
        _wrapped.last_call_source = set()
        return _wrapped
    return _decorate
# Fix up vector types
DoubleVector.size = lambda self: len(self)
DoubleVector_FromNdArray = lambda x: DoubleVector.from_numpy(x)
def VectorString(v):
return str(v.to_numpy())
DoubleVector.__str__ = lambda self: VectorString(self)
Calendar.__str__ = lambda self: "Calendar('{0}')".format(self.tz_info.name())
def ShowUtcTime(v):
    """Render a vector of utctime values as '[t1,t2,...]' using the UTC
    calendar's string formatting."""
    utc = Calendar()
    # fix: the original referenced an undefined name `cal` here (NameError);
    # the local calendar is bound to `utc`.
    return "[" + ",".join([utc.to_string(t) for t in v]) + "]"
UtcTimeVector.size = lambda self: len(self)
# fix: __str__ must return a string -- the original assigned a lambda that
# returned the ShowUtcTime function object itself instead of calling it.
UtcTimeVector.__str__ = lambda self: ShowUtcTime(self)
IntVector.size = lambda self: len(self)
IntVector.__str__ = lambda self: VectorString(self)
StringVector.size = lambda self: len(self)
#ByteVector to/from string
ByteVector.__str__ = lambda self: byte_vector_to_hex_str(self)
ByteVector.from_str = byte_vector_from_hex_str
# fix up BW and pythonic syntax for TsVector
TsVector.size = lambda self: len(self)
TsVector.push_back = lambda self, ts: self.append(ts)
# and this is for bw.compat
def percentiles(tsv:TsVector,time_axis:TimeAxis,percentile_list:IntVector)->TsVector:
    # Backward-compatibility shim: forwards to the TsVector.percentiles method.
    return tsv.percentiles(time_axis,percentile_list)
TargetSpecificationVector.size = lambda self: len(self)
# fix bw. stl name
UtcTimeVector.push_back = lambda self, x: self.append(x)
IntVector.push_back = lambda self, x: self.append(x)
DoubleVector.push_back = lambda self, x: self.append(x)
StringVector.push_back = lambda self, x: self.append(x)
# FIx up YMDhms
YMDhms.__str__ = lambda self: "YMDhms({0},{1},{2},{3},{4},{5})".format(self.year, self.month, self.day, self.hour,
self.minute, self.second)
YMDhms.__repr__ = lambda self: "{0}({1},{2},{3},{4},{5},{6})".format(self.__class__.__name__,
self.year, self.month, self.day, self.hour,
self.minute, self.second)
YWdhms.__str__ = lambda self: "YWdhms({0},{1},{2},{3},{4},{5})".format(self.iso_year, self.iso_week, self.week_day, self.hour,
self.minute, self.second)
YWdhms.__repr__ = lambda self: "{0}({1},{2},{3},{4},{5},{6})".format(self.__class__.__name__,
self.iso_year, self.iso_week, self.week_day, self.hour,
self.minute, self.second)
# Fix up GeoPoint
GeoPoint.__str__ = lambda self: "GeoPoint({0},{1},{2})".format(self.x, self.y, self.z)
GeoPoint_difference = lambda a, b: GeoPoint.difference(a, b)
GeoPoint_xy_distance = lambda a, b: GeoPoint.xy_distance(a, b)
# Fix up LandTypeFractions
LandTypeFractions.__str__ = lambda \
self: "LandTypeFractions(glacier={0},lake={1},reservoir={2},forest={3},unspecified={4})".format(self.glacier(),
self.lake(),
self.reservoir(),
self.forest(),
self.unspecified())
# Fix up GeoCellData
def StrGeoCellData(gcd):
return "GeoCellData(mid_point={0},catchment_id={1},area={2},ltf={3})".format(str(gcd.mid_point()),
gcd.catchment_id(), gcd.area(),
str(gcd.land_type_fractions_info()))
GeoCellData.__str__ = lambda self: StrGeoCellData(self)
# Fix up UtcPeriod
UtcPeriod.to_string = lambda self: str(self)
# Fix up TimeAxis
def ta_iter(x):
    # Iteration-protocol shim monkey-patched in as __iter__: (re)start the
    # period counter.  NOTE: the counter lives on the time-axis object itself,
    # so two concurrent iterations over the same instance would interfere.
    x.counter = 0
    return x
def ta_next(ta):
    # __next__ shim: advance the shared counter and return the next period;
    # delete the counter and stop once all periods have been yielded.
    if ta.counter >= len(ta):
        del ta.counter
        raise StopIteration
    ta.counter += 1
    return ta(ta.counter - 1)
TimeAxisFixedDeltaT.__str__ = lambda self: "TimeAxisFixedDeltaT({0},{1},{2})".format(Calendar().to_string(self.start), self.delta_t, self.n)
TimeAxisFixedDeltaT.__len__ = lambda self: self.size()
TimeAxisFixedDeltaT.__call__ = lambda self, i: self.period(i)
TimeAxisFixedDeltaT.__iter__ = lambda self: ta_iter(self)
TimeAxisFixedDeltaT.__next__ = lambda self: ta_next(self)
TimeAxisCalendarDeltaT.__str__ = lambda self: "TimeAxisCalendarDeltaT(Calendar('{3}'),{0},{1},{2})".format(Calendar().to_string(self.start), self.delta_t, self.n,self.calendar.tz_info.name())
TimeAxisCalendarDeltaT.__len__ = lambda self: self.size()
TimeAxisCalendarDeltaT.__call__ = lambda self, i: self.period(i)
TimeAxisCalendarDeltaT.__iter__ = lambda self: ta_iter(self)
TimeAxisCalendarDeltaT.__next__ = lambda self: ta_next(self)
TimeAxisByPoints.__str__ = lambda self: "TimeAxisByPoints(total_period={0}, n={1},points={2} )".format(str(self.total_period()),len(self),repr(TimeAxis(self).time_points))
TimeAxisByPoints.__len__ = lambda self: self.size()
TimeAxisByPoints.__call__ = lambda self, i: self.period(i)
TimeAxisByPoints.__iter__ = lambda self: ta_iter(self)
TimeAxisByPoints.__next__ = lambda self: ta_next(self)
def nice_ta_string(time_axis):
    """Render the generic TimeAxis wrapper via whichever concrete
    representation it currently holds (fixed / calendar / point)."""
    ta_kind = time_axis.timeaxis_type
    if ta_kind == TimeAxisType.FIXED:
        concrete = time_axis.fixed_dt
    elif ta_kind == TimeAxisType.CALENDAR:
        concrete = time_axis.calendar_dt
    else:
        concrete = time_axis.point_dt
    return str(concrete)
TimeAxis.__str__ = lambda self: nice_ta_string(self)
TimeAxis.__len__ = lambda self: self.size()
TimeAxis.__call__ = lambda self, i: self.period(i)
TimeAxis.__iter__ = lambda self: ta_iter(self)
TimeAxis.__next__ = lambda self: ta_next(self)
TimeAxis.time_points = property( lambda self: time_axis_extract_time_points(self).to_numpy(),doc= \
"""
extract all time-points from a TimeAxis
like
[ time_axis.time(i) ].append(time_axis.total_period().end) if time_axis.size() else []
Parameters
----------
time_axis : TimeAxis
Returns
-------
time_points:numpy.array(dtype=np.int64)
[ time_axis.time(i) ].append(time_axis.total_period().end)
""")
# fix up property on timeseries
TimeSeries.time_axis = property(lambda self: self.get_time_axis(), doc="returns the time_axis of the timeseries")
TimeSeries.__len__ = lambda self: self.size()
TimeSeries.v = property(lambda self: self.values, doc="returns the point-values of timeseries, alias for | .values")
TimeSeries.kling_gupta = lambda self, oth | er_ts, s_r=1.0, s_a=1.0, s_b=1.0: kling_gupta(self, other_ts,
self.get_time_axis(), s_r, s_a,
s_b)
TimeSeries.kling_gupta.__doc__ = \
"""
computes the kling_gupta correlation using self as observation, and self.time_axis as
the comparison time-axis
Parameters
----------
other_ts : Timeseries
t |
TE-ToshiakiTanaka/bantorra.old | util/cmd.py | Python | mit | 1,792 | 0.005022 | import sys
import os
import time
import imp
import subprocess
import multiprocessing
from bantorra.util import define
from bantorra.util.log import LOG as L
TIMEOUT = 30
class Process(multiprocessing.Process):
    """Run a single command in a child process (shell-less) and push its
    captured stdout onto a multiprocessing queue."""
    def __init__(self, command, queue):
        # command: full command line; split on single spaces in run(), so
        # quoted arguments / embedded spaces are not supported.
        self.command = command
        self.queue = queue
        multiprocessing.Process.__init__(self)
    def run(self):
        """Child-process body: execute the command, queue its stdout."""
        L.info("Command Send : %s" % self.command)
        args = self.command.split(" ")
        subproc_args = { 'stdin' : subprocess.PIPE,
                         'stdout' : subprocess.PIPE,
                         'stderr' : subprocess.PIPE,
                       }
        try:
            proc = subprocess.Popen(args, **subproc_args)
        except OSError:
            L.info("Failed to execute command: %s" % args[0])
            sys.exit(1)  # exits only this child process, not the parent
        (stdout, stderr) = proc.communicate()  # stderr is captured but discarded
        code = proc.wait()
        L.debug("Command Resturn Code: %d" % code)
        self.queue.put(stdout)
    def kill(self):
        # Hard-terminate the child; used by Console on timeout.
        L.debug("Kill This Process.")
        self.terminate()
class Console(object):
    """Synchronous facade around Process: run a command with a timeout and
    return its stdout, or None on timeout / no output."""
    def __init__(self):
        # Shared queue reused across execute() calls to receive child stdout.
        self.queue = multiprocessing.Queue()
    def __exec(self, cmd, timeout=TIMEOUT):
        proc = Process(cmd, self.queue)
        proc.start()
        proc.join(timeout)
        if proc.is_alive():
            # join() timed out: kill the child and give it a moment to die.
            proc.kill()
            time.sleep(3)
            L.debug("proc.terminate. %s" % proc.is_alive())
        # NOTE(review): Queue.empty() is racy -- the child may still be
        # flushing its result when checked; confirm this is acceptable.
        if self.queue.empty():
            return None
        return self.queue.get()
    def execute(self, cmd, timeout=TIMEOUT):
        """Public entry point; see __exec for semantics."""
        return self.__exec(cmd, timeout=timeout)
CONSOLE = Console()
if __name__ == "__main__":
print CONSOLE.execute("python C:\\Users\\setsulla\\Repository\\python\\bantorra\\bin\\kancolle\\server.py -p 5000")
|
lesiavl/selenium_perfomance_tests | pzo_load/load_test_file.py | Python | apache-2.0 | 5,242 | 0.001336 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from page_objects import *
from selenium import webdriver
import datetime
import time
import Queue
import threading
import traceback
tenders = Queue.Queue()
tenders_ids = []
tenders_threads = 2
bids = Queue.Queue()
bids_failed = {}
runs = Queue.Queue()
class CreateTenders(threading.Thread):
    """Worker thread: log in as the owner and create one tender per queue
    token, appending the new tender id to the shared `tenders_ids` list."""
    # class-level default; an instance attribute shadows it on failure
    exited = False
    def __init__(self, queue, driver):
        threading.Thread.__init__(self)
        self.queue = queue
        self.driver = driver
        self.login_page_owner = LoginPage(
            owner_users['email'], owner_users['password'], self.driver
        )
        self.create_tender_page = CreateTenderPage(self.driver)
        self.find_tender = FindTenderPage(self.driver)
    def run(self):
        while True:
            # Wait for start
            self.queue.get()
            # Process business logic
            self.driver.get(broker['url'])
            try:
                self.login_page_owner.login_as_owner()
                self.driver.get(create_tender_url)
                self.create_tender_page.create_tender()
                # list.append is thread-safe under the GIL
                tenders_ids.append(self.find_tender.get_tender_id())
            except Exception as error:
                # self.driver.close()
                self.exited = True
                print (error)
                traceback.print_exc()
                raise error
            finally:
                # always release the queue token so join() can complete
                self.queue.task_done()
class MakeTendersBids(threading.Thread):
    """Worker thread: log in as a provider and place a bid on one tender.

    Outcomes are recorded in the shared `bids_failed` dict keyed by tender id
    ('passed' / 'failed').
    """
    # class-level default; an instance attribute shadows it on failure
    exited = False
    def __init__(self, queue, user, password, tender_id, driver):
        threading.Thread.__init__(self)
        self.queue = queue
        self.driver = driver
        self.tender_id = tender_id
        self.login_page_provider = LoginPage(user, password, self.driver)
        self.find_tender = FindTenderPage(self.driver)
        self.make_bid_page = MakeBidPage(self.driver)
    def run(self):
        while True:
            # Wait for start
            self.queue.get()
            self.driver.get(broker['url'])
            # Process business logic
            try:
                self.login_page_provider.login_as_provider()
                self.find_tender.find_tender(self.tender_id)
                if not self.make_bid_page.make_bid():
                    bids_failed[self.tender_id] = 'failed'
                    print('Bid failed for tender: {}'.format(self.tender_id))
                    return
                bids_failed[self.tender_id] = 'passed'
                print('Bid success for tender {}'.format(self.tender_id))
            except Exception as error:
                # self.driver.close()
                # fix: was `self.exited = False` -- mark the worker as exited
                # on failure, consistent with CreateTenders.
                self.exited = True
                print(error)
                traceback.print_exc()
                raise error
            finally:
                # always release the queue token so join() can complete
                self.queue.task_done()
class RunTenderBids(threading.Thread):
    """Worker thread: click 'run bid' for an already-prepared bid and log
    start/finish timestamps to load_results.txt."""
    def __init__(self, queue, driver, providerAndTender):
        threading.Thread.__init__(self)
        self.queue = queue
        self.driver = driver
        self.make_bid_page = MakeBidPage(self.driver)
        # "provider tender_id" label, used only for log lines
        self.providerAndTender = providerAndTender
    def run(self):
        while True:
            # Wait for start
            self.queue.get()
            # Process business logic
            try:
                # Append-mode log shared by all threads, so lines may interleave.
                with open('load_results.txt', 'a') as fl:
                    fl.write('{} started bid for {} —---------------- STARTED\n'.format(self.providerAndTender, datetime.datetime.now()))
                    self.make_bid_page.run_bid()
                    fl.write('{} made bid for {} —---------------- FINISHED\n'.format(self.providerAndTender, datetime.datetime.now()))
                    fl.close()  # redundant: the `with` block already closes the file
            finally:
                # always release the queue token so join() can complete
                self.queue.task_done()
start = time.time()
# Start creating tenders
print('Start creating tenders...')
for i in range(tenders_threads):
driver = webdriver.Chrome()
driver.set_window_size(1200, 1000)
t = CreateTenders(tenders, driver)
t.setDaemon(True)
t.start()
for i in range(tenders_threads):
tenders.put(True)
# Wait for all to complete
tenders.join()
print('Tenders created - ' + ', '.join(tenders_ids))
# Start making tenders bids
print('Start making bids...')
drivers = {}
for tid in tenders_ids:
for provider in provider_users.items():
driver = webdriver.Chrome()
driver.set_window_size(1200, 1000)
drivers['{} {}'.format(provider[0], tid)] = driver
b = MakeTendersBids(bids, provider[0], provider[1], tid, driver)
b.setDaemon(True)
print(provider[0], tid)
b.start()
for tid in tenders_ids:
for provider in provider_users.items():
bids.put(True)
bids.join()
print('Bids made')
print(bids_failed)
with open('load_results.txt', 'a') as f:
| f.write('{} failed \n'.format(bids_failed))
f.close()
# Start making by clicking simultaneously
print('Start running bids...')
for driver in drivers.keys():
c = RunTenderBids(runs, drivers[driver], driver)
c.setDaemon(True)
c.start()
|
for driver in drivers:
runs.put(True)
runs.join()
print('Runs performed')
print("Elapsed Time: %s" % (time.time() - start))
for driver in drivers:
drivers[driver].close()
|
mozilla/bztools | auto_nag/scripts/defect_with_please_or_enable.py | Python | bsd-3-clause | 1,009 | 0.000991 | # This Source Code Form is subjec | t to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from auto_nag.bzcleaner import BzCleaner
class DefectWithPlease(BzCleaner):
    """Finds open defect-type bugs whose summary begins with an imperative
    word ('Please', 'Enable', 'Disable', 'Implement') -- such bugs are
    usually tasks rather than defects."""

    def __init__(self):
        super(DefectWithPlease, self).__init__()

    def description(self):
        return (
            "Defect with description starting with 'Please', 'Enable', 'Disable', etc"
        )

    def ignore_date(self):
        # The query is bounded by days_elapsed below, not by the runner date.
        return True

    def get_bz_params(self, date):
        # Bugzilla search parameters: unresolved defects whose summary
        # matches the regexp, changed within the configured lookup window.
        return {
            "resolution": "---",
            "short_desc": r"^(please|enable|disable|implement)",
            "short_desc_type": "regexp",
            "bug_type": ["defect"],
            "f1": "days_elapsed",
            "o1": "lessthan",
            "v1": self.get_config("days_lookup"),
        }
if __name__ == "__main__":
DefectWithPlease().run()
|
protonn/Electrum-Cash | lib/tests/test_simple_config.py | Python | mit | 11,045 | 0.00498 | import ast
import sys
import os
import unittest
import tempfile
import shutil
import json
from StringIO import StringIO
from lib.simple_config import (SimpleConfig, read_system_config,
read_user_config)
class Test_SimpleConfig(unittest.TestCase):
    def setUp(self):
        """Create throwaway electrum/user directories and buffer stdout."""
        super(Test_SimpleConfig, self).setUp()
        # make sure "read_user_config" and "user_dir" return a temporary directory.
        self.electrum_dir = tempfile.mkdtemp()
        # Do the same for the user dir to avoid overwriting the real configuration
        # for development machines with electrum installed :)
        self.user_dir = tempfile.mkdtemp()
        self.options = {"electrum_path": self.electrum_dir}
        # Capture stdout so config code that prints does not pollute test output.
        self._saved_stdout = sys.stdout
        self._stdout_buffer = StringIO()
        sys.stdout = self._stdout_buffer
    def tearDown(self):
        """Remove the temporary directories and restore the real stdout."""
        super(Test_SimpleConfig, self).tearDown()
        # Remove the temporary directory after each test (to make sure we don't
        # pollute /tmp for nothing.
        shutil.rmtree(self.electrum_dir)
        shutil.rmtree(self.user_dir)
        # Restore the "real" stdout
        sys.stdout = self._saved_stdout
def test_simple_config_key_rename(self):
"""auto_cycle was renamed auto_connect"""
fake_read_system = lambda : {}
fake_read_user = lambda _: {"auto_cycle": True}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options=self.options,
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
self.assertEqual(config.get("auto_connect"), True)
self.assertEqual(config.get("auto_cycle"), None)
fake_read_user = lambda _: {"auto_connect": False, "auto_cycle": True}
config = SimpleConfig(options=self.options,
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
self.assertEqual(config.get("auto_connect"), False)
self.assertEqual(config.get("auto_cycle"), None)
def test_simple_config_command_line_overrides_everything(self):
"""Options passed by command line override all other configuration
sources"""
fake_read_system = lambda : {"electrum_path": "a"}
fake_read_user = lambda _: {"electrum_path": "b"}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options=self.options,
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
self.assertEqual(self.options.get("electrum_path"),
config.get("electrum_path"))
def test_simple_config_user_config_overrides_system_config(self):
"""Options passed in user config override system config."""
fake_read_system = lambda : {"electrum_path": self.electrum_dir}
fake_read_user = lambda _: {"electrum_path": "b"}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options={},
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
self.assertEqual("b", config.get("electrum_path"))
def test_simple_config_system_config_ignored_if | _portable(self):
"""If electrum is started with the "portable" flag, system
configuration is completely ignored."""
fake_read_system = lambda : {"some_key": "some_value"}
fake_read_user = lambda _: {}
read_user_dir = lambda : s | elf.user_dir
config = SimpleConfig(options={"portable": True},
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
self.assertEqual(config.get("some_key"), None)
def test_simple_config_user_config_is_used_if_others_arent_specified(self):
"""If no system-wide configuration and no command-line options are
specified, the user configuration is used instead."""
fake_read_system = lambda : {}
fake_read_user = lambda _: {"electrum_path": self.electrum_dir}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options={},
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
self.assertEqual(self.options.get("electrum_path"),
config.get("electrum_path"))
def test_cannot_set_options_passed_by_command_line(self):
fake_read_system = lambda : {}
fake_read_user = lambda _: {"electrum_path": "b"}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options=self.options,
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
config.set_key("electrum_path", "c")
self.assertEqual(self.options.get("electrum_path"),
config.get("electrum_path"))
def test_can_set_options_from_system_config(self):
fake_read_system = lambda : {"electrum_path": self.electrum_dir}
fake_read_user = lambda _: {}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options={},
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
config.set_key("electrum_path", "c")
self.assertEqual("c", config.get("electrum_path"))
def test_can_set_options_set_in_user_config(self):
another_path = tempfile.mkdtemp()
fake_read_system = lambda : {}
fake_read_user = lambda _: {"electrum_path": self.electrum_dir}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options={},
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
config.set_key("electrum_path", another_path)
self.assertEqual(another_path, config.get("electrum_path"))
def test_can_set_options_from_system_config_if_portable(self):
"""If the "portable" flag is set, the user can overwrite system
configuration options."""
another_path = tempfile.mkdtemp()
fake_read_system = lambda : {"electrum_path": self.electrum_dir}
fake_read_user = lambda _: {}
read_user_dir = lambda : self.user_dir
config = SimpleConfig(options={"portable": True},
read_system_config_function=fake_read_system,
read_user_config_function=fake_read_user,
read_user_dir_function=read_user_dir)
config.set_key("electrum_path", another_path)
self.assertEqual(another_path, config.get("electrum_path"))
def test_user_config_is_not_written_with_read_only_config(self):
"""The user config does not contain command-line options or system
options when saved."""
fake_read_system = lambda : {"something": "b"}
fake_read_user = lambda _: {"something": "a"}
read_user_dir = lambda : self.user_dir
self.options.update({"something": "c"})
config = SimpleConfig(options=self.options,
read_system_config_function=fake_read_system,
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.